index
int64 0
10k
| blob_id
stringlengths 40
40
| step-1
stringlengths 0
305k
| step-2
stringlengths 6
1.1M
⌀ | step-3
stringlengths 15
1.23M
⌀ | step-4
stringlengths 23
1.34M
⌀ | step-5
stringlengths 55
1.2M
⌀ | step-ids
listlengths 1
5
|
---|---|---|---|---|---|---|---|
2,400 |
cbf93eb96f40ff0aedc4b8d9238669da72934b27
|
<mask token>
class ascii_handler(port_handler):
<mask token>
<mask token>
|
<mask token>
class ascii_handler(port_handler):
<mask token>
def handle_data(self):
"""
Show a nicely formatted server list and immediately close connection
"""
self.ls.log.info('Sending ascii server list to %s' % self.ip)
self.cleanup()
servers = fetch_all(
'SELECT * FROM servers WHERE max > 0 ORDER BY prefer DESC, private ASC, (players = max) ASC, players DESC, created ASC'
)
asciilist = ''
server_count = 0
for server in servers:
try:
entry = server['ip'] + ':' + str(server['port']) + ' '
entry += 'local ' if server['remote'] == 0 else 'mirror '
entry += 'public ' if server['private'] == 0 else 'private '
entry += server['mode'] + ' '
entry += server['version'][:6].ljust(6, ' ') + ' '
entry += str(int(time.time()) - int(server['created'])) + ' '
entry += '[' + str(server['players']) + '/' + str(server['max']
) + '] '
entry += server['name'] + '\r\n'
asciilist += entry
server_count += 1
except TypeError:
continue
self.msg(asciilist)
self.end()
|
<mask token>
class ascii_handler(port_handler):
"""
Serve ASCII server list
"""
def handle_data(self):
"""
Show a nicely formatted server list and immediately close connection
"""
self.ls.log.info('Sending ascii server list to %s' % self.ip)
self.cleanup()
servers = fetch_all(
'SELECT * FROM servers WHERE max > 0 ORDER BY prefer DESC, private ASC, (players = max) ASC, players DESC, created ASC'
)
asciilist = ''
server_count = 0
for server in servers:
try:
entry = server['ip'] + ':' + str(server['port']) + ' '
entry += 'local ' if server['remote'] == 0 else 'mirror '
entry += 'public ' if server['private'] == 0 else 'private '
entry += server['mode'] + ' '
entry += server['version'][:6].ljust(6, ' ') + ' '
entry += str(int(time.time()) - int(server['created'])) + ' '
entry += '[' + str(server['players']) + '/' + str(server['max']
) + '] '
entry += server['name'] + '\r\n'
asciilist += entry
server_count += 1
except TypeError:
continue
self.msg(asciilist)
self.end()
|
import time
from helpers.handler import port_handler
from helpers.functions import fetch_all
class ascii_handler(port_handler):
"""
Serve ASCII server list
"""
def handle_data(self):
"""
Show a nicely formatted server list and immediately close connection
"""
self.ls.log.info('Sending ascii server list to %s' % self.ip)
self.cleanup()
servers = fetch_all(
'SELECT * FROM servers WHERE max > 0 ORDER BY prefer DESC, private ASC, (players = max) ASC, players DESC, created ASC'
)
asciilist = ''
server_count = 0
for server in servers:
try:
entry = server['ip'] + ':' + str(server['port']) + ' '
entry += 'local ' if server['remote'] == 0 else 'mirror '
entry += 'public ' if server['private'] == 0 else 'private '
entry += server['mode'] + ' '
entry += server['version'][:6].ljust(6, ' ') + ' '
entry += str(int(time.time()) - int(server['created'])) + ' '
entry += '[' + str(server['players']) + '/' + str(server['max']
) + '] '
entry += server['name'] + '\r\n'
asciilist += entry
server_count += 1
except TypeError:
continue
self.msg(asciilist)
self.end()
|
import time
from helpers.handler import port_handler
from helpers.functions import fetch_all
class ascii_handler(port_handler):
"""
Serve ASCII server list
"""
def handle_data(self):
"""
Show a nicely formatted server list and immediately close connection
"""
self.ls.log.info("Sending ascii server list to %s" % self.ip)
self.cleanup()
servers = fetch_all(
"SELECT * FROM servers WHERE max > 0 ORDER BY prefer DESC, private ASC, (players = max) ASC, players DESC, created ASC")
asciilist = ""
server_count = 0
for server in servers:
try:
entry = server['ip'] + ':' + str(server['port']) + ' ' # ip:port
entry += 'local ' if server['remote'] == 0 else 'mirror ' # 'local' or 'mirror'
entry += 'public ' if server['private'] == 0 else 'private ' # 'public' or 'private'
entry += server['mode'] + ' ' # game mode
entry += server['version'][:6].ljust(6, ' ') + ' ' # version
entry += str(int(time.time()) - int(server['created'])) + ' ' # uptime in seconds
entry += '[' + str(server['players']) + '/' + str(server['max']) + '] ' # [players/max]
entry += server['name'] + "\r\n" # server name
asciilist += entry
server_count += 1
except TypeError:
continue
self.msg(asciilist)
self.end()
|
[
1,
2,
3,
4,
5
] |
2,401 |
19d86c64876575ed9b3f5e33dd44e7633c96e696
|
<mask token>
class product_product(orm.Model):
<mask token>
def get_kits_product_available(self, cr, uid, ids, context=None):
pass
def _kits_product_available(self, cr, uid, ids, field_names=None, arg=
False, context=None):
res = {}
field_names = field_names or []
context = context or {}
for id in ids:
res[id] = {}.fromkeys(field_names, 0.0)
field_map = {'kits_qty_available': 'qty_available',
'kits_incoming_qty': 'incoming_qty', 'kits_outgoing_qty':
'outgoing_qty', 'kits_virtual_available': 'virtual_available'}
for product_record in self.browse(cr, uid, ids, context=context):
so_qty = self._get_sale_quotation_qty(cr, uid, product_record.
id, context=context)
if not self._is_kit(cr, uid, [product_record.id], context=context
).get(product_record.id):
res[product_record.id] = {'kits_qty_available': 0,
'kits_incoming_qty': 0, 'kits_virtual_available': 0,
'kits_outgoing_qty': 0, 'kits_sale_quotation_qty': so_qty}
else:
for bom in product_record.bom_ids:
if bom.type == 'phantom':
child_product_res = {}
for line in bom.bom_lines:
child_product_res[line.product_id.id] = {
'product_qty': line.product_qty or 0.0}
child_product_qtys = self._product_available(cr,
uid, child_product_res.keys(), field_map.values
(), context=context)
res[product_record.id] = {'kits_qty_available':
self._get_qty_from_children(child_product_qtys,
child_product_res, 'qty_available'),
'kits_incoming_qty': self.
_get_qty_from_children(child_product_qtys,
child_product_res, 'incoming_qty'),
'kits_virtual_available': self.
_get_qty_from_children(child_product_qtys,
child_product_res, 'virtual_available') -
so_qty, 'kits_outgoing_qty': self.
_get_qty_from_children(child_product_qtys,
child_product_res, 'outgoing_qty'),
'kits_sale_quotation_qty': so_qty}
else:
raw_res = self._product_available(cr, uid, ids,
field_map.values(), arg, context)
for key, val in field_map.items():
res[product_record.id][key] = raw_res[
product_record.id].get(val)
break
return res
def _get_sale_quotation_qty(self, cr, uid, product_id, context=None):
"""get all qty of the product in all sale quotations (draft, sent)"""
sol_obj = self.pool.get('sale.order.line')
domain = [('state', 'in', ('draft', False, None)), ('product_id',
'=', product_id)]
sol_ids = sol_obj.read_group(cr, uid, domain, ['product_uom_qty',
'product_id'], groupby=['product_id'])
return sol_ids and sol_ids[0].get('product_uom_qty') or 0.0
<mask token>
def _is_kit(self, cr, uid, ids, fields=None, args=False, context=None):
"""see if this product is Kit or not"""
res = {}
for product_record in self.browse(cr, uid, ids, context=context):
res[product_record.id] = False
if product_record.bom_ids:
for bom in product_record.bom_ids:
if bom.type == 'phantom':
res[product_record.id] = True
return res
def _get_product_from_bom(self, cr, uid, ids, context=None):
res = {}
bom_ids = self.pool.get('mrp.bom').browse(cr, uid, ids, context=context
)
for bom in bom_ids:
res[bom.product_id.id] = True
return res.keys()
<mask token>
|
<mask token>
class product_product(orm.Model):
<mask token>
def get_kits_product_available(self, cr, uid, ids, context=None):
pass
def _kits_product_available(self, cr, uid, ids, field_names=None, arg=
False, context=None):
res = {}
field_names = field_names or []
context = context or {}
for id in ids:
res[id] = {}.fromkeys(field_names, 0.0)
field_map = {'kits_qty_available': 'qty_available',
'kits_incoming_qty': 'incoming_qty', 'kits_outgoing_qty':
'outgoing_qty', 'kits_virtual_available': 'virtual_available'}
for product_record in self.browse(cr, uid, ids, context=context):
so_qty = self._get_sale_quotation_qty(cr, uid, product_record.
id, context=context)
if not self._is_kit(cr, uid, [product_record.id], context=context
).get(product_record.id):
res[product_record.id] = {'kits_qty_available': 0,
'kits_incoming_qty': 0, 'kits_virtual_available': 0,
'kits_outgoing_qty': 0, 'kits_sale_quotation_qty': so_qty}
else:
for bom in product_record.bom_ids:
if bom.type == 'phantom':
child_product_res = {}
for line in bom.bom_lines:
child_product_res[line.product_id.id] = {
'product_qty': line.product_qty or 0.0}
child_product_qtys = self._product_available(cr,
uid, child_product_res.keys(), field_map.values
(), context=context)
res[product_record.id] = {'kits_qty_available':
self._get_qty_from_children(child_product_qtys,
child_product_res, 'qty_available'),
'kits_incoming_qty': self.
_get_qty_from_children(child_product_qtys,
child_product_res, 'incoming_qty'),
'kits_virtual_available': self.
_get_qty_from_children(child_product_qtys,
child_product_res, 'virtual_available') -
so_qty, 'kits_outgoing_qty': self.
_get_qty_from_children(child_product_qtys,
child_product_res, 'outgoing_qty'),
'kits_sale_quotation_qty': so_qty}
else:
raw_res = self._product_available(cr, uid, ids,
field_map.values(), arg, context)
for key, val in field_map.items():
res[product_record.id][key] = raw_res[
product_record.id].get(val)
break
return res
def _get_sale_quotation_qty(self, cr, uid, product_id, context=None):
"""get all qty of the product in all sale quotations (draft, sent)"""
sol_obj = self.pool.get('sale.order.line')
domain = [('state', 'in', ('draft', False, None)), ('product_id',
'=', product_id)]
sol_ids = sol_obj.read_group(cr, uid, domain, ['product_uom_qty',
'product_id'], groupby=['product_id'])
return sol_ids and sol_ids[0].get('product_uom_qty') or 0.0
def _get_qty_from_children(self, child_product_qtys, child_product_res,
field_name):
def qty_div(product_total_qty, component_qty):
return product_total_qty[1].get(field_name) / component_qty[1].get(
'product_qty')
return min(map(qty_div, child_product_qtys.iteritems(),
child_product_res.iteritems()))
def _is_kit(self, cr, uid, ids, fields=None, args=False, context=None):
"""see if this product is Kit or not"""
res = {}
for product_record in self.browse(cr, uid, ids, context=context):
res[product_record.id] = False
if product_record.bom_ids:
for bom in product_record.bom_ids:
if bom.type == 'phantom':
res[product_record.id] = True
return res
def _get_product_from_bom(self, cr, uid, ids, context=None):
res = {}
bom_ids = self.pool.get('mrp.bom').browse(cr, uid, ids, context=context
)
for bom in bom_ids:
res[bom.product_id.id] = True
return res.keys()
<mask token>
|
<mask token>
class product_product(orm.Model):
_inherit = 'product.product'
def get_kits_product_available(self, cr, uid, ids, context=None):
pass
def _kits_product_available(self, cr, uid, ids, field_names=None, arg=
False, context=None):
res = {}
field_names = field_names or []
context = context or {}
for id in ids:
res[id] = {}.fromkeys(field_names, 0.0)
field_map = {'kits_qty_available': 'qty_available',
'kits_incoming_qty': 'incoming_qty', 'kits_outgoing_qty':
'outgoing_qty', 'kits_virtual_available': 'virtual_available'}
for product_record in self.browse(cr, uid, ids, context=context):
so_qty = self._get_sale_quotation_qty(cr, uid, product_record.
id, context=context)
if not self._is_kit(cr, uid, [product_record.id], context=context
).get(product_record.id):
res[product_record.id] = {'kits_qty_available': 0,
'kits_incoming_qty': 0, 'kits_virtual_available': 0,
'kits_outgoing_qty': 0, 'kits_sale_quotation_qty': so_qty}
else:
for bom in product_record.bom_ids:
if bom.type == 'phantom':
child_product_res = {}
for line in bom.bom_lines:
child_product_res[line.product_id.id] = {
'product_qty': line.product_qty or 0.0}
child_product_qtys = self._product_available(cr,
uid, child_product_res.keys(), field_map.values
(), context=context)
res[product_record.id] = {'kits_qty_available':
self._get_qty_from_children(child_product_qtys,
child_product_res, 'qty_available'),
'kits_incoming_qty': self.
_get_qty_from_children(child_product_qtys,
child_product_res, 'incoming_qty'),
'kits_virtual_available': self.
_get_qty_from_children(child_product_qtys,
child_product_res, 'virtual_available') -
so_qty, 'kits_outgoing_qty': self.
_get_qty_from_children(child_product_qtys,
child_product_res, 'outgoing_qty'),
'kits_sale_quotation_qty': so_qty}
else:
raw_res = self._product_available(cr, uid, ids,
field_map.values(), arg, context)
for key, val in field_map.items():
res[product_record.id][key] = raw_res[
product_record.id].get(val)
break
return res
def _get_sale_quotation_qty(self, cr, uid, product_id, context=None):
"""get all qty of the product in all sale quotations (draft, sent)"""
sol_obj = self.pool.get('sale.order.line')
domain = [('state', 'in', ('draft', False, None)), ('product_id',
'=', product_id)]
sol_ids = sol_obj.read_group(cr, uid, domain, ['product_uom_qty',
'product_id'], groupby=['product_id'])
return sol_ids and sol_ids[0].get('product_uom_qty') or 0.0
def _get_qty_from_children(self, child_product_qtys, child_product_res,
field_name):
def qty_div(product_total_qty, component_qty):
return product_total_qty[1].get(field_name) / component_qty[1].get(
'product_qty')
return min(map(qty_div, child_product_qtys.iteritems(),
child_product_res.iteritems()))
def _is_kit(self, cr, uid, ids, fields=None, args=False, context=None):
"""see if this product is Kit or not"""
res = {}
for product_record in self.browse(cr, uid, ids, context=context):
res[product_record.id] = False
if product_record.bom_ids:
for bom in product_record.bom_ids:
if bom.type == 'phantom':
res[product_record.id] = True
return res
def _get_product_from_bom(self, cr, uid, ids, context=None):
res = {}
bom_ids = self.pool.get('mrp.bom').browse(cr, uid, ids, context=context
)
for bom in bom_ids:
res[bom.product_id.id] = True
return res.keys()
_columns = {'is_kit': fields.function(_is_kit, readonly=True, type=
'boolean', string='Is Kit', store={'mrp.bom': (
_get_product_from_bom, ['type'], 10)}), 'kits_qty_available':
fields.function(_kits_product_available, multi='kits_qty_available',
type='float', digits_compute=dp.get_precision(
'Product Unit of Measure'), string='Quantity On Hand (Kits)', help=
''), 'kits_incoming_qty': fields.function(_kits_product_available,
multi='kits_qty_available', type='float', digits_compute=dp.
get_precision('Product Unit of Measure'), string='Incoming (Kits)',
help=''), 'kits_outgoing_qty': fields.function(
_kits_product_available, multi='kits_qty_available', type='float',
digits_compute=dp.get_precision('Product Unit of Measure'), string=
'Outgoing (Kits)', help=''), 'kits_sale_quotation_qty': fields.
function(_kits_product_available, multi='kits_qty_available', type=
'float', digits_compute=dp.get_precision('Product Unit of Measure'),
string='Sales Quotation Allocated', help=''),
'kits_virtual_available': fields.function(_kits_product_available,
multi='kits_qty_available', type='float', digits_compute=dp.
get_precision('Product Unit of Measure'), string=
'Forecasted Quantity (Kits)', help='')}
|
from openerp.osv import orm, fields
import openerp.addons.decimal_precision as dp
class product_product(orm.Model):
_inherit = 'product.product'
def get_kits_product_available(self, cr, uid, ids, context=None):
pass
def _kits_product_available(self, cr, uid, ids, field_names=None, arg=
False, context=None):
res = {}
field_names = field_names or []
context = context or {}
for id in ids:
res[id] = {}.fromkeys(field_names, 0.0)
field_map = {'kits_qty_available': 'qty_available',
'kits_incoming_qty': 'incoming_qty', 'kits_outgoing_qty':
'outgoing_qty', 'kits_virtual_available': 'virtual_available'}
for product_record in self.browse(cr, uid, ids, context=context):
so_qty = self._get_sale_quotation_qty(cr, uid, product_record.
id, context=context)
if not self._is_kit(cr, uid, [product_record.id], context=context
).get(product_record.id):
res[product_record.id] = {'kits_qty_available': 0,
'kits_incoming_qty': 0, 'kits_virtual_available': 0,
'kits_outgoing_qty': 0, 'kits_sale_quotation_qty': so_qty}
else:
for bom in product_record.bom_ids:
if bom.type == 'phantom':
child_product_res = {}
for line in bom.bom_lines:
child_product_res[line.product_id.id] = {
'product_qty': line.product_qty or 0.0}
child_product_qtys = self._product_available(cr,
uid, child_product_res.keys(), field_map.values
(), context=context)
res[product_record.id] = {'kits_qty_available':
self._get_qty_from_children(child_product_qtys,
child_product_res, 'qty_available'),
'kits_incoming_qty': self.
_get_qty_from_children(child_product_qtys,
child_product_res, 'incoming_qty'),
'kits_virtual_available': self.
_get_qty_from_children(child_product_qtys,
child_product_res, 'virtual_available') -
so_qty, 'kits_outgoing_qty': self.
_get_qty_from_children(child_product_qtys,
child_product_res, 'outgoing_qty'),
'kits_sale_quotation_qty': so_qty}
else:
raw_res = self._product_available(cr, uid, ids,
field_map.values(), arg, context)
for key, val in field_map.items():
res[product_record.id][key] = raw_res[
product_record.id].get(val)
break
return res
def _get_sale_quotation_qty(self, cr, uid, product_id, context=None):
"""get all qty of the product in all sale quotations (draft, sent)"""
sol_obj = self.pool.get('sale.order.line')
domain = [('state', 'in', ('draft', False, None)), ('product_id',
'=', product_id)]
sol_ids = sol_obj.read_group(cr, uid, domain, ['product_uom_qty',
'product_id'], groupby=['product_id'])
return sol_ids and sol_ids[0].get('product_uom_qty') or 0.0
def _get_qty_from_children(self, child_product_qtys, child_product_res,
field_name):
def qty_div(product_total_qty, component_qty):
return product_total_qty[1].get(field_name) / component_qty[1].get(
'product_qty')
return min(map(qty_div, child_product_qtys.iteritems(),
child_product_res.iteritems()))
def _is_kit(self, cr, uid, ids, fields=None, args=False, context=None):
"""see if this product is Kit or not"""
res = {}
for product_record in self.browse(cr, uid, ids, context=context):
res[product_record.id] = False
if product_record.bom_ids:
for bom in product_record.bom_ids:
if bom.type == 'phantom':
res[product_record.id] = True
return res
def _get_product_from_bom(self, cr, uid, ids, context=None):
res = {}
bom_ids = self.pool.get('mrp.bom').browse(cr, uid, ids, context=context
)
for bom in bom_ids:
res[bom.product_id.id] = True
return res.keys()
_columns = {'is_kit': fields.function(_is_kit, readonly=True, type=
'boolean', string='Is Kit', store={'mrp.bom': (
_get_product_from_bom, ['type'], 10)}), 'kits_qty_available':
fields.function(_kits_product_available, multi='kits_qty_available',
type='float', digits_compute=dp.get_precision(
'Product Unit of Measure'), string='Quantity On Hand (Kits)', help=
''), 'kits_incoming_qty': fields.function(_kits_product_available,
multi='kits_qty_available', type='float', digits_compute=dp.
get_precision('Product Unit of Measure'), string='Incoming (Kits)',
help=''), 'kits_outgoing_qty': fields.function(
_kits_product_available, multi='kits_qty_available', type='float',
digits_compute=dp.get_precision('Product Unit of Measure'), string=
'Outgoing (Kits)', help=''), 'kits_sale_quotation_qty': fields.
function(_kits_product_available, multi='kits_qty_available', type=
'float', digits_compute=dp.get_precision('Product Unit of Measure'),
string='Sales Quotation Allocated', help=''),
'kits_virtual_available': fields.function(_kits_product_available,
multi='kits_qty_available', type='float', digits_compute=dp.
get_precision('Product Unit of Measure'), string=
'Forecasted Quantity (Kits)', help='')}
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010-2014 Elico Corp. All Rights Reserved.
# Alex Duan <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
import openerp.addons.decimal_precision as dp
class product_product(orm.Model):
_inherit = 'product.product'
def get_kits_product_available(self, cr, uid, ids, context=None):
pass
def _kits_product_available(self, cr, uid, ids, field_names=None, arg=False, context=None):
res = {}
field_names = field_names or []
context = context or {}
for id in ids:
res[id] = {}.fromkeys(field_names, 0.0)
field_map = {
'kits_qty_available': 'qty_available',
'kits_incoming_qty': 'incoming_qty',
'kits_outgoing_qty': 'outgoing_qty',
'kits_virtual_available': 'virtual_available'
}
for product_record in self.browse(cr, uid, ids, context=context):
#check if is a kit product.
so_qty = self._get_sale_quotation_qty(cr, uid, product_record.id, context=context)
if not self._is_kit(
cr, uid,
[product_record.id],
context=context).get(product_record.id):
res[product_record.id] = {
'kits_qty_available': 0,
'kits_incoming_qty': 0,
'kits_virtual_available': 0,
'kits_outgoing_qty': 0,
'kits_sale_quotation_qty': so_qty
}
#product with no bom
# if not product_record.bom_ids:
# raw_res = self._product_available(cr, uid, [product_record.id], field_map.values(), arg, context)
# for key, val in field_map.items():
# res[product_record.id][key] = raw_res[product_record.id].get(val)
#TODO how to deal with multi-bom products.
#now get always get the first bom.
#product with bom
else:
for bom in product_record.bom_ids:
#bom type is phantom
#TODO take care of the valid date of the components
if bom.type == 'phantom':
child_product_res = {}
for line in bom.bom_lines:
child_product_res[line.product_id.id] = {'product_qty': line.product_qty or 0.0}
child_product_qtys = self._product_available(cr, uid, child_product_res.keys(), field_map.values(), context=context)
res[product_record.id] = {
'kits_qty_available': self._get_qty_from_children(child_product_qtys, child_product_res, 'qty_available'),
'kits_incoming_qty': self._get_qty_from_children(child_product_qtys, child_product_res, 'incoming_qty'),
'kits_virtual_available': self._get_qty_from_children(child_product_qtys, child_product_res, 'virtual_available') - so_qty,
'kits_outgoing_qty': self._get_qty_from_children(child_product_qtys, child_product_res, 'outgoing_qty'),
'kits_sale_quotation_qty': so_qty
}
else:
raw_res = self._product_available(cr, uid, ids, field_map.values(), arg, context)
for key, val in field_map.items():
res[product_record.id][key] = raw_res[product_record.id].get(val)
#only get the first bom.
break
return res
def _get_sale_quotation_qty(self, cr, uid, product_id, context=None):
'''get all qty of the product in all sale quotations (draft, sent)'''
sol_obj = self.pool.get('sale.order.line')
domain = [('state', 'in', ('draft', False, None)), ('product_id', '=', product_id)]
#TODO take care of the uom.
sol_ids = sol_obj.read_group(cr, uid, domain, ['product_uom_qty', 'product_id'], groupby=['product_id'])
return sol_ids and sol_ids[0].get('product_uom_qty') or 0.0
def _get_qty_from_children(self, child_product_qtys, child_product_res, field_name):
def qty_div(product_total_qty, component_qty):
return product_total_qty[1].get(field_name) / component_qty[1].get('product_qty')
# import pdb
# pdb.set_trace()
return min(map(qty_div, child_product_qtys.iteritems(), child_product_res.iteritems()))
def _is_kit(self, cr, uid, ids, fields=None, args=False, context=None):
'''see if this product is Kit or not'''
res = {}
for product_record in self.browse(cr, uid, ids, context=context):
res[product_record.id] = False
if product_record.bom_ids:
for bom in product_record.bom_ids:
if bom.type == 'phantom':
res[product_record.id] = True
return res
def _get_product_from_bom(self, cr, uid, ids, context=None):
res = {}
bom_ids = self.pool.get('mrp.bom').browse(cr, uid, ids, context=context)
for bom in bom_ids:
res[bom.product_id.id] = True
return res.keys()
_columns = {
'is_kit': fields.function(
_is_kit,
readonly=True,
type='boolean',
string='Is Kit',
store={
'mrp.bom': (_get_product_from_bom, ['type'], 10)
}),
'kits_qty_available': fields.function(
_kits_product_available,
multi='kits_qty_available',
type='float',
digits_compute=dp.get_precision('Product Unit of Measure'),
string='Quantity On Hand (Kits)',
help=""),
'kits_incoming_qty': fields.function(
_kits_product_available,
multi='kits_qty_available',
type='float',
digits_compute=dp.get_precision('Product Unit of Measure'),
string='Incoming (Kits)',
help=""),
'kits_outgoing_qty': fields.function(
_kits_product_available,
multi='kits_qty_available',
type='float',
digits_compute=dp.get_precision('Product Unit of Measure'),
string='Outgoing (Kits)',
help=""),
'kits_sale_quotation_qty': fields.function(
_kits_product_available,
multi='kits_qty_available',
type='float',
digits_compute=dp.get_precision('Product Unit of Measure'),
string='Sales Quotation Allocated',
help=""),
'kits_virtual_available': fields.function(
_kits_product_available,
multi='kits_qty_available',
type='float',
digits_compute=dp.get_precision('Product Unit of Measure'),
string='Forecasted Quantity (Kits)',
help=""),
}
|
[
6,
7,
8,
9,
10
] |
2,402 |
a7099b2506de08893ca849146813505d88784895
|
<mask token>
|
<mask token>
class WGANUpdater(chainer.training.updaters.StandardUpdater):
def __init__(self, *args, **kwargs):
self.gen, self.dis = kwargs.pop('models')
self.n_dis = kwargs.pop('n_dis')
self.lam = kwargs.pop('lam')
self.iteration = 0
super(WGANUpdater, self).__init__(*args, **kwargs)
def loss_gen(self, gen, y_fake):
batchsize = len(y_fake)
loss = F.sum(-y_fake) / batchsize
chainer.reporter.report({'loss': loss}, gen)
return loss
def loss_dis(self, dis, y_real, y_fake, x_real, x_fake):
batchsize = len(y_fake)
xp = dis.xp
eps = xp.random.uniform(0, 1, size=batchsize).astype('f')[:, None,
None, None]
x_mid = eps * x_real + (1.0 - eps) * x_fake
y_mid, _ = self.dis(x_mid)
grad, = chainer.grad([y_mid], [x_mid], enable_double_backprop=True)
grad = F.sqrt(F.batch_l2_norm_squared(grad))
loss_grad = self.lam * F.mean_squared_error(grad, xp.ones_like(grad
.data))
loss = F.sum(-y_real) / batchsize
loss += F.sum(y_fake) / batchsize
wasserstein_distance = -loss
loss += loss_grad
chainer.reporter.report({'wasserstein_distance':
wasserstein_distance, 'loss_grad': loss_grad})
chainer.reporter.report({'loss': loss}, dis)
return loss
<mask token>
|
<mask token>
class WGANUpdater(chainer.training.updaters.StandardUpdater):
def __init__(self, *args, **kwargs):
self.gen, self.dis = kwargs.pop('models')
self.n_dis = kwargs.pop('n_dis')
self.lam = kwargs.pop('lam')
self.iteration = 0
super(WGANUpdater, self).__init__(*args, **kwargs)
def loss_gen(self, gen, y_fake):
batchsize = len(y_fake)
loss = F.sum(-y_fake) / batchsize
chainer.reporter.report({'loss': loss}, gen)
return loss
def loss_dis(self, dis, y_real, y_fake, x_real, x_fake):
batchsize = len(y_fake)
xp = dis.xp
eps = xp.random.uniform(0, 1, size=batchsize).astype('f')[:, None,
None, None]
x_mid = eps * x_real + (1.0 - eps) * x_fake
y_mid, _ = self.dis(x_mid)
grad, = chainer.grad([y_mid], [x_mid], enable_double_backprop=True)
grad = F.sqrt(F.batch_l2_norm_squared(grad))
loss_grad = self.lam * F.mean_squared_error(grad, xp.ones_like(grad
.data))
loss = F.sum(-y_real) / batchsize
loss += F.sum(y_fake) / batchsize
wasserstein_distance = -loss
loss += loss_grad
chainer.reporter.report({'wasserstein_distance':
wasserstein_distance, 'loss_grad': loss_grad})
chainer.reporter.report({'loss': loss}, dis)
return loss
def update_core(self):
gen_optimizer = self.get_optimizer('gen')
dis_optimizer = self.get_optimizer('dis')
xp = self.gen.xp
for i in range(self.n_dis):
batch = self.get_iterator('main').next()
batchsize = len(batch)
x = []
for j in range(batchsize):
x.append(np.asarray(batch[j]).astype('f'))
x_real = Variable(xp.asarray(x))
y_real, _ = self.dis(x_real)
z = Variable(xp.asarray(self.gen.make_hidden(batchsize)))
x_fake = self.gen(z)
y_fake, _ = self.dis(x_fake)
if i == 0:
gen_optimizer.update(self.loss_gen, self.gen, y_fake)
x_fake.unchain_backward()
dis_optimizer.update(self.loss_dis, self.dis, y_real, y_fake,
x_real, x_fake)
|
import numpy as np
import chainer
import chainer.functions as F
from chainer import Variable
from chainer.dataset import convert
class WGANUpdater(chainer.training.updaters.StandardUpdater):
def __init__(self, *args, **kwargs):
self.gen, self.dis = kwargs.pop('models')
self.n_dis = kwargs.pop('n_dis')
self.lam = kwargs.pop('lam')
self.iteration = 0
super(WGANUpdater, self).__init__(*args, **kwargs)
def loss_gen(self, gen, y_fake):
batchsize = len(y_fake)
loss = F.sum(-y_fake) / batchsize
chainer.reporter.report({'loss': loss}, gen)
return loss
def loss_dis(self, dis, y_real, y_fake, x_real, x_fake):
batchsize = len(y_fake)
xp = dis.xp
eps = xp.random.uniform(0, 1, size=batchsize).astype('f')[:, None,
None, None]
x_mid = eps * x_real + (1.0 - eps) * x_fake
y_mid, _ = self.dis(x_mid)
grad, = chainer.grad([y_mid], [x_mid], enable_double_backprop=True)
grad = F.sqrt(F.batch_l2_norm_squared(grad))
loss_grad = self.lam * F.mean_squared_error(grad, xp.ones_like(grad
.data))
loss = F.sum(-y_real) / batchsize
loss += F.sum(y_fake) / batchsize
wasserstein_distance = -loss
loss += loss_grad
chainer.reporter.report({'wasserstein_distance':
wasserstein_distance, 'loss_grad': loss_grad})
chainer.reporter.report({'loss': loss}, dis)
return loss
def update_core(self):
gen_optimizer = self.get_optimizer('gen')
dis_optimizer = self.get_optimizer('dis')
xp = self.gen.xp
for i in range(self.n_dis):
batch = self.get_iterator('main').next()
batchsize = len(batch)
x = []
for j in range(batchsize):
x.append(np.asarray(batch[j]).astype('f'))
x_real = Variable(xp.asarray(x))
y_real, _ = self.dis(x_real)
z = Variable(xp.asarray(self.gen.make_hidden(batchsize)))
x_fake = self.gen(z)
y_fake, _ = self.dis(x_fake)
if i == 0:
gen_optimizer.update(self.loss_gen, self.gen, y_fake)
x_fake.unchain_backward()
dis_optimizer.update(self.loss_dis, self.dis, y_real, y_fake,
x_real, x_fake)
|
#!/usr/bin/python3
#https://github.com/pfnet-research/chainer-gan-lib/blob/master/wgan_gp/updater.py
import numpy as np
import chainer
import chainer.functions as F
from chainer import Variable
from chainer.dataset import convert
class WGANUpdater(chainer.training.updaters.StandardUpdater):
def __init__(self, *args, **kwargs):
self.gen, self.dis = kwargs.pop('models')
self.n_dis = kwargs.pop('n_dis')
self.lam = kwargs.pop('lam')
self.iteration = 0
super(WGANUpdater, self).__init__(*args, **kwargs)
def loss_gen(self, gen, y_fake):
batchsize = len(y_fake)
loss = F.sum(-y_fake)/batchsize
chainer.reporter.report({'loss': loss}, gen)
return loss
def loss_dis(self, dis, y_real, y_fake, x_real, x_fake):
batchsize = len(y_fake)
xp = dis.xp
eps = xp.random.uniform(0, 1, size=batchsize)\
.astype("f")[:, None, None, None]
x_mid = eps * x_real + (1.0 - eps) * x_fake
y_mid,_ = self.dis(x_mid)
grad, = chainer.grad([y_mid], [x_mid], enable_double_backprop=True)
grad = F.sqrt(F.batch_l2_norm_squared(grad))
loss_grad = self.lam * F.mean_squared_error(grad,
xp.ones_like(grad.data))
loss = F.sum(-y_real) / batchsize
loss += F.sum(y_fake) / batchsize
wasserstein_distance = -loss
loss += loss_grad
chainer.reporter.report({'wasserstein_distance': wasserstein_distance,
'loss_grad':loss_grad})
chainer.reporter.report({'loss': loss}, dis)
return loss
def update_core(self):
gen_optimizer = self.get_optimizer('gen')
dis_optimizer = self.get_optimizer('dis')
xp = self.gen.xp
for i in range(self.n_dis):
batch = self.get_iterator('main').next()
batchsize = len(batch)
x = []
for j in range(batchsize):
x.append(np.asarray(batch[j]).astype("f"))
x_real = Variable(xp.asarray(x))
y_real,_ = self.dis(x_real)
z = Variable(xp.asarray(self.gen.make_hidden(batchsize)))
x_fake = self.gen(z)
y_fake,_ = self.dis(x_fake)
if i == 0:
gen_optimizer.update(self.loss_gen, self.gen, y_fake)
x_fake.unchain_backward()
dis_optimizer.update(self.loss_dis, self.dis,
y_real, y_fake, x_real, x_fake)
|
[
0,
4,
5,
6,
7
] |
2,403 |
cb904408486ad9ea8cc0c8ff2ec393e480309a57
|
<mask token>
|
<mask token>
result_dir = 'results'
data_dir = 'datasets'
cache_dir = f'{ROOT_PATH}/data/cache'
run_dir_ignore = ['results', 'datasets', 'cache']
use_treeconnect = False
treeconnect_threshold = 1024
vgg16 = 'vgg16_zhang_perceptual.pkl'
model = 'stylegan2-ffhq-config-f.pkl'
networks_urls = {'european': [
'https://drive.google.com/uc?id=1--kh2Em5U1qh-H7Lin9FzppkZCQ18c4W',
'generator_model-stylegan2-config-f.pkl'], 'asian': [
'https://drive.google.com/uc?id=1-3XU6KzIVywFoKXx2zG1hW8mH4OYpyO9',
'generator_yellow-stylegan2-config-f.pkl'], 'asian beauty': [
'https://drive.google.com/uc?id=1-04v78_pI59M0IvhcKxsm3YhK2-plnbj',
'generator_star-stylegan2-config-f.pkl'], 'baby': [
'https://drive.google.com/uc?id=1--684mANXSgC3aDhLc7lPM7OBHWuVRXa',
'generator_baby-stylegan2-config-f.pkl']}
|
<mask token>
from facegan import ROOT_PATH
result_dir = 'results'
data_dir = 'datasets'
cache_dir = f'{ROOT_PATH}/data/cache'
run_dir_ignore = ['results', 'datasets', 'cache']
use_treeconnect = False
treeconnect_threshold = 1024
vgg16 = 'vgg16_zhang_perceptual.pkl'
model = 'stylegan2-ffhq-config-f.pkl'
networks_urls = {'european': [
'https://drive.google.com/uc?id=1--kh2Em5U1qh-H7Lin9FzppkZCQ18c4W',
'generator_model-stylegan2-config-f.pkl'], 'asian': [
'https://drive.google.com/uc?id=1-3XU6KzIVywFoKXx2zG1hW8mH4OYpyO9',
'generator_yellow-stylegan2-config-f.pkl'], 'asian beauty': [
'https://drive.google.com/uc?id=1-04v78_pI59M0IvhcKxsm3YhK2-plnbj',
'generator_star-stylegan2-config-f.pkl'], 'baby': [
'https://drive.google.com/uc?id=1--684mANXSgC3aDhLc7lPM7OBHWuVRXa',
'generator_baby-stylegan2-config-f.pkl']}
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Global configuration."""
# ----------------------------------------------------------------------------
# Paths.
from facegan import ROOT_PATH
result_dir = 'results'
data_dir = 'datasets'
cache_dir = f'{ROOT_PATH}/data/cache'
run_dir_ignore = ['results', 'datasets', 'cache']
# experimental - replace Dense layers with TreeConnect
use_treeconnect = False
treeconnect_threshold = 1024
# ----------------------------------------------------------------------------
vgg16 = 'vgg16_zhang_perceptual.pkl'
model = 'stylegan2-ffhq-config-f.pkl'
networks_urls = {
'european': [
'https://drive.google.com/uc?id=1--kh2Em5U1qh-H7Lin9FzppkZCQ18c4W',
'generator_model-stylegan2-config-f.pkl'
],
'asian': [
'https://drive.google.com/uc?id=1-3XU6KzIVywFoKXx2zG1hW8mH4OYpyO9',
'generator_yellow-stylegan2-config-f.pkl'
],
'asian beauty': [
'https://drive.google.com/uc?id=1-04v78_pI59M0IvhcKxsm3YhK2-plnbj',
'generator_star-stylegan2-config-f.pkl'
],
'baby': [
'https://drive.google.com/uc?id=1--684mANXSgC3aDhLc7lPM7OBHWuVRXa',
'generator_baby-stylegan2-config-f.pkl'
],
}
| null |
[
0,
1,
2,
3
] |
2,404 |
e30aaf1616a107662924da3671b179a1887974f7
|
<mask token>
def get_req_var(var):
result = 0
for s in vars:
s = re.search('((?<=' + var + '>).+)', s)
if s:
result = s[0]
break
return result
<mask token>
@app.route('/')
def home():
return render_template('index.html')
<mask token>
@app.route('/display/<filename>')
def display_image(filename):
filename = 'uploaded_images/' + filename
return redirect(url_for('static', filename=filename), code=301)
@app.route('/download/<filename>')
def download_image(filename):
filename = 'static/uploaded_images/' + filename
return send_file(filename, as_attachment=True)
@app.route('/send-mail/<filename>')
def send_mail(filename):
filename = 'static/uploaded_images/' + filename
mail = Mail(app)
mail.init_app(app)
msg = Message('Sent from flask_app', sender=app.config['MAIL_USERNAME'],
recipients=['[email protected]',
'[email protected]', app.config['MAIL_USERNAME']])
with app.open_resource(filename) as fp:
msg.attach('image.jpg', 'image/jpg', fp.read())
mail.send(msg)
return render_template('mail_sent.html')
@app.route('/upload-image', methods=['GET', 'POST'])
def upload_image():
for f in os.listdir(image_path):
os.remove(os.path.join(image_path, f))
print(f'file {f}')
print(image_path)
if request.method == 'POST':
if request.files:
if 'filesize' in request.cookies:
if not allowed_image_filesize(request.cookies['filesize']):
return redirect(request.url)
image = request.files['image']
if image.filename == '':
return redirect(request.url)
if allowed_image(image.filename):
filename = secure_filename(image.filename)
img = np.fromfile(image, np.uint8)
img = cv.imdecode(img, cv.IMREAD_COLOR)
quality = 80
quality_param = [int(cv.IMWRITE_JPEG_QUALITY), quality]
img_path = app.config['IMAGE_UPLOADS'] + '/' + filename
cv.imwrite(img_path, img, quality_param)
return render_template('success.html', filename=filename)
else:
return redirect(request.url)
return render_template('upload_image.html')
|
<mask token>
def get_req_var(var):
result = 0
for s in vars:
s = re.search('((?<=' + var + '>).+)', s)
if s:
result = s[0]
break
return result
<mask token>
@app.route('/')
def home():
return render_template('index.html')
def allowed_image_filesize(filesize):
if int(filesize) <= app.config['MAX_IMAGE_FILESIZE']:
return True
else:
return False
def allowed_image(filename):
if '.' not in filename:
return False
ext = filename.rsplit('.', 1)[1]
if ext.upper() in app.config['ALLOWED_IMAGE_EXTENSIONS']:
return True
else:
return False
@app.route('/success')
def success(filename):
return render_template('success.html', filename=filename)
@app.route('/display/<filename>')
def display_image(filename):
filename = 'uploaded_images/' + filename
return redirect(url_for('static', filename=filename), code=301)
@app.route('/download/<filename>')
def download_image(filename):
filename = 'static/uploaded_images/' + filename
return send_file(filename, as_attachment=True)
@app.route('/send-mail/<filename>')
def send_mail(filename):
filename = 'static/uploaded_images/' + filename
mail = Mail(app)
mail.init_app(app)
msg = Message('Sent from flask_app', sender=app.config['MAIL_USERNAME'],
recipients=['[email protected]',
'[email protected]', app.config['MAIL_USERNAME']])
with app.open_resource(filename) as fp:
msg.attach('image.jpg', 'image/jpg', fp.read())
mail.send(msg)
return render_template('mail_sent.html')
@app.route('/upload-image', methods=['GET', 'POST'])
def upload_image():
for f in os.listdir(image_path):
os.remove(os.path.join(image_path, f))
print(f'file {f}')
print(image_path)
if request.method == 'POST':
if request.files:
if 'filesize' in request.cookies:
if not allowed_image_filesize(request.cookies['filesize']):
return redirect(request.url)
image = request.files['image']
if image.filename == '':
return redirect(request.url)
if allowed_image(image.filename):
filename = secure_filename(image.filename)
img = np.fromfile(image, np.uint8)
img = cv.imdecode(img, cv.IMREAD_COLOR)
quality = 80
quality_param = [int(cv.IMWRITE_JPEG_QUALITY), quality]
img_path = app.config['IMAGE_UPLOADS'] + '/' + filename
cv.imwrite(img_path, img, quality_param)
return render_template('success.html', filename=filename)
else:
return redirect(request.url)
return render_template('upload_image.html')
|
<mask token>
file_path_file = open('flask_app/file_path.txt', 'r')
vars = file_path_file.readlines()
def get_req_var(var):
result = 0
for s in vars:
s = re.search('((?<=' + var + '>).+)', s)
if s:
result = s[0]
break
return result
image_path = get_req_var('IMAGE_UPLOADS')
app.config['IMAGE_UPLOADS'] = image_path
app.config['ALLOWED_IMAGE_EXTENSIONS'] = ['JPEG', 'JPG']
app.config['MAX_CONTENT_LENGTH'] = 50 * 1024 * 1024
app.config['MAX_IMAGE_FILESIZE'] = 50 * 1024 * 1024
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = get_req_var('MAIL_USERNAME')
app.config['MAIL_PASSWORD'] = get_req_var('MAIL_PASSWORD')
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
@app.route('/')
def home():
return render_template('index.html')
def allowed_image_filesize(filesize):
if int(filesize) <= app.config['MAX_IMAGE_FILESIZE']:
return True
else:
return False
def allowed_image(filename):
if '.' not in filename:
return False
ext = filename.rsplit('.', 1)[1]
if ext.upper() in app.config['ALLOWED_IMAGE_EXTENSIONS']:
return True
else:
return False
@app.route('/success')
def success(filename):
return render_template('success.html', filename=filename)
@app.route('/display/<filename>')
def display_image(filename):
filename = 'uploaded_images/' + filename
return redirect(url_for('static', filename=filename), code=301)
@app.route('/download/<filename>')
def download_image(filename):
filename = 'static/uploaded_images/' + filename
return send_file(filename, as_attachment=True)
@app.route('/send-mail/<filename>')
def send_mail(filename):
filename = 'static/uploaded_images/' + filename
mail = Mail(app)
mail.init_app(app)
msg = Message('Sent from flask_app', sender=app.config['MAIL_USERNAME'],
recipients=['[email protected]',
'[email protected]', app.config['MAIL_USERNAME']])
with app.open_resource(filename) as fp:
msg.attach('image.jpg', 'image/jpg', fp.read())
mail.send(msg)
return render_template('mail_sent.html')
@app.route('/upload-image', methods=['GET', 'POST'])
def upload_image():
for f in os.listdir(image_path):
os.remove(os.path.join(image_path, f))
print(f'file {f}')
print(image_path)
if request.method == 'POST':
if request.files:
if 'filesize' in request.cookies:
if not allowed_image_filesize(request.cookies['filesize']):
return redirect(request.url)
image = request.files['image']
if image.filename == '':
return redirect(request.url)
if allowed_image(image.filename):
filename = secure_filename(image.filename)
img = np.fromfile(image, np.uint8)
img = cv.imdecode(img, cv.IMREAD_COLOR)
quality = 80
quality_param = [int(cv.IMWRITE_JPEG_QUALITY), quality]
img_path = app.config['IMAGE_UPLOADS'] + '/' + filename
cv.imwrite(img_path, img, quality_param)
return render_template('success.html', filename=filename)
else:
return redirect(request.url)
return render_template('upload_image.html')
|
from flask import render_template, request, redirect, url_for, send_file
from flask_app import app
import re
import os
from werkzeug.utils import secure_filename
import numpy as np
import cv2 as cv
from flask_mail import Message, Mail
file_path_file = open('flask_app/file_path.txt', 'r')
vars = file_path_file.readlines()
def get_req_var(var):
result = 0
for s in vars:
s = re.search('((?<=' + var + '>).+)', s)
if s:
result = s[0]
break
return result
image_path = get_req_var('IMAGE_UPLOADS')
app.config['IMAGE_UPLOADS'] = image_path
app.config['ALLOWED_IMAGE_EXTENSIONS'] = ['JPEG', 'JPG']
app.config['MAX_CONTENT_LENGTH'] = 50 * 1024 * 1024
app.config['MAX_IMAGE_FILESIZE'] = 50 * 1024 * 1024
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = get_req_var('MAIL_USERNAME')
app.config['MAIL_PASSWORD'] = get_req_var('MAIL_PASSWORD')
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
@app.route('/')
def home():
return render_template('index.html')
def allowed_image_filesize(filesize):
if int(filesize) <= app.config['MAX_IMAGE_FILESIZE']:
return True
else:
return False
def allowed_image(filename):
if '.' not in filename:
return False
ext = filename.rsplit('.', 1)[1]
if ext.upper() in app.config['ALLOWED_IMAGE_EXTENSIONS']:
return True
else:
return False
@app.route('/success')
def success(filename):
return render_template('success.html', filename=filename)
@app.route('/display/<filename>')
def display_image(filename):
filename = 'uploaded_images/' + filename
return redirect(url_for('static', filename=filename), code=301)
@app.route('/download/<filename>')
def download_image(filename):
filename = 'static/uploaded_images/' + filename
return send_file(filename, as_attachment=True)
@app.route('/send-mail/<filename>')
def send_mail(filename):
filename = 'static/uploaded_images/' + filename
mail = Mail(app)
mail.init_app(app)
msg = Message('Sent from flask_app', sender=app.config['MAIL_USERNAME'],
recipients=['[email protected]',
'[email protected]', app.config['MAIL_USERNAME']])
with app.open_resource(filename) as fp:
msg.attach('image.jpg', 'image/jpg', fp.read())
mail.send(msg)
return render_template('mail_sent.html')
@app.route('/upload-image', methods=['GET', 'POST'])
def upload_image():
for f in os.listdir(image_path):
os.remove(os.path.join(image_path, f))
print(f'file {f}')
print(image_path)
if request.method == 'POST':
if request.files:
if 'filesize' in request.cookies:
if not allowed_image_filesize(request.cookies['filesize']):
return redirect(request.url)
image = request.files['image']
if image.filename == '':
return redirect(request.url)
if allowed_image(image.filename):
filename = secure_filename(image.filename)
img = np.fromfile(image, np.uint8)
img = cv.imdecode(img, cv.IMREAD_COLOR)
quality = 80
quality_param = [int(cv.IMWRITE_JPEG_QUALITY), quality]
img_path = app.config['IMAGE_UPLOADS'] + '/' + filename
cv.imwrite(img_path, img, quality_param)
return render_template('success.html', filename=filename)
else:
return redirect(request.url)
return render_template('upload_image.html')
|
from flask import render_template, request, redirect, url_for, send_file
from flask_app import app
import re
import os
from werkzeug.utils import secure_filename
import numpy as np
import cv2 as cv
from flask_mail import Message, Mail
file_path_file = open('flask_app/file_path.txt', 'r')
vars = file_path_file.readlines()
def get_req_var(var):
result = 0
for s in vars:
s = re.search("((?<=" + var + ">).+)", s)
if s:
result = s[0]
break
return result
image_path = get_req_var("IMAGE_UPLOADS")
app.config["IMAGE_UPLOADS"] = image_path
app.config["ALLOWED_IMAGE_EXTENSIONS"] = ["JPEG", "JPG"]
app.config['MAX_CONTENT_LENGTH'] = 50 * 1024 * 1024
app.config["MAX_IMAGE_FILESIZE"] = 50 * 1024 * 1024
# for mail
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = get_req_var("MAIL_USERNAME")
app.config['MAIL_PASSWORD'] = get_req_var("MAIL_PASSWORD")
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
# for mail
@app.route('/')
def home():
return render_template('index.html')
def allowed_image_filesize(filesize):
if int(filesize) <= app.config["MAX_IMAGE_FILESIZE"]:
return True
else:
return False
def allowed_image(filename):
# We only want files with a . in the filename
if "." not in filename:
return False
# Split the extension from the filename
ext = filename.rsplit(".", 1)[1]
# Check if the extension is in ALLOWED_IMAGE_EXTENSIONS
if ext.upper() in app.config["ALLOWED_IMAGE_EXTENSIONS"]:
return True
else:
return False
@app.route('/success')
def success(filename):
return render_template('success.html', filename=filename)
@app.route('/display/<filename>')
def display_image(filename):
filename = 'uploaded_images/' + filename
return redirect(url_for('static', filename=filename), code=301)
@app.route("/download/<filename>")
def download_image(filename):
filename = 'static/uploaded_images/' + filename
return send_file(filename, as_attachment=True)
@app.route("/send-mail/<filename>")
def send_mail(filename):
filename = 'static/uploaded_images/' + filename
mail = Mail(app)
mail.init_app(app)
msg = Message(
"Sent from flask_app",
sender=app.config["MAIL_USERNAME"],
recipients=["[email protected]",
"[email protected]", app.config["MAIL_USERNAME"]],
)
with app.open_resource(filename) as fp:
msg.attach("image.jpg", "image/jpg", fp.read())
mail.send(msg)
return render_template("mail_sent.html")
@app.route("/upload-image", methods=["GET", "POST"])
def upload_image():
# cwd = os.path.join(os.getcwd(), image_path)
# print(cwd)
for f in os.listdir(image_path):
os.remove(os.path.join(image_path, f))
print(f"file {f}")
print(image_path)
if request.method == "POST":
if request.files:
if "filesize" in request.cookies:
if not allowed_image_filesize(request.cookies["filesize"]):
return redirect(request.url)
image = request.files["image"]
if image.filename == "":
return redirect(request.url)
if allowed_image(image.filename):
filename = secure_filename(image.filename)
img = np.fromfile(image, np.uint8)
img = cv.imdecode(img, cv.IMREAD_COLOR)
quality = 80
quality_param = [int(cv.IMWRITE_JPEG_QUALITY), quality]
img_path = app.config["IMAGE_UPLOADS"] + "/" + filename
cv.imwrite(img_path, img, quality_param)
return render_template('success.html', filename=filename)
else:
return redirect(request.url)
return render_template("upload_image.html")
|
[
6,
9,
10,
11,
12
] |
2,405 |
d4361b169bf75d3af82eca3d26609961ccc2f27e
|
<mask token>
|
<mask token>
array = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
solution = Solution.Find(6, array)
|
from find import Solution
array = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
solution = Solution.Find(6, array)
|
from find import Solution
array = [[1,2,3],[4,5,6],[7,8,9]]
solution = Solution.Find(6,array)
| null |
[
0,
1,
2,
3
] |
2,406 |
74faeb1c09fe136ec4d9578173aeebe54b451e33
|
<mask token>
|
<mask token>
@authentication.route('/register', methods=['GET', 'POST'])
def register():
form = Register()
if form.validate_on_submit():
data = {'first_name': request.form.get('first_name'), 'last_name':
request.form.get('last_name'), 'email': request.form.get(
'email'), 'password': request.form.get('password')}
u = User(first_name=data['first_name'], last_name=data['last_name'],
email=data['email'], password=data['password'])
u.hash_pass(u.password)
db.session.add(u)
db.session.commit()
flash('You have succesfully registered!', 'primary')
return redirect(url_for('authentication.login'))
content = {'form': form}
return render_template('register.html', **content)
<mask token>
@authentication.route('/logout')
def logout():
logout_user()
flash('You have successfully logged out!', 'info')
return redirect(url_for('authentication.login'))
|
<mask token>
@authentication.route('/register', methods=['GET', 'POST'])
def register():
form = Register()
if form.validate_on_submit():
data = {'first_name': request.form.get('first_name'), 'last_name':
request.form.get('last_name'), 'email': request.form.get(
'email'), 'password': request.form.get('password')}
u = User(first_name=data['first_name'], last_name=data['last_name'],
email=data['email'], password=data['password'])
u.hash_pass(u.password)
db.session.add(u)
db.session.commit()
flash('You have succesfully registered!', 'primary')
return redirect(url_for('authentication.login'))
content = {'form': form}
return render_template('register.html', **content)
@authentication.route('/login', methods=['GET', 'POST'])
def login():
form = Login()
user = User.query.filter_by(email=request.form.get('email')).first()
if form.validate_on_submit():
if user is None or not user.check_password(request.form.get('password')
):
flash('You have entered incorrect details, please try again',
'danger')
return redirect(url_for('authentication.login'))
login_user(user)
flash('You have successfully logged in!', 'success')
return redirect(url_for('main.index'))
content = {'form': form}
return render_template('login.html', **content)
@authentication.route('/logout')
def logout():
logout_user()
flash('You have successfully logged out!', 'info')
return redirect(url_for('authentication.login'))
|
from . import bp as authentication
from app import db
from flask import current_app as app, render_template, request, redirect, url_for, flash, session
from flask_login import login_user, logout_user, current_user, login_required
from .forms import Register, Login, Settings
from .models import User
@authentication.route('/register', methods=['GET', 'POST'])
def register():
form = Register()
if form.validate_on_submit():
data = {'first_name': request.form.get('first_name'), 'last_name':
request.form.get('last_name'), 'email': request.form.get(
'email'), 'password': request.form.get('password')}
u = User(first_name=data['first_name'], last_name=data['last_name'],
email=data['email'], password=data['password'])
u.hash_pass(u.password)
db.session.add(u)
db.session.commit()
flash('You have succesfully registered!', 'primary')
return redirect(url_for('authentication.login'))
content = {'form': form}
return render_template('register.html', **content)
@authentication.route('/login', methods=['GET', 'POST'])
def login():
form = Login()
user = User.query.filter_by(email=request.form.get('email')).first()
if form.validate_on_submit():
if user is None or not user.check_password(request.form.get('password')
):
flash('You have entered incorrect details, please try again',
'danger')
return redirect(url_for('authentication.login'))
login_user(user)
flash('You have successfully logged in!', 'success')
return redirect(url_for('main.index'))
content = {'form': form}
return render_template('login.html', **content)
@authentication.route('/logout')
def logout():
logout_user()
flash('You have successfully logged out!', 'info')
return redirect(url_for('authentication.login'))
|
from .import bp as authentication
from app import db
from flask import current_app as app, render_template, request, redirect, url_for, flash, session
from flask_login import login_user, logout_user, current_user, login_required
from .forms import Register, Login, Settings
from .models import User
# route for register using a WTForm
@authentication.route('/register', methods=['GET', 'POST'])
def register():
# set an instance of the form
form = Register()
if form.validate_on_submit():
# collect the data from the form into a dictionary
data = {
'first_name' : request.form.get('first_name'),
'last_name' : request.form.get('last_name'),
'email' : request.form.get('email'),
'password' : request.form.get('password')
}
# create an instance of the User class using the data dictionary
u = User(first_name=data['first_name'], last_name=data['last_name'], email=data['email'], password=data['password'])
# securing the password
u.hash_pass(u.password)
# adding the user to the database
db.session.add(u)
db.session.commit()
# confirmations
flash("You have succesfully registered!", 'primary')
# send them to the login page
return redirect(url_for("authentication.login"))
# sending the form model to the HTML page for rendering
content = {
'form': form
}
return render_template('register.html', **content)
# route for login using a WTform
@authentication.route('/login', methods=['GET', 'POST'])
def login():
# set an instance of the form
form = Login()
user = User.query.filter_by(email=request.form.get('email')).first()
if form.validate_on_submit():
# check if the info is correct
if user is None or not user.check_password(request.form.get('password')):
flash("You have entered incorrect details, please try again", 'danger')
return redirect(url_for('authentication.login'))
login_user(user)
flash("You have successfully logged in!", 'success')
return redirect(url_for('main.index'))
# sending the form model to the HTML page for rendering
content = {
'form' : form
}
return render_template('login.html', **content)
# logout route, pretty simple
@authentication.route('/logout')
def logout():
logout_user()
flash("You have successfully logged out!", 'info')
return redirect(url_for("authentication.login"))
|
[
0,
2,
3,
4,
5
] |
2,407 |
743aa4ccbb9a131b5ef3d04475789d3d1da1a2fa
|
<mask token>
class Category(db.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
|
<mask token>
class Category(db.Model):
__tablename__ = 'category'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20), nullable=False)
addtime = db.Column(db.DateTime, nullable=False)
def __repr__(self):
return '<User %r>' % self.name
<mask token>
|
<mask token>
class Category(db.Model):
__tablename__ = 'category'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20), nullable=False)
addtime = db.Column(db.DateTime, nullable=False)
def __repr__(self):
return '<User %r>' % self.name
if __name__ == '__main__':
db.create_all()
|
from flask_sqlalchemy import SQLAlchemy
from config.manager import app
from config.db import db
class Category(db.Model):
__tablename__ = 'category'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20), nullable=False)
addtime = db.Column(db.DateTime, nullable=False)
def __repr__(self):
return '<User %r>' % self.name
if __name__ == '__main__':
db.create_all()
|
# coding:utf-8
from flask_sqlalchemy import SQLAlchemy
from config.manager import app
from config.db import db
class Category(db.Model):
__tablename__ = 'category'
id = db.Column(db.Integer, primary_key=True) # 编号
name = db.Column(db.String(20), nullable=False) # 账号
addtime = db.Column(db.DateTime, nullable=False) # 注册时间
def __repr__(self):
return "<User %r>" % self.name
if __name__ == '__main__':
db.create_all()
|
[
1,
3,
4,
5,
6
] |
2,408 |
418f2e1cbe4fb3ef369e981e72bf40eeddfd052e
|
<mask token>
|
<mask token>
def my_loss():
return nn.CrossEntropyLoss()
|
import torch.nn as nn
def my_loss():
return nn.CrossEntropyLoss()
|
import torch.nn as nn
def my_loss():
return nn.CrossEntropyLoss()
| null |
[
0,
1,
2,
3
] |
2,409 |
66b42791325a53172d4514cdd16ccd58d4edb186
|
<mask token>
class Tarefas(Screen):
<mask token>
<mask token>
class Tarefa(BoxLayout):
def __init__(self, text='', **kwargs):
super().__init__(**kwargs)
self.ids.label.text = text
class Test(App):
def build(self):
return Gerenciador()
<mask token>
|
<mask token>
class Gerenciador(ScreenManager):
pass
class Menu(Screen):
pass
class Tarefas(Screen):
def __init__(self, tarefas=[], **kwargs):
super().__init__(**kwargs)
for tarefa in tarefas:
self.ids.box.add_widget(Tarefa(text=tarefa))
def addWidget(self):
texto = self.ids.texto.text
self.ids.box.add_widget(Tarefa(text=texto))
self.ids.texto.text = ''
class Tarefa(BoxLayout):
def __init__(self, text='', **kwargs):
super().__init__(**kwargs)
self.ids.label.text = text
class Test(App):
def build(self):
return Gerenciador()
<mask token>
|
<mask token>
class Gerenciador(ScreenManager):
pass
class Menu(Screen):
pass
class Tarefas(Screen):
def __init__(self, tarefas=[], **kwargs):
super().__init__(**kwargs)
for tarefa in tarefas:
self.ids.box.add_widget(Tarefa(text=tarefa))
def addWidget(self):
texto = self.ids.texto.text
self.ids.box.add_widget(Tarefa(text=texto))
self.ids.texto.text = ''
class Tarefa(BoxLayout):
def __init__(self, text='', **kwargs):
super().__init__(**kwargs)
self.ids.label.text = text
class Test(App):
def build(self):
return Gerenciador()
Test().run()
|
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import ScreenManager, Screen
class Gerenciador(ScreenManager):
pass
class Menu(Screen):
pass
class Tarefas(Screen):
def __init__(self, tarefas=[], **kwargs):
super().__init__(**kwargs)
for tarefa in tarefas:
self.ids.box.add_widget(Tarefa(text=tarefa))
def addWidget(self):
texto = self.ids.texto.text
self.ids.box.add_widget(Tarefa(text=texto))
self.ids.texto.text = ''
class Tarefa(BoxLayout):
def __init__(self, text='', **kwargs):
super().__init__(**kwargs)
self.ids.label.text = text
class Test(App):
def build(self):
return Gerenciador()
Test().run()
|
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import ScreenManager, Screen
class Gerenciador(ScreenManager):
pass
class Menu(Screen):
pass
class Tarefas(Screen):
def __init__(self, tarefas=[], **kwargs):
super().__init__(**kwargs)
for tarefa in tarefas:
self.ids.box.add_widget(Tarefa(text = tarefa))
def addWidget(self):
texto = self.ids.texto.text
self.ids.box.add_widget(Tarefa(text = texto))
self.ids.texto.text = ''
class Tarefa(BoxLayout):
def __init__(self, text='', **kwargs):
super().__init__(**kwargs)
self.ids.label.text = text
class Test(App):
def build(self):
return Gerenciador()
Test().run()
|
[
5,
9,
10,
11,
12
] |
2,410 |
0354445d255cc79d3cb9242f82d37e035ff61788
|
/Users/jhajhajhajha1/anaconda/lib/python3.6/codecs.py
| null | null | null | null |
[
0
] |
2,411 |
c46495eebbe796253f56b7472d5548b41c5d0bc4
|
<mask token>
def check_number_2(problem_input: str) ->bool:
previous = 0
current = 1
triple = True
seen_a_double = False
length = len(problem_input)
while current < length:
if int(problem_input[current]) < int(problem_input[previous]):
return False
if int(problem_input[current]) == int(problem_input[previous]):
if previous >= 1:
triple = int(problem_input[previous - 1]) == int(problem_input
[previous])
if current < length - 1:
triple = int(problem_input[current + 1]) == int(problem_input
[current])
while current < length - 1 and int(problem_input[current]
) == int(problem_input[current + 1]):
current += 1
previous += 1
if not triple:
seen_a_double = True
previous += 1
current += 1
return seen_a_double
<mask token>
def main():
x = '111111'
print(check_number(x) is True)
x = '223450'
print(check_number(x) is False)
x = '123789'
print(check_number(x) is False)
print('PART 1:', part_1())
x = '112233'
print(check_number_2(x) is True)
x = '123444'
print(check_number_2(x) is False)
x = '111122'
print(check_number_2(x) is True)
x = '112222'
print(check_number_2(x) is True)
x = '1112589'
print(check_number_2(x) is False)
print('PART 2:', part_2())
<mask token>
|
def part_1() ->int:
start = 382345
end = 843167
total = 0
for number in range(start, end + 1):
if check_number(str(number)):
total += 1
return total
<mask token>
def check_number_2(problem_input: str) ->bool:
previous = 0
current = 1
triple = True
seen_a_double = False
length = len(problem_input)
while current < length:
if int(problem_input[current]) < int(problem_input[previous]):
return False
if int(problem_input[current]) == int(problem_input[previous]):
if previous >= 1:
triple = int(problem_input[previous - 1]) == int(problem_input
[previous])
if current < length - 1:
triple = int(problem_input[current + 1]) == int(problem_input
[current])
while current < length - 1 and int(problem_input[current]
) == int(problem_input[current + 1]):
current += 1
previous += 1
if not triple:
seen_a_double = True
previous += 1
current += 1
return seen_a_double
<mask token>
def main():
x = '111111'
print(check_number(x) is True)
x = '223450'
print(check_number(x) is False)
x = '123789'
print(check_number(x) is False)
print('PART 1:', part_1())
x = '112233'
print(check_number_2(x) is True)
x = '123444'
print(check_number_2(x) is False)
x = '111122'
print(check_number_2(x) is True)
x = '112222'
print(check_number_2(x) is True)
x = '1112589'
print(check_number_2(x) is False)
print('PART 2:', part_2())
<mask token>
|
def part_1() ->int:
start = 382345
end = 843167
total = 0
for number in range(start, end + 1):
if check_number(str(number)):
total += 1
return total
def check_number(problem_input: str) ->bool:
previous = 0
double = False
for current in range(1, len(problem_input)):
if int(problem_input[current]) < int(problem_input[previous]):
return False
if int(problem_input[previous]) == int(problem_input[current]):
double = True
previous += 1
return double
def check_number_2(problem_input: str) ->bool:
previous = 0
current = 1
triple = True
seen_a_double = False
length = len(problem_input)
while current < length:
if int(problem_input[current]) < int(problem_input[previous]):
return False
if int(problem_input[current]) == int(problem_input[previous]):
if previous >= 1:
triple = int(problem_input[previous - 1]) == int(problem_input
[previous])
if current < length - 1:
triple = int(problem_input[current + 1]) == int(problem_input
[current])
while current < length - 1 and int(problem_input[current]
) == int(problem_input[current + 1]):
current += 1
previous += 1
if not triple:
seen_a_double = True
previous += 1
current += 1
return seen_a_double
def part_2() ->int:
start = 382345
end = 843167
total = 0
for number in range(start, end + 1):
if check_number_2(str(number)):
total += 1
return total
def main():
x = '111111'
print(check_number(x) is True)
x = '223450'
print(check_number(x) is False)
x = '123789'
print(check_number(x) is False)
print('PART 1:', part_1())
x = '112233'
print(check_number_2(x) is True)
x = '123444'
print(check_number_2(x) is False)
x = '111122'
print(check_number_2(x) is True)
x = '112222'
print(check_number_2(x) is True)
x = '1112589'
print(check_number_2(x) is False)
print('PART 2:', part_2())
<mask token>
|
def part_1() ->int:
start = 382345
end = 843167
total = 0
for number in range(start, end + 1):
if check_number(str(number)):
total += 1
return total
def check_number(problem_input: str) ->bool:
previous = 0
double = False
for current in range(1, len(problem_input)):
if int(problem_input[current]) < int(problem_input[previous]):
return False
if int(problem_input[previous]) == int(problem_input[current]):
double = True
previous += 1
return double
def check_number_2(problem_input: str) ->bool:
previous = 0
current = 1
triple = True
seen_a_double = False
length = len(problem_input)
while current < length:
if int(problem_input[current]) < int(problem_input[previous]):
return False
if int(problem_input[current]) == int(problem_input[previous]):
if previous >= 1:
triple = int(problem_input[previous - 1]) == int(problem_input
[previous])
if current < length - 1:
triple = int(problem_input[current + 1]) == int(problem_input
[current])
while current < length - 1 and int(problem_input[current]
) == int(problem_input[current + 1]):
current += 1
previous += 1
if not triple:
seen_a_double = True
previous += 1
current += 1
return seen_a_double
def part_2() ->int:
start = 382345
end = 843167
total = 0
for number in range(start, end + 1):
if check_number_2(str(number)):
total += 1
return total
def main():
x = '111111'
print(check_number(x) is True)
x = '223450'
print(check_number(x) is False)
x = '123789'
print(check_number(x) is False)
print('PART 1:', part_1())
x = '112233'
print(check_number_2(x) is True)
x = '123444'
print(check_number_2(x) is False)
x = '111122'
print(check_number_2(x) is True)
x = '112222'
print(check_number_2(x) is True)
x = '1112589'
print(check_number_2(x) is False)
print('PART 2:', part_2())
if __name__ == '__main__':
main()
|
def part_1() -> int:
start = 382345
end = 843167
total = 0
for number in range(start, end + 1):
if check_number(str(number)):
total += 1
return total
def check_number(problem_input: str) -> bool:
previous = 0
double = False
for current in range(1, len(problem_input)):
if int(problem_input[current]) < int(problem_input[previous]):
return False
if int(problem_input[previous]) == int(problem_input[current]):
double = True
previous += 1
return double
def check_number_2(problem_input: str) -> bool:
previous = 0
current = 1
triple = True
seen_a_double = False
length = len(problem_input)
while current < length:
if int(problem_input[current]) < int(problem_input[previous]):
return False
if int(problem_input[current]) == int(problem_input[previous]):
if previous >= 1:
triple = int(problem_input[previous - 1]) == int(problem_input[previous])
if current < length - 1:
triple = int(problem_input[current + 1]) == int(problem_input[current])
while current < length - 1 and int(problem_input[current]) == int(problem_input[current + 1]):
current += 1
previous += 1
if not triple:
seen_a_double = True
previous += 1
current += 1
return seen_a_double
def part_2() -> int:
start = 382345
end = 843167
total = 0
for number in range(start, end + 1):
if check_number_2(str(number)):
total += 1
return total
def main():
x = "111111"
print(check_number(x) is True)
x = "223450"
print(check_number(x) is False)
x = "123789"
print(check_number(x) is False)
print("PART 1:", part_1()) # should be 460
x = "112233"
print(check_number_2(x) is True)
x = "123444"
print(check_number_2(x) is False)
x = "111122"
print(check_number_2(x) is True)
x = "112222"
print(check_number_2(x) is True)
x = "1112589"
print(check_number_2(x) is False)
print("PART 2:", part_2())
if __name__ == '__main__':
main()
|
[
2,
3,
5,
6,
7
] |
2,412 |
1c5ca920fe1f116a5bc52c9e5c53c13b1e1c925f
|
<mask token>
|
def tobin(n):
bin = ''
while n / 2 != 0:
if n % 2 == 0:
bin = bin + '0'
else:
bin = bin + '1'
if n % 2 == 1:
bin = bin + '1'
return bin
<mask token>
|
def tobin(n):
bin = ''
while n / 2 != 0:
if n % 2 == 0:
bin = bin + '0'
else:
bin = bin + '1'
if n % 2 == 1:
bin = bin + '1'
return bin
<mask token>
print(bin)
|
def tobin(n):
bin = ''
while n / 2 != 0:
if n % 2 == 0:
bin = bin + '0'
else:
bin = bin + '1'
if n % 2 == 1:
bin = bin + '1'
return bin
n = int(input())
bin = tobin(5)
print(bin)
|
def tobin(n):
bin = "";
while(n/2!=0):
if n%2==0:
bin = bin + "0"
else:
bin = bin + "1"
if n%2==1:
bin = bin + "1"
return bin
n = int(input())
bin = tobin(5)
print(bin)
|
[
0,
1,
2,
3,
4
] |
2,413 |
f193094c551df2a32860948b1a8710b53ca0dfb6
|
<mask token>
|
<mask token>
def quicksort(x, pivot_index):
key1_idx, key2_idx, key3_idx = 0, 0, len(x)
key1_val, key2_val = 'key1', 'key2'
while key2_idx < key3_idx:
if x[key2_idx]['key'] == key1_val:
x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]
key1_idx, key2_idx = key1_idx + 1, key2_idx + 1
elif x[key2_idx]['key'] == key2_val:
key2_idx += 1
else:
key3_idx -= 1
x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]
return x
<mask token>
|
<mask token>
def quicksort(x, pivot_index):
key1_idx, key2_idx, key3_idx = 0, 0, len(x)
key1_val, key2_val = 'key1', 'key2'
while key2_idx < key3_idx:
if x[key2_idx]['key'] == key1_val:
x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]
key1_idx, key2_idx = key1_idx + 1, key2_idx + 1
elif x[key2_idx]['key'] == key2_val:
key2_idx += 1
else:
key3_idx -= 1
x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]
return x
if __name__ == '__main__':
keys = ['key1', 'key2', 'key3']
values = [0, 1, 2, 3, 4]
key_values = [{'key': key, 'value': value} for key in keys for value in
values]
random.shuffle(key_values)
print(quicksort(key_values, 7))
|
import random
def quicksort(x, pivot_index):
key1_idx, key2_idx, key3_idx = 0, 0, len(x)
key1_val, key2_val = 'key1', 'key2'
while key2_idx < key3_idx:
if x[key2_idx]['key'] == key1_val:
x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]
key1_idx, key2_idx = key1_idx + 1, key2_idx + 1
elif x[key2_idx]['key'] == key2_val:
key2_idx += 1
else:
key3_idx -= 1
x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]
return x
if __name__ == '__main__':
keys = ['key1', 'key2', 'key3']
values = [0, 1, 2, 3, 4]
key_values = [{'key': key, 'value': value} for key in keys for value in
values]
random.shuffle(key_values)
print(quicksort(key_values, 7))
|
import random
#quicksort a list of objects based on keys, which can be any of 3 values
# done in O(n) time in one pass, and O(1) additional space complexity
def quicksort(x, pivot_index):
key1_idx, key2_idx, key3_idx = 0, 0, len(x)
key1_val, key2_val= 'key1', 'key2'
while key2_idx < key3_idx:
if x[key2_idx]['key'] == key1_val:
x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]
key1_idx, key2_idx = key1_idx + 1, key2_idx + 1
elif x[key2_idx]['key'] == key2_val:
key2_idx += 1
else:
key3_idx -= 1
x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]
return x
if __name__ == '__main__':
keys = ['key1', 'key2', 'key3']
values = [0, 1, 2, 3, 4]
key_values = [{'key': key, 'value': value} for key in keys for value in values]
random.shuffle(key_values)
print(quicksort(key_values, 7))
|
[
0,
1,
2,
3,
4
] |
2,414 |
0c7efa99dc22154f9835b277cba5057b213a28e7
|
<mask token>
|
<mask token>
class NombreaplicacionConfig(AppConfig):
<mask token>
|
<mask token>
class NombreaplicacionConfig(AppConfig):
name = 'nombreAplicacion'
|
from django.apps import AppConfig
class NombreaplicacionConfig(AppConfig):
name = 'nombreAplicacion'
| null |
[
0,
1,
2,
3
] |
2,415 |
d0364b7cad29c639af9df5c78e810144ffd6ce2e
|
<mask token>
|
<mask token>
def train(token2id, train_data, lr, batch_size, epochs, model):
dataset = DataGenerator(token2id, train_data)
dataloader = DataLoader(dataset, batch_size=batch_size, collate_fn=
my_collate)
model = to_device(model)
model_optimizer = optim.Adam(model.discriminator.parameters(), lr=lr)
criterion = nn.BCELoss()
for epoch in range(1, epochs):
print('Epoch {}'.format(epoch))
print('*' * 80)
running_loss = 0
for i, data in enumerate(dataloader):
data = to_device(data)
x, x_len, y, _ = data
predict = model(x, x_len)
loss = criterion(predict.squeeze(1), y)
model_optimizer.zero_grad()
loss.backward()
model_optimizer.step()
running_loss += loss.item()
if i % 10 == 0 and i != 0:
print('Average batch loss: {}'.format(running_loss / 10))
running_loss = 0
<mask token>
|
<mask token>
def train(token2id, train_data, lr, batch_size, epochs, model):
dataset = DataGenerator(token2id, train_data)
dataloader = DataLoader(dataset, batch_size=batch_size, collate_fn=
my_collate)
model = to_device(model)
model_optimizer = optim.Adam(model.discriminator.parameters(), lr=lr)
criterion = nn.BCELoss()
for epoch in range(1, epochs):
print('Epoch {}'.format(epoch))
print('*' * 80)
running_loss = 0
for i, data in enumerate(dataloader):
data = to_device(data)
x, x_len, y, _ = data
predict = model(x, x_len)
loss = criterion(predict.squeeze(1), y)
model_optimizer.zero_grad()
loss.backward()
model_optimizer.step()
running_loss += loss.item()
if i % 10 == 0 and i != 0:
print('Average batch loss: {}'.format(running_loss / 10))
running_loss = 0
if __name__ == '__mian__':
pass
|
from utils import to_device
from utils import build_dictionary, my_collate
from DataGenerator import DataGenerator
from torch.utils.data import DataLoader
from torch import optim
import torch.nn as nn
from ADSentimentModel import ADSentimentModel
import torch
def train(token2id, train_data, lr, batch_size, epochs, model):
dataset = DataGenerator(token2id, train_data)
dataloader = DataLoader(dataset, batch_size=batch_size, collate_fn=
my_collate)
model = to_device(model)
model_optimizer = optim.Adam(model.discriminator.parameters(), lr=lr)
criterion = nn.BCELoss()
for epoch in range(1, epochs):
print('Epoch {}'.format(epoch))
print('*' * 80)
running_loss = 0
for i, data in enumerate(dataloader):
data = to_device(data)
x, x_len, y, _ = data
predict = model(x, x_len)
loss = criterion(predict.squeeze(1), y)
model_optimizer.zero_grad()
loss.backward()
model_optimizer.step()
running_loss += loss.item()
if i % 10 == 0 and i != 0:
print('Average batch loss: {}'.format(running_loss / 10))
running_loss = 0
if __name__ == '__mian__':
pass
|
from utils import to_device
from utils import build_dictionary,my_collate
from DataGenerator import DataGenerator
from torch.utils.data import DataLoader
from torch import optim
import torch.nn as nn
from ADSentimentModel import ADSentimentModel
import torch
def train(token2id, train_data, lr, batch_size, epochs,model):
dataset = DataGenerator(token2id, train_data)
dataloader = DataLoader(dataset,batch_size=batch_size,collate_fn=my_collate)
model = to_device(model)
model_optimizer = optim.Adam(model.discriminator.parameters(),lr=lr)
criterion = nn.BCELoss()
for epoch in range(1,epochs):
print("Epoch {}".format(epoch))
print("*"*80)
running_loss = 0
for i,data in enumerate(dataloader):
data = to_device(data)
x,x_len,y,_ = data
predict = model(x,x_len)
loss = criterion(predict.squeeze(1),y)
model_optimizer.zero_grad()
loss.backward()
model_optimizer.step()
running_loss += loss.item()
if i%10 == 0 and i != 0 :
print("Average batch loss: {}".format(running_loss/10))
running_loss = 0
if __name__ == "__mian__":
pass
|
[
0,
1,
2,
3,
4
] |
2,416 |
43d9edd9120351ce5065eb266d482ccaa2e56177
|
<mask token>
|
<mask token>
model.add(Dense(5, input_dim=1, activation='relu'))
model.add(Dense(3))
model.add(Dense(1))
model.summary()
<mask token>
|
<mask token>
x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
y = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
x2 = np.array([11, 12, 13, 14, 15])
model = Sequential()
model.add(Dense(5, input_dim=1, activation='relu'))
model.add(Dense(3))
model.add(Dense(1))
model.summary()
<mask token>
|
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
y = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
x2 = np.array([11, 12, 13, 14, 15])
model = Sequential()
model.add(Dense(5, input_dim=1, activation='relu'))
model.add(Dense(3))
model.add(Dense(1))
model.summary()
<mask token>
|
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
y = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
x2 = np.array([11, 12, 13, 14, 15])
model = Sequential()
model.add(Dense(5, input_dim=1, activation='relu'))
model.add(Dense(3))
model.add(Dense(1))
model.summary()
'''
model.compile(loss='mse', optimizer='adam',
metrics=['accuracy'])
model.fit(x, y, epochs=100)
loss, acc = model.evaluate(x, y)
print("acc : ", acc)
print("loss : ", loss)
y_predict = model.predict(x2)
print(y_predict)
'''
|
[
0,
1,
2,
3,
4
] |
2,417 |
eca40c37e0e437a5f4e5643f5fb7cd3e38605471
|
<mask token>
def about(request):
teams = Team.objects.all()
return render(request, 'pages/about.html', {'teams': teams})
<mask token>
def contact(request):
if request.method == 'POST':
name = request.POST['name']
email = request.POST['email']
subject = request.POST['subject']
phone = request.POST['phone']
message = request.POST['message']
cfm = ContactForm(name=name, email=email, subject=subject, phone=
phone, message=message)
cfm.save()
messages.success(request, 'Successfully Saved')
return render(request, 'pages/contact.html')
|
<mask token>
def about(request):
teams = Team.objects.all()
return render(request, 'pages/about.html', {'teams': teams})
def service(request):
return render(request, 'pages/services.html')
def contact(request):
if request.method == 'POST':
name = request.POST['name']
email = request.POST['email']
subject = request.POST['subject']
phone = request.POST['phone']
message = request.POST['message']
cfm = ContactForm(name=name, email=email, subject=subject, phone=
phone, message=message)
cfm.save()
messages.success(request, 'Successfully Saved')
return render(request, 'pages/contact.html')
|
<mask token>
def index(request):
teams = Team.objects.all()
cars = Car.objects.order_by('-created_date').filter(is_featured=True)
all_cars = Car.objects.order_by('-created_date').all()
model_field = Car.objects.values_list('model', flat=True).distinct()
state_field = Car.objects.values_list('state', flat=True).distinct()
body_field = Car.objects.values_list('body_style', flat=True).distinct()
year_field = Car.objects.values_list('year', flat=True).distinct()
return render(request, 'pages/index.html', {'teams': teams,
'featured_cars': cars, 'all_cars': all_cars, 'model_field':
model_field, 'state_field': state_field, 'body_field': body_field,
'year_field': year_field})
def about(request):
teams = Team.objects.all()
return render(request, 'pages/about.html', {'teams': teams})
def service(request):
return render(request, 'pages/services.html')
def contact(request):
if request.method == 'POST':
name = request.POST['name']
email = request.POST['email']
subject = request.POST['subject']
phone = request.POST['phone']
message = request.POST['message']
cfm = ContactForm(name=name, email=email, subject=subject, phone=
phone, message=message)
cfm.save()
messages.success(request, 'Successfully Saved')
return render(request, 'pages/contact.html')
|
from django.shortcuts import render
from .models import Team, ContactForm
from cars.models import Car
from django.contrib import messages
def index(request):
teams = Team.objects.all()
cars = Car.objects.order_by('-created_date').filter(is_featured=True)
all_cars = Car.objects.order_by('-created_date').all()
model_field = Car.objects.values_list('model', flat=True).distinct()
state_field = Car.objects.values_list('state', flat=True).distinct()
body_field = Car.objects.values_list('body_style', flat=True).distinct()
year_field = Car.objects.values_list('year', flat=True).distinct()
return render(request, 'pages/index.html', {'teams': teams,
'featured_cars': cars, 'all_cars': all_cars, 'model_field':
model_field, 'state_field': state_field, 'body_field': body_field,
'year_field': year_field})
def about(request):
teams = Team.objects.all()
return render(request, 'pages/about.html', {'teams': teams})
def service(request):
return render(request, 'pages/services.html')
def contact(request):
if request.method == 'POST':
name = request.POST['name']
email = request.POST['email']
subject = request.POST['subject']
phone = request.POST['phone']
message = request.POST['message']
cfm = ContactForm(name=name, email=email, subject=subject, phone=
phone, message=message)
cfm.save()
messages.success(request, 'Successfully Saved')
return render(request, 'pages/contact.html')
|
from django.shortcuts import render
from .models import Team,ContactForm
from cars.models import Car
from django.contrib import messages
# Create your views here.
def index(request):
teams=Team.objects.all()
cars = Car.objects.order_by("-created_date").filter(is_featured=True)
all_cars=Car.objects.order_by("-created_date").all()
model_field=Car.objects.values_list('model',flat=True).distinct()
state_field=Car.objects.values_list('state',flat=True).distinct()
body_field=Car.objects.values_list('body_style',flat=True).distinct()
year_field=Car.objects.values_list('year',flat=True).distinct()
return render(request,'pages/index.html',{'teams':teams,'featured_cars':cars,"all_cars":all_cars,'model_field':model_field,'state_field':state_field,'body_field':body_field,'year_field':year_field})
def about(request):
teams = Team.objects.all()
return render(request,'pages/about.html',{'teams':teams})
def service(request):
return render(request,'pages/services.html')
def contact(request):
if request.method == 'POST':
name=request.POST['name']
email=request.POST['email']
subject=request.POST['subject']
phone=request.POST['phone']
message=request.POST['message']
cfm=ContactForm(name=name,email=email,subject=subject,phone=phone,message=message)
cfm.save()
messages.success(request,'Successfully Saved')
return render(request,'pages/contact.html')
|
[
2,
3,
4,
5,
6
] |
2,418 |
77a82f99ab10e3d53e3f8466d43b67e8b87c1588
|
<mask token>
|
print(1)
print(2)
print('Jenkins')
print('Jenkins2')
print('Jenkins3')
print('Jenkins44')
print('Jenkins55khlk')
print('3333333')
print('44444444')
print('jhjhj')
|
print(1)
print(2)
print("Jenkins")
print("Jenkins2")
print("Jenkins3")
print("Jenkins44")
print("Jenkins55khlk")
print("3333333")
print("44444444")
print("jhjhj")
| null | null |
[
0,
1,
2
] |
2,419 |
28f4f14c3c29ee96c370ffe71c268549552b915e
|
<mask token>
class Person(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class PersonTemporaryCode(models.Model):
person = models.ForeignKey(Person, on_delete=models.CASCADE)
code = models.IntegerField()
expiration_date = models.DateTimeField()
def __str__(self):
return f'{self.person} - {self.code} -- {self.expiration_date}'
|
<mask token>
class Person(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __str__(self):
return '{} {}'.format(self.name, self.last_name)
class PersonTemporaryCode(models.Model):
person = models.ForeignKey(Person, on_delete=models.CASCADE)
code = models.IntegerField()
expiration_date = models.DateTimeField()
def __str__(self):
return f'{self.person} - {self.code} -- {self.expiration_date}'
|
<mask token>
class Person(models.Model):
user = models.ForeignKey(User, related_name='person', on_delete=models.
CASCADE, blank=True, null=True)
event = models.ForeignKey(Event, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
email = models.EmailField(unique=True, validators=[validate_email])
university = models.ForeignKey(University, on_delete=models.PROTECT)
rut = models.CharField(max_length=13, unique=True)
phone_number = models.CharField(max_length=20)
emergency_phone_number = models.CharField(max_length=20, null=True)
avatar = models.ImageField(upload_to='person_avatars/', blank=True)
pending_messages = models.IntegerField(default=0)
def __str__(self):
return '{} {}'.format(self.name, self.last_name)
class PersonTemporaryCode(models.Model):
person = models.ForeignKey(Person, on_delete=models.CASCADE)
code = models.IntegerField()
expiration_date = models.DateTimeField()
def __str__(self):
return f'{self.person} - {self.code} -- {self.expiration_date}'
|
from django.db import models
from django.contrib.auth.models import User
from Event.models import Event
from University.models import University
from django.core.validators import validate_email
class Person(models.Model):
user = models.ForeignKey(User, related_name='person', on_delete=models.
CASCADE, blank=True, null=True)
event = models.ForeignKey(Event, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
email = models.EmailField(unique=True, validators=[validate_email])
university = models.ForeignKey(University, on_delete=models.PROTECT)
rut = models.CharField(max_length=13, unique=True)
phone_number = models.CharField(max_length=20)
emergency_phone_number = models.CharField(max_length=20, null=True)
avatar = models.ImageField(upload_to='person_avatars/', blank=True)
pending_messages = models.IntegerField(default=0)
def __str__(self):
return '{} {}'.format(self.name, self.last_name)
class PersonTemporaryCode(models.Model):
person = models.ForeignKey(Person, on_delete=models.CASCADE)
code = models.IntegerField()
expiration_date = models.DateTimeField()
def __str__(self):
return f'{self.person} - {self.code} -- {self.expiration_date}'
|
from django.db import models
from django.contrib.auth.models import User
from Event.models import Event
from University.models import University
from django.core.validators import validate_email
class Person(models.Model):
user = models.ForeignKey(
User, related_name='person', on_delete=models.CASCADE,
blank=True, null=True
)
event = models.ForeignKey(Event, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
email = models.EmailField(unique=True, validators=[validate_email])
university = models.ForeignKey(University, on_delete=models.PROTECT)
rut = models.CharField(max_length=13, unique=True)
phone_number = models.CharField(max_length=20)
emergency_phone_number = models.CharField(max_length=20, null=True)
avatar = models.ImageField(upload_to='person_avatars/', blank=True)
pending_messages = models.IntegerField(default=0)
def __str__(self):
return '{} {}'.format(self.name, self.last_name)
class PersonTemporaryCode(models.Model):
person = models.ForeignKey(Person, on_delete=models.CASCADE)
code = models.IntegerField()
expiration_date = models.DateTimeField()
def __str__(self):
return f'{self.person} - {self.code} -- {self.expiration_date}'
|
[
4,
5,
6,
7,
8
] |
2,420 |
660334be611c30397c2f33890e1bca1fc43bd01f
|
<mask token>
def PMHD(p, chi, b):
return b ** 2 / p * (1 + sin(chi) ** 2)
def xMHD(p, chi, b):
return -b ** 2 / p ** 2 * sin(chi) * cos(chi)
def PBGI(p, chi, b):
Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))
if Q > 1:
A = 1
else:
A = Q
return b ** 2 / p * (A * cos(chi) ** 2 + 0.01 / sqrt(p))
def xBGI(p, chi, b):
Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))
if Q > 1:
A = 1
else:
A = Q
return A * b ** 2 / p ** 2 * sin(chi) * cos(chi)
<mask token>
|
<mask token>
def PMHD(p, chi, b):
return b ** 2 / p * (1 + sin(chi) ** 2)
def xMHD(p, chi, b):
return -b ** 2 / p ** 2 * sin(chi) * cos(chi)
def PBGI(p, chi, b):
Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))
if Q > 1:
A = 1
else:
A = Q
return b ** 2 / p * (A * cos(chi) ** 2 + 0.01 / sqrt(p))
def xBGI(p, chi, b):
Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))
if Q > 1:
A = 1
else:
A = Q
return A * b ** 2 / p ** 2 * sin(chi) * cos(chi)
<mask token>
for i in range(450):
xi0 = i / 5 + 0.1
x0 = pi / 180 * xi0
P = P0
x = x0
while 0.7 * P / B12 ** 0.57 / sqrt(cos(x)) < 2:
P = P + PMHD(P, x, B12) * dx
x = x + xMHD(P, x, B12) * dx
gx = 180 / pi * x
iP = int(P / 0.1)
ix = int(gx)
if iP < 80:
MHD[iP, ix, 0] = MHD[iP, ix, 0] + 1
for i in range(450):
xi0 = i / 5 + 0.1
x0 = pi / 180 * xi0
P = P0
x = x0
while 0.7 * P / B12 ** 0.57 / sqrt(cos(x)) < 2:
P = P + PBGI(P, x, B12) * dx
x = x + xBGI(P, x, B12) * dx
gx = 180 / pi * x
iP = int(P / 0.1)
ix = int(gx)
if iP < 80:
BGI[iP, ix, 0] = BGI[iP, ix, 0] + 1
for i in range(90):
j = int(10 * Pend)
AngMHD[i, 0] = i
AngBGI[i, 0] = i
AngMHD[i, 1] = MHD[j, i, 0]
AngBGI[i, 1] = BGI[j, i, 0]
<mask token>
plt.xlim(1, 90)
plt.ylim(0, 1.2 * ymax)
<mask token>
plt.scatter(X1, Y1, color='blue', s=15, label='MHD')
plt.scatter(X2, Y2, color='red', s=15, label='BGI')
plt.title('$P_0$ = ' + str(P0) + ', P = ' + str(Pend) + ', $B_{12}$ = ' +
str(B12) + '')
plt.grid(True, which='both', ls='-')
plt.grid(True, which='both', ls='-')
plt.xlabel('$\\chi$')
plt.legend()
plt.show()
|
<mask token>
MHD = np.zeros((80, 90, 5), dtype=float)
BGI = np.zeros((80, 90, 5), dtype=float)
Fp = np.zeros(80, dtype=float)
AngMHD = np.zeros((90, 2), dtype=float)
AngBGI = np.zeros((90, 2), dtype=float)
B0 = [0.5, 1.5, 3, 5, 10]
V = [0.3, 0.3, 0.2, 0.1, 0.1]
def PMHD(p, chi, b):
return b ** 2 / p * (1 + sin(chi) ** 2)
def xMHD(p, chi, b):
return -b ** 2 / p ** 2 * sin(chi) * cos(chi)
def PBGI(p, chi, b):
Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))
if Q > 1:
A = 1
else:
A = Q
return b ** 2 / p * (A * cos(chi) ** 2 + 0.01 / sqrt(p))
def xBGI(p, chi, b):
Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))
if Q > 1:
A = 1
else:
A = Q
return A * b ** 2 / p ** 2 * sin(chi) * cos(chi)
P0 = 0.3
Pend = 1
B12 = 4
dx = 0.0001
for i in range(450):
xi0 = i / 5 + 0.1
x0 = pi / 180 * xi0
P = P0
x = x0
while 0.7 * P / B12 ** 0.57 / sqrt(cos(x)) < 2:
P = P + PMHD(P, x, B12) * dx
x = x + xMHD(P, x, B12) * dx
gx = 180 / pi * x
iP = int(P / 0.1)
ix = int(gx)
if iP < 80:
MHD[iP, ix, 0] = MHD[iP, ix, 0] + 1
for i in range(450):
xi0 = i / 5 + 0.1
x0 = pi / 180 * xi0
P = P0
x = x0
while 0.7 * P / B12 ** 0.57 / sqrt(cos(x)) < 2:
P = P + PBGI(P, x, B12) * dx
x = x + xBGI(P, x, B12) * dx
gx = 180 / pi * x
iP = int(P / 0.1)
ix = int(gx)
if iP < 80:
BGI[iP, ix, 0] = BGI[iP, ix, 0] + 1
for i in range(90):
j = int(10 * Pend)
AngMHD[i, 0] = i
AngBGI[i, 0] = i
AngMHD[i, 1] = MHD[j, i, 0]
AngBGI[i, 1] = BGI[j, i, 0]
ymax = np.max(AngBGI)
fig, ax = plt.subplots()
x = np.linspace(0, 90)
plt.xlim(1, 90)
plt.ylim(0, 1.2 * ymax)
data1 = np.array(AngMHD)
data2 = np.array(AngBGI)
X1, Y1 = data1.T
X2, Y2 = data2.T
plt.scatter(X1, Y1, color='blue', s=15, label='MHD')
plt.scatter(X2, Y2, color='red', s=15, label='BGI')
plt.title('$P_0$ = ' + str(P0) + ', P = ' + str(Pend) + ', $B_{12}$ = ' +
str(B12) + '')
plt.grid(True, which='both', ls='-')
plt.grid(True, which='both', ls='-')
plt.xlabel('$\\chi$')
plt.legend()
plt.show()
|
import numpy as np
import matplotlib.pyplot as plt
from math import *
from scipy.integrate import *
from pylab import *
from scipy.integrate import quad
MHD = np.zeros((80, 90, 5), dtype=float)
BGI = np.zeros((80, 90, 5), dtype=float)
Fp = np.zeros(80, dtype=float)
AngMHD = np.zeros((90, 2), dtype=float)
AngBGI = np.zeros((90, 2), dtype=float)
B0 = [0.5, 1.5, 3, 5, 10]
V = [0.3, 0.3, 0.2, 0.1, 0.1]
def PMHD(p, chi, b):
return b ** 2 / p * (1 + sin(chi) ** 2)
def xMHD(p, chi, b):
return -b ** 2 / p ** 2 * sin(chi) * cos(chi)
def PBGI(p, chi, b):
Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))
if Q > 1:
A = 1
else:
A = Q
return b ** 2 / p * (A * cos(chi) ** 2 + 0.01 / sqrt(p))
def xBGI(p, chi, b):
Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))
if Q > 1:
A = 1
else:
A = Q
return A * b ** 2 / p ** 2 * sin(chi) * cos(chi)
P0 = 0.3
Pend = 1
B12 = 4
dx = 0.0001
for i in range(450):
xi0 = i / 5 + 0.1
x0 = pi / 180 * xi0
P = P0
x = x0
while 0.7 * P / B12 ** 0.57 / sqrt(cos(x)) < 2:
P = P + PMHD(P, x, B12) * dx
x = x + xMHD(P, x, B12) * dx
gx = 180 / pi * x
iP = int(P / 0.1)
ix = int(gx)
if iP < 80:
MHD[iP, ix, 0] = MHD[iP, ix, 0] + 1
for i in range(450):
xi0 = i / 5 + 0.1
x0 = pi / 180 * xi0
P = P0
x = x0
while 0.7 * P / B12 ** 0.57 / sqrt(cos(x)) < 2:
P = P + PBGI(P, x, B12) * dx
x = x + xBGI(P, x, B12) * dx
gx = 180 / pi * x
iP = int(P / 0.1)
ix = int(gx)
if iP < 80:
BGI[iP, ix, 0] = BGI[iP, ix, 0] + 1
for i in range(90):
j = int(10 * Pend)
AngMHD[i, 0] = i
AngBGI[i, 0] = i
AngMHD[i, 1] = MHD[j, i, 0]
AngBGI[i, 1] = BGI[j, i, 0]
ymax = np.max(AngBGI)
fig, ax = plt.subplots()
x = np.linspace(0, 90)
plt.xlim(1, 90)
plt.ylim(0, 1.2 * ymax)
data1 = np.array(AngMHD)
data2 = np.array(AngBGI)
X1, Y1 = data1.T
X2, Y2 = data2.T
plt.scatter(X1, Y1, color='blue', s=15, label='MHD')
plt.scatter(X2, Y2, color='red', s=15, label='BGI')
plt.title('$P_0$ = ' + str(P0) + ', P = ' + str(Pend) + ', $B_{12}$ = ' +
str(B12) + '')
plt.grid(True, which='both', ls='-')
plt.grid(True, which='both', ls='-')
plt.xlabel('$\\chi$')
plt.legend()
plt.show()
|
import numpy as np
import matplotlib.pyplot as plt
from math import *
from scipy.integrate import *
from pylab import *
from scipy.integrate import quad
MHD = np.zeros((80, 90, 5), dtype=float)
BGI = np.zeros((80, 90, 5), dtype=float)
Fp = np.zeros((80), dtype=float)
AngMHD = np.zeros((90,2), dtype=float)
AngBGI = np.zeros((90,2), dtype=float)
B0 = [0.5, 1.5, 3, 5, 10]
V = [0.3, 0.3, 0.2, 0.1, 0.1]
def PMHD(p, chi, b):
return b**2/p*(1 +(sin(chi))**2)
def xMHD(p, chi, b):
return -b**2/p**2*sin(chi)*cos(chi)
def PBGI(p, chi, b):
Q = 0.7*p/b**0.57/sqrt(cos(chi))
if Q > 1:
A = 1
else:
A = Q
return b**2/p*(A*(cos(chi))**2 + 0.01/sqrt(p))
def xBGI(p, chi, b):
Q = 0.7*p/b**0.57/sqrt(cos(chi))
if Q > 1:
A = 1
else:
A = Q
return A*b**2/p**2*sin(chi)*cos(chi)
P0 = 0.3
Pend = 1
B12 = 4
dx = 0.0001
for i in range(450):
xi0 = i/5 + 0.1
x0 = pi/180*xi0
P = P0
x = x0
while 0.7*P/B12**0.57/sqrt(cos(x)) < 2:
P = P + PMHD(P, x, B12)*dx
x = x + xMHD(P, x, B12)*dx
gx = 180/pi*x
iP = int(P/0.1)
ix = int(gx)
if iP < 80:
MHD[iP, ix, 0] = MHD[iP, ix, 0] + 1
for i in range(450):
xi0 = i/5 + 0.1
x0 = pi/180*xi0
P = P0
x = x0
while 0.7*P/B12**0.57/sqrt(cos(x)) < 2:
P = P + PBGI(P, x, B12)*dx
x = x + xBGI(P, x, B12)*dx
gx = 180/pi*x
iP = int(P/0.1)
ix = int(gx)
if iP < 80:
BGI[iP, ix, 0] = BGI[iP, ix, 0] + 1
#for j in range(80):
# for i in range(90):
# Fp[j] = Fp[j] + PxiB[j, i, 0]
# print(j/10, Fp[j])
for i in range(90):
j = int(10*Pend)
AngMHD[i,0] = i
AngBGI[i,0] = i
AngMHD[i,1] = MHD[j, i, 0]
AngBGI[i,1] = BGI[j, i, 0]
# print(i, PxiB[10, i, 0])
ymax = np.max(AngBGI)
fig, ax = plt.subplots()
x = np.linspace(0, 90)
plt.xlim(1, 90)
plt.ylim(0, 1.2*ymax)
data1 = np.array(AngMHD)
data2 = np.array(AngBGI)
X1,Y1 = data1.T
X2,Y2 = data2.T
plt.scatter(X1,Y1, color = 'blue', s=15, label="MHD")
plt.scatter(X2,Y2, color = 'red', s=15, label="BGI")
plt.title('$P_0$ = '+str(P0)+', P = '+str(Pend)+', $B_{12}$ = '+str(B12)+'')
plt.grid(True,which="both", ls="-")
plt.grid(True,which="both", ls="-")
plt.xlabel('$\chi$')
#plt.ylabel('$\lambda g(x_{0})$')
plt.legend()
plt.show()
#fig, ax = plt.subplots()
#x = np.linspace(0, 1)
#plt.xlim(0.0001, 1.0)
#plt.ylim(0, 0.1)
#plt.plot(x, x**2*(cos(ch)*(1 - x**2) + 1/2*sin(ch)*(x - x**3))**3, label="fitting")
#plt.title(''+str(PSR)+', $n_{\pm}$ (P = '+str(P)+', $B_{12}$ = '+str(B12)+', $\chi$ = '+str(chi)+'$^{\circ}$), $\lambda = 92$')
#plt.grid(True,which="both", ls="-")
#plt.grid(True,which="both", ls="-")
##ax.vlines(xcr, 0, 8, color = 'black', linewidth = 1.5, linestyle = '--')
#plt.xlabel('$r_{0}/R_0$')
#plt.ylabel('$n_{\pm}$')
#plt.legend()
#plt.show()
|
[
4,
5,
6,
7,
8
] |
2,421 |
95ea8a21d3ac44c7760179bc4ebf67f0c16e6a19
|
<mask token>
class SpecificationsEventHandler(FileSystemEventHandler):
<mask token>
def __init__(self):
self.paused = False
self.banner = (
'============================================================')
<mask token>
<mask token>
|
<mask token>
class SpecificationsEventHandler(FileSystemEventHandler):
<mask token>
def __init__(self):
self.paused = False
self.banner = (
'============================================================')
def on_modified(self, event):
super(SpecificationsEventHandler, self).on_modified(event)
"""
Description:
Catches the file modified event from the watchdog package and
creates the full path to the file for submission to the test engine
of choice.
Args:
event: Contains the information for the file system event
when modification has occurred
"""
if event.is_directory:
return
if self.paused:
return
if event.src_path.endswith('_specs.py') and not self.paused:
self.paused = True
directory = os.path.abspath(os.path.dirname(event.src_path))
filename = os.path.basename(event.src_path)
file = os.path.join(directory, filename)
print(self.banner, end='\n')
print('testing specifications found in file: {0}'.format(file))
print('')
subprocess.call(['mamba', file], shell=True)
print(self.banner, end='\n')
self.paused = False
return
<mask token>
|
<mask token>
class SpecificationsEventHandler(FileSystemEventHandler):
"""Runs the tests inside the specifications class when any specification file is modified
"""
def __init__(self):
self.paused = False
self.banner = (
'============================================================')
def on_modified(self, event):
super(SpecificationsEventHandler, self).on_modified(event)
"""
Description:
Catches the file modified event from the watchdog package and
creates the full path to the file for submission to the test engine
of choice.
Args:
event: Contains the information for the file system event
when modification has occurred
"""
if event.is_directory:
return
if self.paused:
return
if event.src_path.endswith('_specs.py') and not self.paused:
self.paused = True
directory = os.path.abspath(os.path.dirname(event.src_path))
filename = os.path.basename(event.src_path)
file = os.path.join(directory, filename)
print(self.banner, end='\n')
print('testing specifications found in file: {0}'.format(file))
print('')
subprocess.call(['mamba', file], shell=True)
print(self.banner, end='\n')
self.paused = False
return
if __name__ == '__main__':
path = sys.argv[1]
event_handler = SpecificationsEventHandler()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
<mask token>
import sys
import os.path
import subprocess
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class SpecificationsEventHandler(FileSystemEventHandler):
"""Runs the tests inside the specifications class when any specification file is modified
"""
def __init__(self):
self.paused = False
self.banner = (
'============================================================')
def on_modified(self, event):
super(SpecificationsEventHandler, self).on_modified(event)
"""
Description:
Catches the file modified event from the watchdog package and
creates the full path to the file for submission to the test engine
of choice.
Args:
event: Contains the information for the file system event
when modification has occurred
"""
if event.is_directory:
return
if self.paused:
return
if event.src_path.endswith('_specs.py') and not self.paused:
self.paused = True
directory = os.path.abspath(os.path.dirname(event.src_path))
filename = os.path.basename(event.src_path)
file = os.path.join(directory, filename)
print(self.banner, end='\n')
print('testing specifications found in file: {0}'.format(file))
print('')
subprocess.call(['mamba', file], shell=True)
print(self.banner, end='\n')
self.paused = False
return
if __name__ == '__main__':
path = sys.argv[1]
event_handler = SpecificationsEventHandler()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
"""
module : watcher.py
description : Script to automatically watch a directory (via watchdog) for tests and run them via py.test
"""
import sys
import os.path
import subprocess
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class SpecificationsEventHandler(FileSystemEventHandler):
"""Runs the tests inside the specifications class when any specification file is modified
"""
def __init__(self):
self.paused = False
self.banner = "============================================================"
def on_modified(self, event):
super(SpecificationsEventHandler, self).on_modified(event)
"""
Description:
Catches the file modified event from the watchdog package and
creates the full path to the file for submission to the test engine
of choice.
Args:
event: Contains the information for the file system event
when modification has occurred
"""
# file modified triggers directory modified as well...
if event.is_directory:
return
if self.paused:
return
if event.src_path.endswith("_specs.py") and not self.paused:
self.paused = True
#filename = os.path.basename(event.src_path)
directory = os.path.abspath(os.path.dirname(event.src_path))
filename = os.path.basename(event.src_path)
file = os.path.join(directory, filename)
print(self.banner, end="\n")
print("testing specifications found in file: {0}".format(file))
print("")
# if using pytest, uncomment the line below
#subprocess.call(['py.test', '-v', file], shell=True)
#using mamba as the test engine:
subprocess.call(['mamba', file], shell=True)
print(self.banner, end="\n")
self.paused = False
return
if __name__ == "__main__":
path = sys.argv[1]
event_handler = SpecificationsEventHandler()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
[
2,
3,
5,
6,
7
] |
2,422 |
18a49d46b39fe6e00e2ad137984cceab82f1e94b
|
<mask token>
class PromptMessage(QWidget):
<mask token>
def set_stay(self, stay):
self.m_stay = stay
<mask token>
def on_move(self):
self.m_desktop_height = self.m_desktop_height - 10
self.move(self.m_point.x(), self.m_desktop_height)
if self.m_desktop_height <= self.m_point.y():
self.m_show_tm.stop()
time.sleep(self.m_stay)
self.close()
<mask token>
|
<mask token>
class PromptMessage(QWidget):
<mask token>
def set_stay(self, stay):
self.m_stay = stay
def show_message(self, message_list):
self.m_show_tm.timeout.connect(self.on_move)
layout = QGridLayout()
num = len(message_list)
for i in range(num):
label = QLabel()
label.setText(message_list[i])
layout.addWidget(label, i, 0)
self.setLayout(layout)
self.adjustSize()
rect = QApplication.desktop().availableGeometry()
rect1 = QApplication.desktop().screenGeometry()
self.m_desktop_height = rect.height()
self.setMaximumSize(rect.width() * 0.1, rect.height() * 0.1)
self.setWindowFlags(Qt.FramelessWindowHint)
self.m_point.setX(rect.width() - self.width())
self.m_point.setY(rect.height() - self.height() - (rect1.height() -
rect.height()))
self.setWindowOpacity(0.8)
self.show()
self.m_show_tm.start(100)
def on_move(self):
self.m_desktop_height = self.m_desktop_height - 10
self.move(self.m_point.x(), self.m_desktop_height)
if self.m_desktop_height <= self.m_point.y():
self.m_show_tm.stop()
time.sleep(self.m_stay)
self.close()
<mask token>
|
<mask token>
class PromptMessage(QWidget):
def __init__(self, parent=None):
super(PromptMessage, self).__init__(parent)
self.m_show_tm = QTimer()
self.m_stay_tm = QTimer()
self.m_close_tm = QTimer()
self.m_point = QPoint()
self.m_stay = 2
def set_stay(self, stay):
self.m_stay = stay
def show_message(self, message_list):
self.m_show_tm.timeout.connect(self.on_move)
layout = QGridLayout()
num = len(message_list)
for i in range(num):
label = QLabel()
label.setText(message_list[i])
layout.addWidget(label, i, 0)
self.setLayout(layout)
self.adjustSize()
rect = QApplication.desktop().availableGeometry()
rect1 = QApplication.desktop().screenGeometry()
self.m_desktop_height = rect.height()
self.setMaximumSize(rect.width() * 0.1, rect.height() * 0.1)
self.setWindowFlags(Qt.FramelessWindowHint)
self.m_point.setX(rect.width() - self.width())
self.m_point.setY(rect.height() - self.height() - (rect1.height() -
rect.height()))
self.setWindowOpacity(0.8)
self.show()
self.m_show_tm.start(100)
def on_move(self):
self.m_desktop_height = self.m_desktop_height - 10
self.move(self.m_point.x(), self.m_desktop_height)
if self.m_desktop_height <= self.m_point.y():
self.m_show_tm.stop()
time.sleep(self.m_stay)
self.close()
<mask token>
def logs():
print(sys._getframe().f_code.co_name)
print(sys._getframe().f_back.f_code.co_name)
print(sys._getframe().f_back.f_lineno)
print(sys._getframe().f_back.f_code.co_filename)
def get_cur_info():
logs()
if __name__ == '__main__':
from CommonAPI.Log import LOG_ERROR
|
import sys
import time
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5 import *
class PromptMessage(QWidget):
def __init__(self, parent=None):
super(PromptMessage, self).__init__(parent)
self.m_show_tm = QTimer()
self.m_stay_tm = QTimer()
self.m_close_tm = QTimer()
self.m_point = QPoint()
self.m_stay = 2
def set_stay(self, stay):
self.m_stay = stay
def show_message(self, message_list):
self.m_show_tm.timeout.connect(self.on_move)
layout = QGridLayout()
num = len(message_list)
for i in range(num):
label = QLabel()
label.setText(message_list[i])
layout.addWidget(label, i, 0)
self.setLayout(layout)
self.adjustSize()
rect = QApplication.desktop().availableGeometry()
rect1 = QApplication.desktop().screenGeometry()
self.m_desktop_height = rect.height()
self.setMaximumSize(rect.width() * 0.1, rect.height() * 0.1)
self.setWindowFlags(Qt.FramelessWindowHint)
self.m_point.setX(rect.width() - self.width())
self.m_point.setY(rect.height() - self.height() - (rect1.height() -
rect.height()))
self.setWindowOpacity(0.8)
self.show()
self.m_show_tm.start(100)
def on_move(self):
self.m_desktop_height = self.m_desktop_height - 10
self.move(self.m_point.x(), self.m_desktop_height)
if self.m_desktop_height <= self.m_point.y():
self.m_show_tm.stop()
time.sleep(self.m_stay)
self.close()
import sys
def logs():
print(sys._getframe().f_code.co_name)
print(sys._getframe().f_back.f_code.co_name)
print(sys._getframe().f_back.f_lineno)
print(sys._getframe().f_back.f_code.co_filename)
def get_cur_info():
logs()
if __name__ == '__main__':
from CommonAPI.Log import LOG_ERROR
|
import sys
import time
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5 import *
class PromptMessage(QWidget):
def __init__(self, parent = None):
super(PromptMessage,self).__init__(parent)
self.m_show_tm = QTimer()
self.m_stay_tm = QTimer()
self.m_close_tm = QTimer()
self.m_point = QPoint()
self.m_stay=2
def set_stay(self, stay):
self.m_stay = stay
def show_message(self, message_list):
self.m_show_tm.timeout.connect(self.on_move)
layout=QGridLayout()
num=len(message_list)
for i in range(num):
label=QLabel()
label.setText(message_list[i])
layout.addWidget(label, i, 0)
self.setLayout(layout)
self.adjustSize()
rect = QApplication.desktop().availableGeometry()
rect1 = QApplication.desktop().screenGeometry ()
self.m_desktop_height=rect.height()
self.setMaximumSize(rect.width() * 0.1, rect.height() * 0.1)
self.setWindowFlags(Qt.FramelessWindowHint);
self.m_point.setX(rect.width() - self.width())
self.m_point.setY(rect.height() - self.height() - (rect1.height() - rect.height()))
#self.move(self.m_point)
self.setWindowOpacity(0.8)
self.show()
self.m_show_tm.start(100)
def on_move(self):
self.m_desktop_height = self.m_desktop_height - 10
self.move(self.m_point.x(), self.m_desktop_height)
if self.m_desktop_height <= self.m_point.y():
self.m_show_tm.stop()
time.sleep(self.m_stay)
self.close()
import sys
def logs():
print(sys._getframe().f_code.co_name)
print(sys._getframe().f_back.f_code.co_name)
print(sys._getframe().f_back.f_lineno)
print(sys._getframe().f_back.f_code.co_filename)
def get_cur_info() :
logs() #模拟写日志
if __name__ == "__main__":
from CommonAPI.Log import LOG_ERROR
|
[
3,
4,
8,
9,
10
] |
2,423 |
74bc530d53cd86c52c44ba8e98d4d8f502032340
|
<mask token>
class TestCRMcreateCustomer(TestCRM):
<mask token>
<mask token>
<mask token>
|
<mask token>
class TestCRMcreateCustomer(TestCRM):
def createCustomer(self):
self.driver.click('text= 客户 ')
self.driver.click('text=sYVInwAAAABJRU5ErkJggg==')
self.driver.send_keys('xpath=//*[@text="请输入"][1]', 'crm000001')
self.driver.send_keys('xpath=//*[@text="请输入"][1]', 'c000001')
self.driver.click_index('class=android.view.View', 59)
self.driver.click('text=电话营销')
self.driver.click('text=保存')
self.driver.click_index('class=android.view.View', 10)
def test_weiChat(self):
self.login()
self.createCustomer()
self.logout()
<mask token>
|
<mask token>
class TestCRMcreateCustomer(TestCRM):
def createCustomer(self):
self.driver.click('text= 客户 ')
self.driver.click('text=sYVInwAAAABJRU5ErkJggg==')
self.driver.send_keys('xpath=//*[@text="请输入"][1]', 'crm000001')
self.driver.send_keys('xpath=//*[@text="请输入"][1]', 'c000001')
self.driver.click_index('class=android.view.View', 59)
self.driver.click('text=电话营销')
self.driver.click('text=保存')
self.driver.click_index('class=android.view.View', 10)
def test_weiChat(self):
self.login()
self.createCustomer()
self.logout()
if __name__ == '__main__':
report_path = os.path.dirname(__file__
) + '/report/' + 'TestCRM_report.html'
suite = unittest.TestLoader().loadTestsFromTestCase(TestCRM)
runer = HTMLTestRunner(title='悟空CRM测试报告', description='登录', stream=open
(report_path, 'wb'), verbosity=2, retry=0, save_last_try=True)
runer.run(suite)
|
import os
import unittest
from HTMLTestRunner_cn import HTMLTestRunner
from time import sleep
from framework.SunFlower import SunFlower
from testcase.TestCRM import TestCRM
class TestCRMcreateCustomer(TestCRM):
def createCustomer(self):
self.driver.click('text= 客户 ')
self.driver.click('text=sYVInwAAAABJRU5ErkJggg==')
self.driver.send_keys('xpath=//*[@text="请输入"][1]', 'crm000001')
self.driver.send_keys('xpath=//*[@text="请输入"][1]', 'c000001')
self.driver.click_index('class=android.view.View', 59)
self.driver.click('text=电话营销')
self.driver.click('text=保存')
self.driver.click_index('class=android.view.View', 10)
def test_weiChat(self):
self.login()
self.createCustomer()
self.logout()
if __name__ == '__main__':
report_path = os.path.dirname(__file__
) + '/report/' + 'TestCRM_report.html'
suite = unittest.TestLoader().loadTestsFromTestCase(TestCRM)
runer = HTMLTestRunner(title='悟空CRM测试报告', description='登录', stream=open
(report_path, 'wb'), verbosity=2, retry=0, save_last_try=True)
runer.run(suite)
|
# -*- encoding:utf-8 -*-
import os
import unittest
from HTMLTestRunner_cn import HTMLTestRunner
from time import sleep
from framework.SunFlower import SunFlower
from testcase.TestCRM import TestCRM
class TestCRMcreateCustomer(TestCRM):
# 创建客户
def createCustomer(self):
# 点击客户图标
self.driver.click("text= 客户 ")
# 点击添加客户按钮
self.driver.click("text=sYVInwAAAABJRU5ErkJggg==")
#输入客户名称
self.driver.send_keys("xpath=//*[@text=\"请输入\"][1]","crm000001")
#输入客户编号
self.driver.send_keys("xpath=//*[@text=\"请输入\"][1]","c000001")
#选择客户信息来源
self.driver.click_index("class=android.view.View",59)
self.driver.click("text=电话营销")
#保存
self.driver.click("text=保存")
#点击返回
self.driver.click_index("class=android.view.View",10)
# sleep(5)
# # # 向上滑动屏幕
# # self.driver.swipe_up(n=3)
def test_weiChat(self):
self.login()
self.createCustomer()
self.logout()
if __name__ == "__main__":
report_path = os.path.dirname(__file__) + "/report/" + "TestCRM_report.html"
suite = unittest.TestLoader().loadTestsFromTestCase(TestCRM)
runer = HTMLTestRunner(title="悟空CRM测试报告", description="登录", stream=open(report_path, "wb"),
verbosity=2, retry=0, save_last_try=True)
runer.run(suite)
|
[
1,
3,
4,
5,
6
] |
2,424 |
825f3b930fee319314d520a32c2f9dcd718505ab
|
<mask token>
|
<mask token>
for _ in range(int(input())):
noe = int(input())
arr = [int(x) for x in input().split()]
left = arr[0]
rite = sum(arr) - left
mins = abs(rite - left)
for i in range(1, noe - 1):
left += arr[i]
rite -= arr[i]
print(left, rite)
mins = min(mins, abs(left - rite))
print(mins)
|
'''
Sample Input
1
5
1 2 3 2 1
Sample Output
3
'''
for _ in range(int(input())):
noe = int(input())
arr = [int(x) for x in input().split()]
left = arr[0]
rite = sum(arr) - left
mins = abs(rite - left)
for i in range(1, noe-1):
left += arr[i]
rite -= arr[i]
print(left, rite)
mins = min(mins, abs(left - rite))
print(mins)
| null | null |
[
0,
1,
2
] |
2,425 |
ff0495ee1f4aa1f243c82b709a974d3d7c37e8bd
|
<mask token>
|
<mask token>
if download_dir != '':
os.chdir(download_dir)
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
soup.findAll('a')
one_a_tag = soup.findAll('a')[startindex:]
links = [one_a_tag[i]['href'] for i in range(len(one_a_tag))]
for link in links:
print(link)
download_url = url + link
urllib.request.urlretrieve(download_url, './' + link)
if argentina:
subprocess.check_call(['cdo', 'sellonlatbox,-80,-44,-60,-20',
link, link.replace('.nc', 'ARG.nc')])
subprocess.check_call(['rm', link])
time.sleep(1)
else:
print('Please enter a valid download direction')
|
<mask token>
download_dir = ''
url = 'https://data.chc.ucsb.edu/products/CHIRPS-2.0/global_dekad/netcdf/'
argentina = False
startindex = 5
if download_dir != '':
os.chdir(download_dir)
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
soup.findAll('a')
one_a_tag = soup.findAll('a')[startindex:]
links = [one_a_tag[i]['href'] for i in range(len(one_a_tag))]
for link in links:
print(link)
download_url = url + link
urllib.request.urlretrieve(download_url, './' + link)
if argentina:
subprocess.check_call(['cdo', 'sellonlatbox,-80,-44,-60,-20',
link, link.replace('.nc', 'ARG.nc')])
subprocess.check_call(['rm', link])
time.sleep(1)
else:
print('Please enter a valid download direction')
|
<mask token>
import os
import requests
import urllib.request
import time
from bs4 import BeautifulSoup
import subprocess
download_dir = ''
url = 'https://data.chc.ucsb.edu/products/CHIRPS-2.0/global_dekad/netcdf/'
argentina = False
startindex = 5
if download_dir != '':
os.chdir(download_dir)
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
soup.findAll('a')
one_a_tag = soup.findAll('a')[startindex:]
links = [one_a_tag[i]['href'] for i in range(len(one_a_tag))]
for link in links:
print(link)
download_url = url + link
urllib.request.urlretrieve(download_url, './' + link)
if argentina:
subprocess.check_call(['cdo', 'sellonlatbox,-80,-44,-60,-20',
link, link.replace('.nc', 'ARG.nc')])
subprocess.check_call(['rm', link])
time.sleep(1)
else:
print('Please enter a valid download direction')
|
"""
Download the full CHIRPS 2.0 data for a specific type (dekads, pentads, daily ...)
with the possibility to automatically recut the data over Argentina.
"""
import os
import requests
import urllib.request
import time
from bs4 import BeautifulSoup
import subprocess
##############
# PARAMETERS to define
# Set a pre-existing directory where the CHIRPS files must be saved
download_dir = ""
# Url for global dekad, change if you want another product
url = 'https://data.chc.ucsb.edu/products/CHIRPS-2.0/global_dekad/netcdf/'
# Recut the data over Argentina
argentina = False
startindex = 5
##############
if download_dir != "":
os.chdir(download_dir)
response = requests.get(url)
soup = BeautifulSoup(response.text,"html.parser")
soup.findAll('a')
# First link to download in the page
# Here the index = 5 is valid for the dekad link but it may change if you download another product (ex : daily, dekad, monthly)
# To be sure you can check the link and check that it is the first year
one_a_tag = soup.findAll('a')[startindex:]
links = [one_a_tag[i]['href'] for i in range(len(one_a_tag))]
for link in links:
print(link)
download_url = url + link
urllib.request.urlretrieve(download_url,"./"+link)
# Section to recut CHIRPS over Argentina
if argentina:
subprocess.check_call(["cdo", "sellonlatbox,-80,-44,-60,-20", link, link.replace(".nc", "ARG.nc")])
subprocess.check_call(["rm", link])
time.sleep(1)
else:
print("Please enter a valid download direction")
|
[
0,
1,
2,
3,
4
] |
2,426 |
70b08b9e8c1510a9be48a4bc1de39c6c85b36eed
|
<mask token>
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.encoder = nn.Sequential(nn.Conv2d(1, 6, 5), nn.MaxPool2d(2, 2),
nn.ReLU(True), nn.Conv2d(6, 16, 5), nn.MaxPool2d(2, 2), nn.ReLU
(True))
self.classifier = nn.Sequential(nn.Linear(16 * 4 * 4, 120), nn.ReLU
(), nn.Linear(120, 84), nn.ReLU(), nn.Linear(84, 10), nn.Softmax(1)
)
def forward(self, x):
x = self.encoder(x)
x = x.view(-1, 16 * 4 * 4)
x = self.classifier(x)
return x
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
test = MVPP('programs/mnist.txt')
for batch_idx, (data, target) in enumerate(train_loader):
for inner_iter in range(1):
data, target = data.to(device), target.to(device)
output = model(data)
test.parameters = output.tolist()
test.normalize_probs()
value = sum(target.tolist())
observation = ':- not addition(i1,i2,' + str(value) + ').'
gradients = test.gradients_one_obs(observation)
if device.type == 'cuda':
grad_by_prob = -1 * torch.cuda.FloatTensor(gradients)
else:
grad_by_prob = -1 * torch.FloatTensor(gradients)
loss = F.nll_loss(output, target)
output.backward(grad_by_prob, retain_graph=True)
if (batch_idx + 1) % args.multiExampleNum == 0 and inner_iter == 0:
optimizer.step()
optimizer.zero_grad()
if batch_idx % args.log_interval == 0 and inner_iter == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.
format(epoch, batch_idx * len(data), len(train_loader.
dataset), 100.0 * batch_idx / len(train_loader), loss.
item()))
print(observation)
print('Output: {}'.format(output.data.tolist()))
print('Gradient: {}'.format(grad_by_prob))
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.
format(test_loss, correct, len(test_loader.dataset), 100.0 *
correct / len(test_loader.dataset)))
def main():
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=2, metavar='N',
help='input batch size for training (default: 2)')
parser.add_argument('--test-batch-size', type=int, default=1000,
metavar='N', help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=1, metavar='N', help=
'number of epochs to train (default: 1)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help=
'random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=1000, metavar=
'N', help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
parser.add_argument('--multiExampleNum', type=int, default=1, metavar=
'N', help=
'input the number of examples whose gradients are accumulated before back-propogation (default: 10)'
)
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device('cuda' if use_cuda else 'cpu')
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',
train=True, download=True, transform=transforms.Compose([transforms
.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',
train=False, transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.
test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader)
if args.save_model:
torch.save(model.state_dict(), 'mnist_cnn.pt')
<mask token>
|
<mask token>
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.encoder = nn.Sequential(nn.Conv2d(1, 6, 5), nn.MaxPool2d(2, 2),
nn.ReLU(True), nn.Conv2d(6, 16, 5), nn.MaxPool2d(2, 2), nn.ReLU
(True))
self.classifier = nn.Sequential(nn.Linear(16 * 4 * 4, 120), nn.ReLU
(), nn.Linear(120, 84), nn.ReLU(), nn.Linear(84, 10), nn.Softmax(1)
)
def forward(self, x):
x = self.encoder(x)
x = x.view(-1, 16 * 4 * 4)
x = self.classifier(x)
return x
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
test = MVPP('programs/mnist.txt')
for batch_idx, (data, target) in enumerate(train_loader):
for inner_iter in range(1):
data, target = data.to(device), target.to(device)
output = model(data)
test.parameters = output.tolist()
test.normalize_probs()
value = sum(target.tolist())
observation = ':- not addition(i1,i2,' + str(value) + ').'
gradients = test.gradients_one_obs(observation)
if device.type == 'cuda':
grad_by_prob = -1 * torch.cuda.FloatTensor(gradients)
else:
grad_by_prob = -1 * torch.FloatTensor(gradients)
loss = F.nll_loss(output, target)
output.backward(grad_by_prob, retain_graph=True)
if (batch_idx + 1) % args.multiExampleNum == 0 and inner_iter == 0:
optimizer.step()
optimizer.zero_grad()
if batch_idx % args.log_interval == 0 and inner_iter == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.
format(epoch, batch_idx * len(data), len(train_loader.
dataset), 100.0 * batch_idx / len(train_loader), loss.
item()))
print(observation)
print('Output: {}'.format(output.data.tolist()))
print('Gradient: {}'.format(grad_by_prob))
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.
format(test_loss, correct, len(test_loader.dataset), 100.0 *
correct / len(test_loader.dataset)))
def main():
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=2, metavar='N',
help='input batch size for training (default: 2)')
parser.add_argument('--test-batch-size', type=int, default=1000,
metavar='N', help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=1, metavar='N', help=
'number of epochs to train (default: 1)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help=
'random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=1000, metavar=
'N', help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
parser.add_argument('--multiExampleNum', type=int, default=1, metavar=
'N', help=
'input the number of examples whose gradients are accumulated before back-propogation (default: 10)'
)
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device('cuda' if use_cuda else 'cpu')
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',
train=True, download=True, transform=transforms.Compose([transforms
.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',
train=False, transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.
test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader)
if args.save_model:
torch.save(model.state_dict(), 'mnist_cnn.pt')
if __name__ == '__main__':
main()
|
<mask token>
dprogram = """
img(i1). img(i2).
addition(A,B,N) :- digit(A,1,N1), digit(B,1,N2), N=N1+N2.
nn(m(X,1), digit, [0,1,2,3,4,5,6,7,8,9]) :- img(X).
"""
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.encoder = nn.Sequential(nn.Conv2d(1, 6, 5), nn.MaxPool2d(2, 2),
nn.ReLU(True), nn.Conv2d(6, 16, 5), nn.MaxPool2d(2, 2), nn.ReLU
(True))
self.classifier = nn.Sequential(nn.Linear(16 * 4 * 4, 120), nn.ReLU
(), nn.Linear(120, 84), nn.ReLU(), nn.Linear(84, 10), nn.Softmax(1)
)
def forward(self, x):
x = self.encoder(x)
x = x.view(-1, 16 * 4 * 4)
x = self.classifier(x)
return x
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
test = MVPP('programs/mnist.txt')
for batch_idx, (data, target) in enumerate(train_loader):
for inner_iter in range(1):
data, target = data.to(device), target.to(device)
output = model(data)
test.parameters = output.tolist()
test.normalize_probs()
value = sum(target.tolist())
observation = ':- not addition(i1,i2,' + str(value) + ').'
gradients = test.gradients_one_obs(observation)
if device.type == 'cuda':
grad_by_prob = -1 * torch.cuda.FloatTensor(gradients)
else:
grad_by_prob = -1 * torch.FloatTensor(gradients)
loss = F.nll_loss(output, target)
output.backward(grad_by_prob, retain_graph=True)
if (batch_idx + 1) % args.multiExampleNum == 0 and inner_iter == 0:
optimizer.step()
optimizer.zero_grad()
if batch_idx % args.log_interval == 0 and inner_iter == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.
format(epoch, batch_idx * len(data), len(train_loader.
dataset), 100.0 * batch_idx / len(train_loader), loss.
item()))
print(observation)
print('Output: {}'.format(output.data.tolist()))
print('Gradient: {}'.format(grad_by_prob))
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.
format(test_loss, correct, len(test_loader.dataset), 100.0 *
correct / len(test_loader.dataset)))
def main():
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=2, metavar='N',
help='input batch size for training (default: 2)')
parser.add_argument('--test-batch-size', type=int, default=1000,
metavar='N', help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=1, metavar='N', help=
'number of epochs to train (default: 1)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help=
'random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=1000, metavar=
'N', help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
parser.add_argument('--multiExampleNum', type=int, default=1, metavar=
'N', help=
'input the number of examples whose gradients are accumulated before back-propogation (default: 10)'
)
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device('cuda' if use_cuda else 'cpu')
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',
train=True, download=True, transform=transforms.Compose([transforms
.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',
train=False, transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.
test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader)
if args.save_model:
torch.save(model.state_dict(), 'mnist_cnn.pt')
if __name__ == '__main__':
main()
|
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import sys
import json
import math
from klpmln import MVPP
dprogram = """
img(i1). img(i2).
addition(A,B,N) :- digit(A,1,N1), digit(B,1,N2), N=N1+N2.
nn(m(X,1), digit, [0,1,2,3,4,5,6,7,8,9]) :- img(X).
"""
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.encoder = nn.Sequential(nn.Conv2d(1, 6, 5), nn.MaxPool2d(2, 2),
nn.ReLU(True), nn.Conv2d(6, 16, 5), nn.MaxPool2d(2, 2), nn.ReLU
(True))
self.classifier = nn.Sequential(nn.Linear(16 * 4 * 4, 120), nn.ReLU
(), nn.Linear(120, 84), nn.ReLU(), nn.Linear(84, 10), nn.Softmax(1)
)
def forward(self, x):
x = self.encoder(x)
x = x.view(-1, 16 * 4 * 4)
x = self.classifier(x)
return x
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
test = MVPP('programs/mnist.txt')
for batch_idx, (data, target) in enumerate(train_loader):
for inner_iter in range(1):
data, target = data.to(device), target.to(device)
output = model(data)
test.parameters = output.tolist()
test.normalize_probs()
value = sum(target.tolist())
observation = ':- not addition(i1,i2,' + str(value) + ').'
gradients = test.gradients_one_obs(observation)
if device.type == 'cuda':
grad_by_prob = -1 * torch.cuda.FloatTensor(gradients)
else:
grad_by_prob = -1 * torch.FloatTensor(gradients)
loss = F.nll_loss(output, target)
output.backward(grad_by_prob, retain_graph=True)
if (batch_idx + 1) % args.multiExampleNum == 0 and inner_iter == 0:
optimizer.step()
optimizer.zero_grad()
if batch_idx % args.log_interval == 0 and inner_iter == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.
format(epoch, batch_idx * len(data), len(train_loader.
dataset), 100.0 * batch_idx / len(train_loader), loss.
item()))
print(observation)
print('Output: {}'.format(output.data.tolist()))
print('Gradient: {}'.format(grad_by_prob))
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.
format(test_loss, correct, len(test_loader.dataset), 100.0 *
correct / len(test_loader.dataset)))
def main():
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=2, metavar='N',
help='input batch size for training (default: 2)')
parser.add_argument('--test-batch-size', type=int, default=1000,
metavar='N', help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=1, metavar='N', help=
'number of epochs to train (default: 1)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help=
'random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=1000, metavar=
'N', help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
parser.add_argument('--multiExampleNum', type=int, default=1, metavar=
'N', help=
'input the number of examples whose gradients are accumulated before back-propogation (default: 10)'
)
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device('cuda' if use_cuda else 'cpu')
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',
train=True, download=True, transform=transforms.Compose([transforms
.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',
train=False, transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.
test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader)
if args.save_model:
torch.save(model.state_dict(), 'mnist_cnn.pt')
if __name__ == '__main__':
main()
|
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import sys
import json
import math
from klpmln import MVPP
dprogram = '''
img(i1). img(i2).
addition(A,B,N) :- digit(A,1,N1), digit(B,1,N2), N=N1+N2.
nn(m(X,1), digit, [0,1,2,3,4,5,6,7,8,9]) :- img(X).
'''
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.encoder = nn.Sequential(
nn.Conv2d(1, 6, 5), # 6 is the output chanel size; 5 is the kernal size; 1 (chanel) 28 28 -> 6 24 24
nn.MaxPool2d(2, 2), # kernal size 2; stride size 2; 6 24 24 -> 6 12 12
nn.ReLU(True), # inplace=True means that it will modify the input directly thus save memory
nn.Conv2d(6, 16, 5), # 6 12 12 -> 16 8 8
nn.MaxPool2d(2, 2), # 16 8 8 -> 16 4 4
nn.ReLU(True)
)
self.classifier = nn.Sequential(
nn.Linear(16 * 4 * 4, 120),
nn.ReLU(),
nn.Linear(120, 84),
nn.ReLU(),
nn.Linear(84, 10),
nn.Softmax(1)
)
def forward(self, x):
x = self.encoder(x)
x = x.view(-1, 16 * 4 * 4)
x = self.classifier(x)
# return F.log_softmax(x, dim=1)
return x
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
test = MVPP("programs/mnist.txt")
for batch_idx, (data, target) in enumerate(train_loader):
for inner_iter in range(1):
data, target = data.to(device), target.to(device)
# optimizer.zero_grad()
output = model(data)
# test = MVPP("programs/mnist.txt")
test.parameters = output.tolist()
test.normalize_probs()
# construct observation addition(i1, i2, sum)
value = sum(target.tolist())
observation = ":- not addition(i1,i2,"+ str(value) + ")."
# we calculate gradients with exact computation
gradients = test.gradients_one_obs(observation)
if device.type == 'cuda':
grad_by_prob = -1 * torch.cuda.FloatTensor(gradients)
else:
grad_by_prob = -1 * torch.FloatTensor(gradients)
loss = F.nll_loss(output, target)
output.backward(grad_by_prob, retain_graph=True)
if (batch_idx+1) % args.multiExampleNum == 0 and inner_iter == 0:
optimizer.step()
optimizer.zero_grad()
# optimizer.step()
if batch_idx % args.log_interval == 0 and inner_iter == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
print(observation)
print("Output: {}".format(output.data.tolist()))
print("Gradient: {}".format(grad_by_prob))
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=2, metavar='N',
help='input batch size for training (default: 2)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=1, metavar='N',
help='number of epochs to train (default: 1)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=1000, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
parser.add_argument('--multiExampleNum', type=int, default=1, metavar='N',
help='input the number of examples whose gradients are accumulated before back-propogation (default: 10)')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
# optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader)
if (args.save_model):
torch.save(model.state_dict(),"mnist_cnn.pt")
if __name__ == '__main__':
main()
|
[
6,
7,
8,
9,
10
] |
2,427 |
2ea335dd8d879731aad7713499440db6d1f60d36
|
<mask token>
class ArchiveParserTest(unittest.TestCase):
<mask token>
def testReadHeaderPass(self):
"""Tests that archive is read when header is correct.
Parses archive content containing only the signature.
"""
try:
archive = archive_parser.Archive(archive_parser.Archive.GLOBAL_SIG)
archive.Parse()
except ValueError:
self.fail('Archive reader read improperly.')
def testReadHeaderFail(self):
"""Tests that parser throws error when header is invalid.
Parses archive content lacking the correct signature.
"""
archive = archive_parser.Archive('Fail.')
self.assertRaises(ValueError, archive.Parse)
def testReadFile(self):
"""Tests that file is read correctly.
Tests that correctly formatted file in archive is read correctly.
"""
content = archive_parser.Archive.GLOBAL_SIG
file_name = 'test_file'
content += file_name + ' ' * (archive_parser.Archive.FILE_ID_LENGTH -
len(file_name))
content += ' ' * archive_parser.Archive.FILE_TIMESTAMP_LENGTH
content += ' ' * archive_parser.Archive.OWNER_ID_LENGTH
content += ' ' * archive_parser.Archive.GROUP_ID_LENGTH
content += ' ' * archive_parser.Archive.FILE_MODE_LENGTH
message = 'test file contents'
message_size = str(len(message))
content += message_size + ' ' * (archive_parser.Archive.
CONTENT_SIZE_LENGTH - len(message_size))
content += archive_parser.Archive.END_TAG
content += message
archive = archive_parser.Archive(content)
archive.Parse()
self.assertIn(file_name, archive.files)
self.assertEquals(archive.files[file_name], message)
<mask token>
|
<mask token>
class ArchiveParserTest(unittest.TestCase):
"""Unit tests for archive_parser of vts.utils.python.archive.
"""
def testReadHeaderPass(self):
"""Tests that archive is read when header is correct.
Parses archive content containing only the signature.
"""
try:
archive = archive_parser.Archive(archive_parser.Archive.GLOBAL_SIG)
archive.Parse()
except ValueError:
self.fail('Archive reader read improperly.')
def testReadHeaderFail(self):
"""Tests that parser throws error when header is invalid.
Parses archive content lacking the correct signature.
"""
archive = archive_parser.Archive('Fail.')
self.assertRaises(ValueError, archive.Parse)
def testReadFile(self):
"""Tests that file is read correctly.
Tests that correctly formatted file in archive is read correctly.
"""
content = archive_parser.Archive.GLOBAL_SIG
file_name = 'test_file'
content += file_name + ' ' * (archive_parser.Archive.FILE_ID_LENGTH -
len(file_name))
content += ' ' * archive_parser.Archive.FILE_TIMESTAMP_LENGTH
content += ' ' * archive_parser.Archive.OWNER_ID_LENGTH
content += ' ' * archive_parser.Archive.GROUP_ID_LENGTH
content += ' ' * archive_parser.Archive.FILE_MODE_LENGTH
message = 'test file contents'
message_size = str(len(message))
content += message_size + ' ' * (archive_parser.Archive.
CONTENT_SIZE_LENGTH - len(message_size))
content += archive_parser.Archive.END_TAG
content += message
archive = archive_parser.Archive(content)
archive.Parse()
self.assertIn(file_name, archive.files)
self.assertEquals(archive.files[file_name], message)
<mask token>
|
<mask token>
class ArchiveParserTest(unittest.TestCase):
"""Unit tests for archive_parser of vts.utils.python.archive.
"""
def testReadHeaderPass(self):
"""Tests that archive is read when header is correct.
Parses archive content containing only the signature.
"""
try:
archive = archive_parser.Archive(archive_parser.Archive.GLOBAL_SIG)
archive.Parse()
except ValueError:
self.fail('Archive reader read improperly.')
def testReadHeaderFail(self):
"""Tests that parser throws error when header is invalid.
Parses archive content lacking the correct signature.
"""
archive = archive_parser.Archive('Fail.')
self.assertRaises(ValueError, archive.Parse)
def testReadFile(self):
"""Tests that file is read correctly.
Tests that correctly formatted file in archive is read correctly.
"""
content = archive_parser.Archive.GLOBAL_SIG
file_name = 'test_file'
content += file_name + ' ' * (archive_parser.Archive.FILE_ID_LENGTH -
len(file_name))
content += ' ' * archive_parser.Archive.FILE_TIMESTAMP_LENGTH
content += ' ' * archive_parser.Archive.OWNER_ID_LENGTH
content += ' ' * archive_parser.Archive.GROUP_ID_LENGTH
content += ' ' * archive_parser.Archive.FILE_MODE_LENGTH
message = 'test file contents'
message_size = str(len(message))
content += message_size + ' ' * (archive_parser.Archive.
CONTENT_SIZE_LENGTH - len(message_size))
content += archive_parser.Archive.END_TAG
content += message
archive = archive_parser.Archive(content)
archive.Parse()
self.assertIn(file_name, archive.files)
self.assertEquals(archive.files[file_name], message)
if __name__ == '__main__':
unittest.main()
|
import os
import unittest
from vts.utils.python.archive import archive_parser
class ArchiveParserTest(unittest.TestCase):
"""Unit tests for archive_parser of vts.utils.python.archive.
"""
def testReadHeaderPass(self):
"""Tests that archive is read when header is correct.
Parses archive content containing only the signature.
"""
try:
archive = archive_parser.Archive(archive_parser.Archive.GLOBAL_SIG)
archive.Parse()
except ValueError:
self.fail('Archive reader read improperly.')
def testReadHeaderFail(self):
"""Tests that parser throws error when header is invalid.
Parses archive content lacking the correct signature.
"""
archive = archive_parser.Archive('Fail.')
self.assertRaises(ValueError, archive.Parse)
def testReadFile(self):
"""Tests that file is read correctly.
Tests that correctly formatted file in archive is read correctly.
"""
content = archive_parser.Archive.GLOBAL_SIG
file_name = 'test_file'
content += file_name + ' ' * (archive_parser.Archive.FILE_ID_LENGTH -
len(file_name))
content += ' ' * archive_parser.Archive.FILE_TIMESTAMP_LENGTH
content += ' ' * archive_parser.Archive.OWNER_ID_LENGTH
content += ' ' * archive_parser.Archive.GROUP_ID_LENGTH
content += ' ' * archive_parser.Archive.FILE_MODE_LENGTH
message = 'test file contents'
message_size = str(len(message))
content += message_size + ' ' * (archive_parser.Archive.
CONTENT_SIZE_LENGTH - len(message_size))
content += archive_parser.Archive.END_TAG
content += message
archive = archive_parser.Archive(content)
archive.Parse()
self.assertIn(file_name, archive.files)
self.assertEquals(archive.files[file_name], message)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import unittest
from vts.utils.python.archive import archive_parser
class ArchiveParserTest(unittest.TestCase):
"""Unit tests for archive_parser of vts.utils.python.archive.
"""
def testReadHeaderPass(self):
"""Tests that archive is read when header is correct.
Parses archive content containing only the signature.
"""
try:
archive = archive_parser.Archive(archive_parser.Archive.GLOBAL_SIG)
archive.Parse()
except ValueError:
self.fail('Archive reader read improperly.')
def testReadHeaderFail(self):
"""Tests that parser throws error when header is invalid.
Parses archive content lacking the correct signature.
"""
archive = archive_parser.Archive('Fail.')
self.assertRaises(ValueError, archive.Parse)
def testReadFile(self):
"""Tests that file is read correctly.
Tests that correctly formatted file in archive is read correctly.
"""
content = archive_parser.Archive.GLOBAL_SIG
file_name = 'test_file'
content += file_name + ' ' * (archive_parser.Archive.FILE_ID_LENGTH -
len(file_name))
content += ' ' * archive_parser.Archive.FILE_TIMESTAMP_LENGTH
content += ' ' * archive_parser.Archive.OWNER_ID_LENGTH
content += ' ' * archive_parser.Archive.GROUP_ID_LENGTH
content += ' ' * archive_parser.Archive.FILE_MODE_LENGTH
message = 'test file contents'
message_size = str(len(message))
content += message_size + ' ' * (archive_parser.Archive.CONTENT_SIZE_LENGTH -
len(message_size))
content += archive_parser.Archive.END_TAG
content += message
archive = archive_parser.Archive(content)
archive.Parse()
self.assertIn(file_name, archive.files)
self.assertEquals(archive.files[file_name], message)
if __name__ == "__main__":
unittest.main()
|
[
4,
5,
6,
7,
8
] |
2,428 |
d6bc8afcdb7636085b01add860f808024fbe566d
|
import sys
lines = sys.stdin.readlines()
t = int(lines[0])
for i in range(t):
c = i*10+1
n = int(lines[c]) - 1
first = [x.strip() for x in [
lines[c+1],
lines[c+2],
lines[c+3],
lines[c+4]]]
first = [s.split() for s in first]
m = int(lines[c+5]) - 1
second = [x.strip() for x in [
lines[c+6],
lines[c+7],
lines[c+8],
lines[c+9]]]
second = [s.split() for s in second]
results = [a for a in first[n] if a in second[m] and a is not ' ']
if len(results) == 1:
print 'Case #{nr}: {number}'.format(nr=(i+1), number=results[0])
if len(results) > 1:
print 'Case #{nr}: Bad magician!'.format(nr=(i+1))
if len(results) == 0:
print 'Case #{nr}: Volunteer cheated!'.format(nr=(i+1))
| null | null | null | null |
[
0
] |
2,429 |
0b833276ca10118f2d60e229ff03400b03915958
|
<mask token>
class GenomicArray:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def as_columns(self, **columns):
"""Wrap the named columns in this instance's metadata."""
return self.__class__.from_columns(columns, self.meta)
def as_dataframe(self, dframe: pd.DataFrame, reset_index: bool=False):
"""Wrap the given pandas DataFrame in this instance's metadata."""
if reset_index:
dframe = dframe.reset_index(drop=True)
return self.__class__(dframe, self.meta.copy())
def as_series(self, arraylike: Iterable) ->pd.Series:
"""Coerce `arraylike` to a Series with this instance's index."""
return pd.Series(arraylike, index=self.data.index)
<mask token>
<mask token>
def __eq__(self, other) ->bool:
return isinstance(other, self.__class__) and self.data.equals(other
.data)
def __len__(self) ->int:
return len(self.data)
<mask token>
<mask token>
def __setitem__(self, index, value):
"""Assign to a portion of the data."""
if isinstance(index, int):
self.data.iloc[index] = value
elif isinstance(index, str):
self.data[index] = value
elif isinstance(index, tuple) and len(index) == 2 and index[1
] in self.data.columns:
self.data.loc[index] = value
else:
assert isinstance(index, slice) or len(index) > 0
self.data[index] = value
def __delitem__(self, index):
return NotImplemented
def __iter__(self):
return self.data.itertuples(index=False)
<mask token>
<mask token>
<mask token>
@property
def end(self) ->pd.Series:
"""Get column 'end'."""
return self.data['end']
<mask token>
def autosomes(self, also=None):
"""Select chromosomes w/ integer names, ignoring any 'chr' prefixes."""
is_auto = self.chromosome.str.match('(chr)?\\d+$', na=False)
if not is_auto.any():
return self
if also is not None:
if isinstance(also, pd.Series):
is_auto |= also
else:
if isinstance(also, str):
also = [also]
for a_chrom in also:
is_auto |= self.chromosome == a_chrom
return self[is_auto]
<mask token>
<mask token>
def by_ranges(self, other, mode: str='outer', keep_empty: bool=True
) ->Iterator:
"""Group rows by another GenomicArray's bin coordinate ranges.
For example, this can be used to group SNVs by CNV segments.
Bins in this array that fall outside the other array's bins are skipped.
Parameters
----------
other : GenomicArray
Another GA instance.
mode : string
Determines what to do with bins that overlap a boundary of the
selection. Possible values are:
- ``inner``: Drop the bins on the selection boundary, don't emit them.
- ``outer``: Keep/emit those bins as they are.
- ``trim``: Emit those bins but alter their boundaries to match the
selection; the bin start or end position is replaced with the
selection boundary position.
keep_empty : bool
Whether to also yield `other` bins with no overlapping bins in
`self`, or to skip them when iterating.
Yields
------
tuple
(other bin, GenomicArray of overlapping rows in self)
"""
for bin_row, subrange in by_ranges(self.data, other.data, mode,
keep_empty):
if len(subrange):
yield bin_row, self.as_dataframe(subrange)
elif keep_empty:
yield bin_row, self.as_rows(subrange)
def coords(self, also: Union[str, Iterable[str]]=()):
"""Iterate over plain coordinates of each bin: chromosome, start, end.
Parameters
----------
also : str, or iterable of strings
Also include these columns from `self`, in addition to chromosome,
start, and end.
Example, yielding rows in BED format:
>>> probes.coords(also=["gene", "strand"])
"""
cols = list(GenomicArray._required_columns)
if also:
if isinstance(also, str):
cols.append(also)
else:
cols.extend(also)
coordframe = self.data.loc[:, cols]
return coordframe.itertuples(index=False)
<mask token>
<mask token>
def in_ranges(self, chrom: Optional[str]=None, starts: Optional[
Sequence[Numeric]]=None, ends: Optional[Sequence[Numeric]]=None,
mode: str='outer'):
"""Get the GenomicArray portion within the specified ranges.
Similar to `in_ranges`, but concatenating the selections of all the
regions specified by the `starts` and `ends` arrays.
Parameters
----------
chrom : str or None
Chromosome name to select. Use None if `self` has only one
chromosome.
starts : int array, or None
Start coordinates of ranges to select, in 0-based coordinates.
If None, start from 0.
ends : int array, or None
End coordinates of ranges to select. If None, select to the end of the
chromosome. If `starts` and `ends` are both specified, they must be
arrays of equal length.
mode : str
As in `by_ranges`: ``outer`` includes bins straddling the range
boundaries, ``trim`` additionally alters the straddling bins'
endpoints to match the range boundaries, and ``inner`` excludes
those bins.
Returns
-------
GenomicArray
Concatenation of all the subsets of `self` enclosed by the specified
ranges.
"""
table = pd.concat(iter_ranges(self.data, chrom, starts, ends, mode),
sort=False)
return self.as_dataframe(table)
<mask token>
def iter_ranges_of(self, other, column: str, mode: str='outer',
keep_empty: bool=True):
"""Group rows by another GenomicArray's bin coordinate ranges.
For example, this can be used to group SNVs by CNV segments.
Bins in this array that fall outside the other array's bins are skipped.
Parameters
----------
other : GenomicArray
Another GA instance.
column : string
Column name in `self` to extract values from.
mode : string
Determines what to do with bins that overlap a boundary of the
selection. Possible values are:
- ``inner``: Drop the bins on the selection boundary, don't emit them.
- ``outer``: Keep/emit those bins as they are.
- ``trim``: Emit those bins but alter their boundaries to match the
selection; the bin start or end position is replaced with the
selection boundary position.
keep_empty : bool
Whether to also yield `other` bins with no overlapping bins in
`self`, or to skip them when iterating.
Yields
------
tuple
(other bin, GenomicArray of overlapping rows in self)
"""
if column not in self.data.columns:
raise ValueError(f'No column named {column!r} in this object')
ser = self.data[column]
for slc in iter_slices(self.data, other.data, mode, keep_empty):
yield ser[slc]
<mask token>
<mask token>
<mask token>
def add_columns(self, **columns):
"""Add the given columns to a copy of this GenomicArray.
Parameters
----------
**columns : array
Keyword arguments where the key is the new column's name and the
value is an array of the same length as `self` which will be the new
column's values.
Returns
-------
GenomicArray or subclass
A new instance of `self` with the given columns included in the
underlying dataframe.
"""
return self.as_dataframe(self.data.assign(**columns))
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def cut(self, other, combine=None):
"""Split this array's regions at the boundaries in `other`."""
return NotImplemented
<mask token>
<mask token>
<mask token>
def resize_ranges(self, bp: int, chrom_sizes: Optional[Mapping[str,
Numeric]]=None):
"""Resize each genomic bin by a fixed number of bases at each end.
Bin 'start' values have a minimum of 0, and `chrom_sizes` can
specify each chromosome's maximum 'end' value.
Similar to 'bedtools slop'.
Parameters
----------
bp : int
Number of bases in each direction to expand or shrink each bin.
Applies to 'start' and 'end' values symmetrically, and may be
positive (expand) or negative (shrink).
chrom_sizes : dict of string-to-int
Chromosome name to length in base pairs. If given, all chromosomes
in `self` must be included.
"""
table = self.data
limits = {'lower': 0}
if chrom_sizes:
limits['upper'] = self.chromosome.replace(chrom_sizes)
table = table.assign(start=(table['start'] - bp).clip(**limits),
end=(table['end'] + bp).clip(**limits))
if bp < 0:
ok_size = table['end'] - table['start'] > 0
logging.debug('Dropping %d bins with size <= 0', (~ok_size).sum())
table = table[ok_size]
return self.as_dataframe(table.copy())
<mask token>
<mask token>
<mask token>
<mask token>
def _get_gene_map(self) ->OrderedDict:
"""Map unique gene names to their indices in this array.
Returns
-------
OrderedDict
An (ordered) dictionary of unique gene names and the data indices of
their segments in the order of occurrence (genomic order).
"""
if 'gene' not in self.data:
return OrderedDict()
genes: OrderedDict = OrderedDict()
for idx, genestr in self.data['gene'].items():
if pd.isnull(genestr):
continue
for gene in genestr.split(','):
if gene not in genes:
genes[gene] = []
genes[gene].append(idx)
return genes
|
<mask token>
class GenomicArray:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def as_columns(self, **columns):
"""Wrap the named columns in this instance's metadata."""
return self.__class__.from_columns(columns, self.meta)
def as_dataframe(self, dframe: pd.DataFrame, reset_index: bool=False):
"""Wrap the given pandas DataFrame in this instance's metadata."""
if reset_index:
dframe = dframe.reset_index(drop=True)
return self.__class__(dframe, self.meta.copy())
def as_series(self, arraylike: Iterable) ->pd.Series:
"""Coerce `arraylike` to a Series with this instance's index."""
return pd.Series(arraylike, index=self.data.index)
<mask token>
<mask token>
def __eq__(self, other) ->bool:
return isinstance(other, self.__class__) and self.data.equals(other
.data)
def __len__(self) ->int:
return len(self.data)
<mask token>
<mask token>
def __setitem__(self, index, value):
"""Assign to a portion of the data."""
if isinstance(index, int):
self.data.iloc[index] = value
elif isinstance(index, str):
self.data[index] = value
elif isinstance(index, tuple) and len(index) == 2 and index[1
] in self.data.columns:
self.data.loc[index] = value
else:
assert isinstance(index, slice) or len(index) > 0
self.data[index] = value
def __delitem__(self, index):
return NotImplemented
def __iter__(self):
return self.data.itertuples(index=False)
<mask token>
<mask token>
@property
def start(self) ->pd.Series:
"""Get column 'start'."""
return self.data['start']
@property
def end(self) ->pd.Series:
"""Get column 'end'."""
return self.data['end']
<mask token>
def autosomes(self, also=None):
"""Select chromosomes w/ integer names, ignoring any 'chr' prefixes."""
is_auto = self.chromosome.str.match('(chr)?\\d+$', na=False)
if not is_auto.any():
return self
if also is not None:
if isinstance(also, pd.Series):
is_auto |= also
else:
if isinstance(also, str):
also = [also]
for a_chrom in also:
is_auto |= self.chromosome == a_chrom
return self[is_auto]
<mask token>
<mask token>
def by_ranges(self, other, mode: str='outer', keep_empty: bool=True
) ->Iterator:
"""Group rows by another GenomicArray's bin coordinate ranges.
For example, this can be used to group SNVs by CNV segments.
Bins in this array that fall outside the other array's bins are skipped.
Parameters
----------
other : GenomicArray
Another GA instance.
mode : string
Determines what to do with bins that overlap a boundary of the
selection. Possible values are:
- ``inner``: Drop the bins on the selection boundary, don't emit them.
- ``outer``: Keep/emit those bins as they are.
- ``trim``: Emit those bins but alter their boundaries to match the
selection; the bin start or end position is replaced with the
selection boundary position.
keep_empty : bool
Whether to also yield `other` bins with no overlapping bins in
`self`, or to skip them when iterating.
Yields
------
tuple
(other bin, GenomicArray of overlapping rows in self)
"""
for bin_row, subrange in by_ranges(self.data, other.data, mode,
keep_empty):
if len(subrange):
yield bin_row, self.as_dataframe(subrange)
elif keep_empty:
yield bin_row, self.as_rows(subrange)
def coords(self, also: Union[str, Iterable[str]]=()):
"""Iterate over plain coordinates of each bin: chromosome, start, end.
Parameters
----------
also : str, or iterable of strings
Also include these columns from `self`, in addition to chromosome,
start, and end.
Example, yielding rows in BED format:
>>> probes.coords(also=["gene", "strand"])
"""
cols = list(GenomicArray._required_columns)
if also:
if isinstance(also, str):
cols.append(also)
else:
cols.extend(also)
coordframe = self.data.loc[:, cols]
return coordframe.itertuples(index=False)
<mask token>
<mask token>
def in_ranges(self, chrom: Optional[str]=None, starts: Optional[
Sequence[Numeric]]=None, ends: Optional[Sequence[Numeric]]=None,
mode: str='outer'):
"""Get the GenomicArray portion within the specified ranges.
Similar to `in_ranges`, but concatenating the selections of all the
regions specified by the `starts` and `ends` arrays.
Parameters
----------
chrom : str or None
Chromosome name to select. Use None if `self` has only one
chromosome.
starts : int array, or None
Start coordinates of ranges to select, in 0-based coordinates.
If None, start from 0.
ends : int array, or None
End coordinates of ranges to select. If None, select to the end of the
chromosome. If `starts` and `ends` are both specified, they must be
arrays of equal length.
mode : str
As in `by_ranges`: ``outer`` includes bins straddling the range
boundaries, ``trim`` additionally alters the straddling bins'
endpoints to match the range boundaries, and ``inner`` excludes
those bins.
Returns
-------
GenomicArray
Concatenation of all the subsets of `self` enclosed by the specified
ranges.
"""
table = pd.concat(iter_ranges(self.data, chrom, starts, ends, mode),
sort=False)
return self.as_dataframe(table)
<mask token>
def iter_ranges_of(self, other, column: str, mode: str='outer',
keep_empty: bool=True):
"""Group rows by another GenomicArray's bin coordinate ranges.
For example, this can be used to group SNVs by CNV segments.
Bins in this array that fall outside the other array's bins are skipped.
Parameters
----------
other : GenomicArray
Another GA instance.
column : string
Column name in `self` to extract values from.
mode : string
Determines what to do with bins that overlap a boundary of the
selection. Possible values are:
- ``inner``: Drop the bins on the selection boundary, don't emit them.
- ``outer``: Keep/emit those bins as they are.
- ``trim``: Emit those bins but alter their boundaries to match the
selection; the bin start or end position is replaced with the
selection boundary position.
keep_empty : bool
Whether to also yield `other` bins with no overlapping bins in
`self`, or to skip them when iterating.
Yields
------
tuple
(other bin, GenomicArray of overlapping rows in self)
"""
if column not in self.data.columns:
raise ValueError(f'No column named {column!r} in this object')
ser = self.data[column]
for slc in iter_slices(self.data, other.data, mode, keep_empty):
yield ser[slc]
<mask token>
<mask token>
<mask token>
def add_columns(self, **columns):
"""Add the given columns to a copy of this GenomicArray.
Parameters
----------
**columns : array
Keyword arguments where the key is the new column's name and the
value is an array of the same length as `self` which will be the new
column's values.
Returns
-------
GenomicArray or subclass
A new instance of `self` with the given columns included in the
underlying dataframe.
"""
return self.as_dataframe(self.data.assign(**columns))
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def cut(self, other, combine=None):
"""Split this array's regions at the boundaries in `other`."""
return NotImplemented
<mask token>
<mask token>
<mask token>
def resize_ranges(self, bp: int, chrom_sizes: Optional[Mapping[str,
Numeric]]=None):
"""Resize each genomic bin by a fixed number of bases at each end.
Bin 'start' values have a minimum of 0, and `chrom_sizes` can
specify each chromosome's maximum 'end' value.
Similar to 'bedtools slop'.
Parameters
----------
bp : int
Number of bases in each direction to expand or shrink each bin.
Applies to 'start' and 'end' values symmetrically, and may be
positive (expand) or negative (shrink).
chrom_sizes : dict of string-to-int
Chromosome name to length in base pairs. If given, all chromosomes
in `self` must be included.
"""
table = self.data
limits = {'lower': 0}
if chrom_sizes:
limits['upper'] = self.chromosome.replace(chrom_sizes)
table = table.assign(start=(table['start'] - bp).clip(**limits),
end=(table['end'] + bp).clip(**limits))
if bp < 0:
ok_size = table['end'] - table['start'] > 0
logging.debug('Dropping %d bins with size <= 0', (~ok_size).sum())
table = table[ok_size]
return self.as_dataframe(table.copy())
<mask token>
<mask token>
<mask token>
<mask token>
def _get_gene_map(self) ->OrderedDict:
"""Map unique gene names to their indices in this array.
Returns
-------
OrderedDict
An (ordered) dictionary of unique gene names and the data indices of
their segments in the order of occurrence (genomic order).
"""
if 'gene' not in self.data:
return OrderedDict()
genes: OrderedDict = OrderedDict()
for idx, genestr in self.data['gene'].items():
if pd.isnull(genestr):
continue
for gene in genestr.split(','):
if gene not in genes:
genes[gene] = []
genes[gene].append(idx)
return genes
|
<mask token>
class GenomicArray:
<mask token>
<mask token>
<mask token>
def __init__(self, data_table: Optional[Union[Sequence, pd.DataFrame]],
meta_dict: Optional[Mapping]=None):
if data_table is None or isinstance(data_table, (list, tuple)
) and not len(data_table) or isinstance(data_table, pd.DataFrame
) and not len(data_table.columns):
data_table = self._make_blank()
else:
if not isinstance(data_table, pd.DataFrame):
data_table = pd.DataFrame(data_table)
if not all(c in data_table.columns for c in self._required_columns
):
raise ValueError('data table must have at least columns ' +
f'{self._required_columns!r}; got {tuple(data_table.columns)!r}'
)
if len(data_table):
def ok_dtype(col, dtype):
return isinstance(data_table[col].iat[0], dtype)
else:
def ok_dtype(col, dtype):
return data_table[col].dtype == np.dtype(dtype)
recast_cols = {col: dtype for col, dtype in zip(self.
_required_columns, self._required_dtypes) if not ok_dtype(
col, dtype)}
if recast_cols:
data_table = data_table.astype(recast_cols)
self.data = data_table
self.meta = dict(meta_dict) if meta_dict is not None and len(meta_dict
) else {}
<mask token>
<mask token>
@classmethod
def from_rows(cls, rows: Iterable, columns: Optional[Sequence[str]]=
None, meta_dict: Optional[Mapping]=None):
"""Create a new instance from a list of rows, as tuples or arrays."""
if columns is None:
columns = cls._required_columns
table = pd.DataFrame.from_records(rows, columns=columns)
return cls(table, meta_dict)
def as_columns(self, **columns):
"""Wrap the named columns in this instance's metadata."""
return self.__class__.from_columns(columns, self.meta)
def as_dataframe(self, dframe: pd.DataFrame, reset_index: bool=False):
"""Wrap the given pandas DataFrame in this instance's metadata."""
if reset_index:
dframe = dframe.reset_index(drop=True)
return self.__class__(dframe, self.meta.copy())
def as_series(self, arraylike: Iterable) ->pd.Series:
"""Coerce `arraylike` to a Series with this instance's index."""
return pd.Series(arraylike, index=self.data.index)
<mask token>
def __bool__(self) ->bool:
return bool(len(self.data))
def __eq__(self, other) ->bool:
return isinstance(other, self.__class__) and self.data.equals(other
.data)
def __len__(self) ->int:
return len(self.data)
def __contains__(self, key) ->bool:
return key in self.data.columns
def __getitem__(self, index) ->Union[pd.Series, pd.DataFrame]:
"""Access a portion of the data.
Cases:
- single integer: a row, as pd.Series
- string row name: a column, as pd.Series
- a boolean array: masked rows, as_dataframe
- tuple of integers: selected rows, as_dataframe
"""
if isinstance(index, int):
return self.data.iloc[index]
if isinstance(index, str):
return self.data[index]
if isinstance(index, tuple) and len(index) == 2 and index[1
] in self.data.columns:
return self.data.loc[index]
if isinstance(index, slice):
return self.as_dataframe(self.data[index])
try:
if isinstance(index, type(None)) or len(index) == 0:
empty = pd.DataFrame(columns=self.data.columns)
return self.as_dataframe(empty)
except TypeError as exc:
raise TypeError(
f'object of type {type(index)!r} cannot be used as an index into a {self.__class__.__name__}'
) from exc
return self.as_dataframe(self.data[index])
def __setitem__(self, index, value):
"""Assign to a portion of the data."""
if isinstance(index, int):
self.data.iloc[index] = value
elif isinstance(index, str):
self.data[index] = value
elif isinstance(index, tuple) and len(index) == 2 and index[1
] in self.data.columns:
self.data.loc[index] = value
else:
assert isinstance(index, slice) or len(index) > 0
self.data[index] = value
def __delitem__(self, index):
return NotImplemented
def __iter__(self):
return self.data.itertuples(index=False)
<mask token>
@property
def chromosome(self) ->pd.Series:
"""Get column 'chromosome'."""
return self.data['chromosome']
@property
def start(self) ->pd.Series:
"""Get column 'start'."""
return self.data['start']
@property
def end(self) ->pd.Series:
"""Get column 'end'."""
return self.data['end']
<mask token>
def autosomes(self, also=None):
"""Select chromosomes w/ integer names, ignoring any 'chr' prefixes."""
is_auto = self.chromosome.str.match('(chr)?\\d+$', na=False)
if not is_auto.any():
return self
if also is not None:
if isinstance(also, pd.Series):
is_auto |= also
else:
if isinstance(also, str):
also = [also]
for a_chrom in also:
is_auto |= self.chromosome == a_chrom
return self[is_auto]
<mask token>
def by_chromosome(self) ->Iterator:
"""Iterate over bins grouped by chromosome name."""
for chrom, subtable in self.data.groupby('chromosome', sort=False):
yield chrom, self.as_dataframe(subtable)
def by_ranges(self, other, mode: str='outer', keep_empty: bool=True
) ->Iterator:
"""Group rows by another GenomicArray's bin coordinate ranges.
For example, this can be used to group SNVs by CNV segments.
Bins in this array that fall outside the other array's bins are skipped.
Parameters
----------
other : GenomicArray
Another GA instance.
mode : string
Determines what to do with bins that overlap a boundary of the
selection. Possible values are:
- ``inner``: Drop the bins on the selection boundary, don't emit them.
- ``outer``: Keep/emit those bins as they are.
- ``trim``: Emit those bins but alter their boundaries to match the
selection; the bin start or end position is replaced with the
selection boundary position.
keep_empty : bool
Whether to also yield `other` bins with no overlapping bins in
`self`, or to skip them when iterating.
Yields
------
tuple
(other bin, GenomicArray of overlapping rows in self)
"""
for bin_row, subrange in by_ranges(self.data, other.data, mode,
keep_empty):
if len(subrange):
yield bin_row, self.as_dataframe(subrange)
elif keep_empty:
yield bin_row, self.as_rows(subrange)
def coords(self, also: Union[str, Iterable[str]]=()):
"""Iterate over plain coordinates of each bin: chromosome, start, end.
Parameters
----------
also : str, or iterable of strings
Also include these columns from `self`, in addition to chromosome,
start, and end.
Example, yielding rows in BED format:
>>> probes.coords(also=["gene", "strand"])
"""
cols = list(GenomicArray._required_columns)
if also:
if isinstance(also, str):
cols.append(also)
else:
cols.extend(also)
coordframe = self.data.loc[:, cols]
return coordframe.itertuples(index=False)
<mask token>
def in_range(self, chrom: Optional[str]=None, start: Optional[Numeric]=
None, end: Optional[Numeric]=None, mode: str='outer'):
"""Get the GenomicArray portion within the given genomic range.
Parameters
----------
chrom : str or None
Chromosome name to select. Use None if `self` has only one
chromosome.
start : int or None
Start coordinate of range to select, in 0-based coordinates.
If None, start from 0.
end : int or None
End coordinate of range to select. If None, select to the end of the
chromosome.
mode : str
As in `by_ranges`: ``outer`` includes bins straddling the range
boundaries, ``trim`` additionally alters the straddling bins'
endpoints to match the range boundaries, and ``inner`` excludes
those bins.
Returns
-------
GenomicArray
The subset of `self` enclosed by the specified range.
"""
starts = [int(start)] if start is not None else None
ends = [int(end)] if end is not None else None
results = iter_ranges(self.data, chrom, starts, ends, mode)
return self.as_dataframe(next(results))
def in_ranges(self, chrom: Optional[str]=None, starts: Optional[
Sequence[Numeric]]=None, ends: Optional[Sequence[Numeric]]=None,
mode: str='outer'):
"""Get the GenomicArray portion within the specified ranges.
Similar to `in_ranges`, but concatenating the selections of all the
regions specified by the `starts` and `ends` arrays.
Parameters
----------
chrom : str or None
Chromosome name to select. Use None if `self` has only one
chromosome.
starts : int array, or None
Start coordinates of ranges to select, in 0-based coordinates.
If None, start from 0.
ends : int array, or None
End coordinates of ranges to select. If None, select to the end of the
chromosome. If `starts` and `ends` are both specified, they must be
arrays of equal length.
mode : str
As in `by_ranges`: ``outer`` includes bins straddling the range
boundaries, ``trim`` additionally alters the straddling bins'
endpoints to match the range boundaries, and ``inner`` excludes
those bins.
Returns
-------
GenomicArray
Concatenation of all the subsets of `self` enclosed by the specified
ranges.
"""
table = pd.concat(iter_ranges(self.data, chrom, starts, ends, mode),
sort=False)
return self.as_dataframe(table)
def into_ranges(self, other, column: str, default, summary_func:
Optional[Callable]=None):
"""Re-bin values from `column` into the corresponding ranges in `other`.
Match overlapping/intersecting rows from `other` to each row in `self`.
Then, within each range in `other`, extract the value(s) from `column`
in `self`, using the function `summary_func` to produce a single value
if multiple bins in `self` map to a single range in `other`.
For example, group SNVs (self) by CNV segments (other) and calculate the
median (summary_func) of each SNV group's allele frequencies.
Parameters
----------
other : GenomicArray
Ranges into which the overlapping values of `self` will be
summarized.
column : string
Column name in `self` to extract values from.
default
Value to assign to indices in `other` that do not overlap any bins in
`self`. Type should be the same as or compatible with the output
field specified by `column`, or the output of `summary_func`.
summary_func : callable, dict of string-to-callable, or None
Specify how to reduce 1 or more `other` rows into a single value for
the corresponding row in `self`.
- If callable, apply to the `column` field each group of rows in
`other` column.
- If a single-element dict of column name to callable, apply to that
field in `other` instead of `column`.
- If None, use an appropriate summarizing function for the datatype
of the `column` column in `other` (e.g. median of numbers,
concatenation of strings).
- If some other value, assign that value to `self` wherever there is
an overlap.
Returns
-------
pd.Series
The extracted and summarized values from `self` corresponding to
other's genomic ranges, the same length as `other`.
"""
if column not in self:
logging.warning("No '%s' column available for summary calculation",
column)
return pd.Series(np.repeat(default, len(other)))
return into_ranges(self.data, other.data, column, default, summary_func
)
def iter_ranges_of(self, other, column: str, mode: str='outer',
keep_empty: bool=True):
"""Group rows by another GenomicArray's bin coordinate ranges.
For example, this can be used to group SNVs by CNV segments.
Bins in this array that fall outside the other array's bins are skipped.
Parameters
----------
other : GenomicArray
Another GA instance.
column : string
Column name in `self` to extract values from.
mode : string
Determines what to do with bins that overlap a boundary of the
selection. Possible values are:
- ``inner``: Drop the bins on the selection boundary, don't emit them.
- ``outer``: Keep/emit those bins as they are.
- ``trim``: Emit those bins but alter their boundaries to match the
selection; the bin start or end position is replaced with the
selection boundary position.
keep_empty : bool
Whether to also yield `other` bins with no overlapping bins in
`self`, or to skip them when iterating.
Yields
------
tuple
(other bin, GenomicArray of overlapping rows in self)
"""
if column not in self.data.columns:
raise ValueError(f'No column named {column!r} in this object')
ser = self.data[column]
for slc in iter_slices(self.data, other.data, mode, keep_empty):
yield ser[slc]
<mask token>
<mask token>
def copy(self):
"""Create an independent copy of this object."""
return self.as_dataframe(self.data.copy())
def add_columns(self, **columns):
"""Add the given columns to a copy of this GenomicArray.
Parameters
----------
**columns : array
Keyword arguments where the key is the new column's name and the
value is an array of the same length as `self` which will be the new
column's values.
Returns
-------
GenomicArray or subclass
A new instance of `self` with the given columns included in the
underlying dataframe.
"""
return self.as_dataframe(self.data.assign(**columns))
def keep_columns(self, colnames):
"""Extract a subset of columns, reusing this instance's metadata."""
colnames = self.data.columns.intersection(colnames)
return self.__class__(self.data.loc[:, colnames], self.meta.copy())
<mask token>
def filter(self, func=None, **kwargs):
"""Take a subset of rows where the given condition is true.
Parameters
----------
func : callable
A boolean function which will be applied to each row to keep rows
where the result is True.
**kwargs : string
Keyword arguments like ``chromosome="chr7"`` or
``gene="Antitarget"``, which will keep rows where the keyed field
equals the specified value.
Return
------
GenomicArray
Subset of `self` where the specified condition is True.
"""
table = self.data
if func is not None:
table = table[table.apply(func, axis=1)]
for key, val in list(kwargs.items()):
assert key in self
table = table[table[key] == val]
return self.as_dataframe(table)
def shuffle(self):
"""Randomize the order of bins in this array (in-place)."""
order = np.arange(len(self.data))
np.random.seed(679661)
np.random.shuffle(order)
self.data = self.data.iloc[order]
return order
def sort(self):
"""Sort this array's bins in-place, with smart chromosome ordering."""
sort_key = self.data.chromosome.apply(sorter_chrom)
self.data = self.data.assign(_sort_key_=sort_key).sort_values(by=[
'_sort_key_', 'start', 'end'], kind='mergesort').drop('_sort_key_',
axis=1).reset_index(drop=True)
def sort_columns(self):
"""Sort this array's columns in-place, per class definition."""
extra_cols = []
for col in self.data.columns:
if col not in self._required_columns:
extra_cols.append(col)
sorted_colnames = list(self._required_columns) + sorted(extra_cols)
assert len(sorted_colnames) == len(self.data.columns)
self.data = self.data.reindex(columns=sorted_colnames)
def cut(self, other, combine=None):
"""Split this array's regions at the boundaries in `other`."""
return NotImplemented
<mask token>
<mask token>
def merge(self, bp: int=0, stranded: bool=False, combine: Optional[Dict
[str, Callable]]=None):
"""Merge adjacent or overlapping regions into single rows.
Similar to 'bedtools merge'.
"""
return self.as_dataframe(merge(self.data, bp, stranded, combine))
def resize_ranges(self, bp: int, chrom_sizes: Optional[Mapping[str,
Numeric]]=None):
"""Resize each genomic bin by a fixed number of bases at each end.
Bin 'start' values have a minimum of 0, and `chrom_sizes` can
specify each chromosome's maximum 'end' value.
Similar to 'bedtools slop'.
Parameters
----------
bp : int
Number of bases in each direction to expand or shrink each bin.
Applies to 'start' and 'end' values symmetrically, and may be
positive (expand) or negative (shrink).
chrom_sizes : dict of string-to-int
Chromosome name to length in base pairs. If given, all chromosomes
in `self` must be included.
"""
table = self.data
limits = {'lower': 0}
if chrom_sizes:
limits['upper'] = self.chromosome.replace(chrom_sizes)
table = table.assign(start=(table['start'] - bp).clip(**limits),
end=(table['end'] + bp).clip(**limits))
if bp < 0:
ok_size = table['end'] - table['start'] > 0
logging.debug('Dropping %d bins with size <= 0', (~ok_size).sum())
table = table[ok_size]
return self.as_dataframe(table.copy())
def squash(self, combine=None):
"""Combine some groups of rows, by some criteria, into single rows."""
return NotImplemented
def subdivide(self, avg_size: int, min_size: int=0, verbose: bool=False):
"""Split this array's regions into roughly equal-sized sub-regions."""
return self.as_dataframe(subdivide(self.data, avg_size, min_size,
verbose))
<mask token>
def total_range_size(self) ->int:
"""Total number of bases covered by all (merged) regions."""
if not len(self):
return 0
regions = merge(self.data, bp=1)
return regions.end.sum() - regions.start.sum()
def _get_gene_map(self) ->OrderedDict:
"""Map unique gene names to their indices in this array.
Returns
-------
OrderedDict
An (ordered) dictionary of unique gene names and the data indices of
their segments in the order of occurrence (genomic order).
"""
if 'gene' not in self.data:
return OrderedDict()
genes: OrderedDict = OrderedDict()
for idx, genestr in self.data['gene'].items():
if pd.isnull(genestr):
continue
for gene in genestr.split(','):
if gene not in genes:
genes[gene] = []
genes[gene].append(idx)
return genes
|
<mask token>
class GenomicArray:
<mask token>
<mask token>
<mask token>
def __init__(self, data_table: Optional[Union[Sequence, pd.DataFrame]],
meta_dict: Optional[Mapping]=None):
if data_table is None or isinstance(data_table, (list, tuple)
) and not len(data_table) or isinstance(data_table, pd.DataFrame
) and not len(data_table.columns):
data_table = self._make_blank()
else:
if not isinstance(data_table, pd.DataFrame):
data_table = pd.DataFrame(data_table)
if not all(c in data_table.columns for c in self._required_columns
):
raise ValueError('data table must have at least columns ' +
f'{self._required_columns!r}; got {tuple(data_table.columns)!r}'
)
if len(data_table):
def ok_dtype(col, dtype):
return isinstance(data_table[col].iat[0], dtype)
else:
def ok_dtype(col, dtype):
return data_table[col].dtype == np.dtype(dtype)
recast_cols = {col: dtype for col, dtype in zip(self.
_required_columns, self._required_dtypes) if not ok_dtype(
col, dtype)}
if recast_cols:
data_table = data_table.astype(recast_cols)
self.data = data_table
self.meta = dict(meta_dict) if meta_dict is not None and len(meta_dict
) else {}
<mask token>
@classmethod
def from_columns(cls, columns: Mapping[str, Iterable], meta_dict:
Optional[Mapping]=None):
"""Create a new instance from column arrays, given as a dict."""
table = pd.DataFrame.from_dict(columns)
ary = cls(table, meta_dict)
ary.sort_columns()
return ary
@classmethod
def from_rows(cls, rows: Iterable, columns: Optional[Sequence[str]]=
None, meta_dict: Optional[Mapping]=None):
"""Create a new instance from a list of rows, as tuples or arrays."""
if columns is None:
columns = cls._required_columns
table = pd.DataFrame.from_records(rows, columns=columns)
return cls(table, meta_dict)
def as_columns(self, **columns):
"""Wrap the named columns in this instance's metadata."""
return self.__class__.from_columns(columns, self.meta)
def as_dataframe(self, dframe: pd.DataFrame, reset_index: bool=False):
"""Wrap the given pandas DataFrame in this instance's metadata."""
if reset_index:
dframe = dframe.reset_index(drop=True)
return self.__class__(dframe, self.meta.copy())
def as_series(self, arraylike: Iterable) ->pd.Series:
"""Coerce `arraylike` to a Series with this instance's index."""
return pd.Series(arraylike, index=self.data.index)
<mask token>
def __bool__(self) ->bool:
return bool(len(self.data))
def __eq__(self, other) ->bool:
return isinstance(other, self.__class__) and self.data.equals(other
.data)
def __len__(self) ->int:
return len(self.data)
def __contains__(self, key) ->bool:
return key in self.data.columns
def __getitem__(self, index) ->Union[pd.Series, pd.DataFrame]:
"""Access a portion of the data.
Cases:
- single integer: a row, as pd.Series
- string row name: a column, as pd.Series
- a boolean array: masked rows, as_dataframe
- tuple of integers: selected rows, as_dataframe
"""
if isinstance(index, int):
return self.data.iloc[index]
if isinstance(index, str):
return self.data[index]
if isinstance(index, tuple) and len(index) == 2 and index[1
] in self.data.columns:
return self.data.loc[index]
if isinstance(index, slice):
return self.as_dataframe(self.data[index])
try:
if isinstance(index, type(None)) or len(index) == 0:
empty = pd.DataFrame(columns=self.data.columns)
return self.as_dataframe(empty)
except TypeError as exc:
raise TypeError(
f'object of type {type(index)!r} cannot be used as an index into a {self.__class__.__name__}'
) from exc
return self.as_dataframe(self.data[index])
def __setitem__(self, index, value):
"""Assign to a portion of the data."""
if isinstance(index, int):
self.data.iloc[index] = value
elif isinstance(index, str):
self.data[index] = value
elif isinstance(index, tuple) and len(index) == 2 and index[1
] in self.data.columns:
self.data.loc[index] = value
else:
assert isinstance(index, slice) or len(index) > 0
self.data[index] = value
def __delitem__(self, index):
return NotImplemented
def __iter__(self):
return self.data.itertuples(index=False)
<mask token>
@property
def chromosome(self) ->pd.Series:
"""Get column 'chromosome'."""
return self.data['chromosome']
@property
def start(self) ->pd.Series:
"""Get column 'start'."""
return self.data['start']
@property
def end(self) ->pd.Series:
"""Get column 'end'."""
return self.data['end']
<mask token>
def autosomes(self, also=None):
"""Select chromosomes w/ integer names, ignoring any 'chr' prefixes."""
is_auto = self.chromosome.str.match('(chr)?\\d+$', na=False)
if not is_auto.any():
return self
if also is not None:
if isinstance(also, pd.Series):
is_auto |= also
else:
if isinstance(also, str):
also = [also]
for a_chrom in also:
is_auto |= self.chromosome == a_chrom
return self[is_auto]
<mask token>
def by_chromosome(self) ->Iterator:
"""Iterate over bins grouped by chromosome name."""
for chrom, subtable in self.data.groupby('chromosome', sort=False):
yield chrom, self.as_dataframe(subtable)
def by_ranges(self, other, mode: str='outer', keep_empty: bool=True
) ->Iterator:
"""Group rows by another GenomicArray's bin coordinate ranges.
For example, this can be used to group SNVs by CNV segments.
Bins in this array that fall outside the other array's bins are skipped.
Parameters
----------
other : GenomicArray
Another GA instance.
mode : string
Determines what to do with bins that overlap a boundary of the
selection. Possible values are:
- ``inner``: Drop the bins on the selection boundary, don't emit them.
- ``outer``: Keep/emit those bins as they are.
- ``trim``: Emit those bins but alter their boundaries to match the
selection; the bin start or end position is replaced with the
selection boundary position.
keep_empty : bool
Whether to also yield `other` bins with no overlapping bins in
`self`, or to skip them when iterating.
Yields
------
tuple
(other bin, GenomicArray of overlapping rows in self)
"""
for bin_row, subrange in by_ranges(self.data, other.data, mode,
keep_empty):
if len(subrange):
yield bin_row, self.as_dataframe(subrange)
elif keep_empty:
yield bin_row, self.as_rows(subrange)
def coords(self, also: Union[str, Iterable[str]]=()):
"""Iterate over plain coordinates of each bin: chromosome, start, end.
Parameters
----------
also : str, or iterable of strings
Also include these columns from `self`, in addition to chromosome,
start, and end.
Example, yielding rows in BED format:
>>> probes.coords(also=["gene", "strand"])
"""
cols = list(GenomicArray._required_columns)
if also:
if isinstance(also, str):
cols.append(also)
else:
cols.extend(also)
coordframe = self.data.loc[:, cols]
return coordframe.itertuples(index=False)
<mask token>
def in_range(self, chrom: Optional[str]=None, start: Optional[Numeric]=
None, end: Optional[Numeric]=None, mode: str='outer'):
"""Get the GenomicArray portion within the given genomic range.
Parameters
----------
chrom : str or None
Chromosome name to select. Use None if `self` has only one
chromosome.
start : int or None
Start coordinate of range to select, in 0-based coordinates.
If None, start from 0.
end : int or None
End coordinate of range to select. If None, select to the end of the
chromosome.
mode : str
As in `by_ranges`: ``outer`` includes bins straddling the range
boundaries, ``trim`` additionally alters the straddling bins'
endpoints to match the range boundaries, and ``inner`` excludes
those bins.
Returns
-------
GenomicArray
The subset of `self` enclosed by the specified range.
"""
starts = [int(start)] if start is not None else None
ends = [int(end)] if end is not None else None
results = iter_ranges(self.data, chrom, starts, ends, mode)
return self.as_dataframe(next(results))
def in_ranges(self, chrom: Optional[str]=None, starts: Optional[
Sequence[Numeric]]=None, ends: Optional[Sequence[Numeric]]=None,
mode: str='outer'):
"""Get the GenomicArray portion within the specified ranges.
Similar to `in_ranges`, but concatenating the selections of all the
regions specified by the `starts` and `ends` arrays.
Parameters
----------
chrom : str or None
Chromosome name to select. Use None if `self` has only one
chromosome.
starts : int array, or None
Start coordinates of ranges to select, in 0-based coordinates.
If None, start from 0.
ends : int array, or None
End coordinates of ranges to select. If None, select to the end of the
chromosome. If `starts` and `ends` are both specified, they must be
arrays of equal length.
mode : str
As in `by_ranges`: ``outer`` includes bins straddling the range
boundaries, ``trim`` additionally alters the straddling bins'
endpoints to match the range boundaries, and ``inner`` excludes
those bins.
Returns
-------
GenomicArray
Concatenation of all the subsets of `self` enclosed by the specified
ranges.
"""
table = pd.concat(iter_ranges(self.data, chrom, starts, ends, mode),
sort=False)
return self.as_dataframe(table)
def into_ranges(self, other, column: str, default, summary_func:
Optional[Callable]=None):
"""Re-bin values from `column` into the corresponding ranges in `other`.
Match overlapping/intersecting rows from `other` to each row in `self`.
Then, within each range in `other`, extract the value(s) from `column`
in `self`, using the function `summary_func` to produce a single value
if multiple bins in `self` map to a single range in `other`.
For example, group SNVs (self) by CNV segments (other) and calculate the
median (summary_func) of each SNV group's allele frequencies.
Parameters
----------
other : GenomicArray
Ranges into which the overlapping values of `self` will be
summarized.
column : string
Column name in `self` to extract values from.
default
Value to assign to indices in `other` that do not overlap any bins in
`self`. Type should be the same as or compatible with the output
field specified by `column`, or the output of `summary_func`.
summary_func : callable, dict of string-to-callable, or None
Specify how to reduce 1 or more `other` rows into a single value for
the corresponding row in `self`.
- If callable, apply to the `column` field each group of rows in
`other` column.
- If a single-element dict of column name to callable, apply to that
field in `other` instead of `column`.
- If None, use an appropriate summarizing function for the datatype
of the `column` column in `other` (e.g. median of numbers,
concatenation of strings).
- If some other value, assign that value to `self` wherever there is
an overlap.
Returns
-------
pd.Series
The extracted and summarized values from `self` corresponding to
other's genomic ranges, the same length as `other`.
"""
if column not in self:
logging.warning("No '%s' column available for summary calculation",
column)
return pd.Series(np.repeat(default, len(other)))
return into_ranges(self.data, other.data, column, default, summary_func
)
def iter_ranges_of(self, other, column: str, mode: str='outer',
keep_empty: bool=True):
"""Group rows by another GenomicArray's bin coordinate ranges.
For example, this can be used to group SNVs by CNV segments.
Bins in this array that fall outside the other array's bins are skipped.
Parameters
----------
other : GenomicArray
Another GA instance.
column : string
Column name in `self` to extract values from.
mode : string
Determines what to do with bins that overlap a boundary of the
selection. Possible values are:
- ``inner``: Drop the bins on the selection boundary, don't emit them.
- ``outer``: Keep/emit those bins as they are.
- ``trim``: Emit those bins but alter their boundaries to match the
selection; the bin start or end position is replaced with the
selection boundary position.
keep_empty : bool
Whether to also yield `other` bins with no overlapping bins in
`self`, or to skip them when iterating.
Yields
------
tuple
(other bin, GenomicArray of overlapping rows in self)
"""
if column not in self.data.columns:
raise ValueError(f'No column named {column!r} in this object')
ser = self.data[column]
for slc in iter_slices(self.data, other.data, mode, keep_empty):
yield ser[slc]
<mask token>
<mask token>
def copy(self):
"""Create an independent copy of this object."""
return self.as_dataframe(self.data.copy())
def add_columns(self, **columns):
"""Add the given columns to a copy of this GenomicArray.
Parameters
----------
**columns : array
Keyword arguments where the key is the new column's name and the
value is an array of the same length as `self` which will be the new
column's values.
Returns
-------
GenomicArray or subclass
A new instance of `self` with the given columns included in the
underlying dataframe.
"""
return self.as_dataframe(self.data.assign(**columns))
def keep_columns(self, colnames):
"""Extract a subset of columns, reusing this instance's metadata."""
colnames = self.data.columns.intersection(colnames)
return self.__class__(self.data.loc[:, colnames], self.meta.copy())
<mask token>
def filter(self, func=None, **kwargs):
"""Take a subset of rows where the given condition is true.
Parameters
----------
func : callable
A boolean function which will be applied to each row to keep rows
where the result is True.
**kwargs : string
Keyword arguments like ``chromosome="chr7"`` or
``gene="Antitarget"``, which will keep rows where the keyed field
equals the specified value.
Return
------
GenomicArray
Subset of `self` where the specified condition is True.
"""
table = self.data
if func is not None:
table = table[table.apply(func, axis=1)]
for key, val in list(kwargs.items()):
assert key in self
table = table[table[key] == val]
return self.as_dataframe(table)
def shuffle(self):
"""Randomize the order of bins in this array (in-place)."""
order = np.arange(len(self.data))
np.random.seed(679661)
np.random.shuffle(order)
self.data = self.data.iloc[order]
return order
def sort(self):
"""Sort this array's bins in-place, with smart chromosome ordering."""
sort_key = self.data.chromosome.apply(sorter_chrom)
self.data = self.data.assign(_sort_key_=sort_key).sort_values(by=[
'_sort_key_', 'start', 'end'], kind='mergesort').drop('_sort_key_',
axis=1).reset_index(drop=True)
def sort_columns(self):
"""Sort this array's columns in-place, per class definition."""
extra_cols = []
for col in self.data.columns:
if col not in self._required_columns:
extra_cols.append(col)
sorted_colnames = list(self._required_columns) + sorted(extra_cols)
assert len(sorted_colnames) == len(self.data.columns)
self.data = self.data.reindex(columns=sorted_colnames)
def cut(self, other, combine=None):
"""Split this array's regions at the boundaries in `other`."""
return NotImplemented
<mask token>
def intersection(self, other, mode: str='outer'):
"""Select the bins in `self` that overlap the regions in `other`.
The extra fields of `self`, but not `other`, are retained in the output.
"""
if mode == 'trim':
chunks = [chunk.data for _, chunk in self.by_ranges(other, mode
=mode, keep_empty=False)]
return self.as_dataframe(pd.concat(chunks))
slices = iter_slices(self.data, other.data, mode, False)
indices = np.concatenate(list(slices))
return self.as_dataframe(self.data.loc[indices])
def merge(self, bp: int=0, stranded: bool=False, combine: Optional[Dict
[str, Callable]]=None):
"""Merge adjacent or overlapping regions into single rows.
Similar to 'bedtools merge'.
"""
return self.as_dataframe(merge(self.data, bp, stranded, combine))
def resize_ranges(self, bp: int, chrom_sizes: Optional[Mapping[str,
Numeric]]=None):
"""Resize each genomic bin by a fixed number of bases at each end.
Bin 'start' values have a minimum of 0, and `chrom_sizes` can
specify each chromosome's maximum 'end' value.
Similar to 'bedtools slop'.
Parameters
----------
bp : int
Number of bases in each direction to expand or shrink each bin.
Applies to 'start' and 'end' values symmetrically, and may be
positive (expand) or negative (shrink).
chrom_sizes : dict of string-to-int
Chromosome name to length in base pairs. If given, all chromosomes
in `self` must be included.
"""
table = self.data
limits = {'lower': 0}
if chrom_sizes:
limits['upper'] = self.chromosome.replace(chrom_sizes)
table = table.assign(start=(table['start'] - bp).clip(**limits),
end=(table['end'] + bp).clip(**limits))
if bp < 0:
ok_size = table['end'] - table['start'] > 0
logging.debug('Dropping %d bins with size <= 0', (~ok_size).sum())
table = table[ok_size]
return self.as_dataframe(table.copy())
def squash(self, combine=None):
"""Combine some groups of rows, by some criteria, into single rows."""
return NotImplemented
def subdivide(self, avg_size: int, min_size: int=0, verbose: bool=False):
"""Split this array's regions into roughly equal-sized sub-regions."""
return self.as_dataframe(subdivide(self.data, avg_size, min_size,
verbose))
<mask token>
def total_range_size(self) ->int:
"""Total number of bases covered by all (merged) regions."""
if not len(self):
return 0
regions = merge(self.data, bp=1)
return regions.end.sum() - regions.start.sum()
def _get_gene_map(self) ->OrderedDict:
"""Map unique gene names to their indices in this array.
Returns
-------
OrderedDict
An (ordered) dictionary of unique gene names and the data indices of
their segments in the order of occurrence (genomic order).
"""
if 'gene' not in self.data:
return OrderedDict()
genes: OrderedDict = OrderedDict()
for idx, genestr in self.data['gene'].items():
if pd.isnull(genestr):
continue
for gene in genestr.split(','):
if gene not in genes:
genes[gene] = []
genes[gene].append(idx)
return genes
|
"""Base class for an array of annotated genomic regions."""
import logging
from typing import Callable, Dict, Iterable, Iterator, Mapping, Optional, Sequence, Union
from collections import OrderedDict
import numpy as np
import pandas as pd
from .chromsort import sorter_chrom
from .intersect import by_ranges, into_ranges, iter_ranges, iter_slices, Numeric
from .merge import flatten, merge
from .rangelabel import to_label
from .subtract import subtract
from .subdivide import subdivide
class GenomicArray:
"""An array of genomic intervals. Base class for genomic data structures.
Can represent most BED-like tabular formats with arbitrary additional
columns.
"""
_required_columns = ("chromosome", "start", "end")
_required_dtypes = (str, int, int)
def __init__(
self,
data_table: Optional[Union[Sequence, pd.DataFrame]],
meta_dict: Optional[Mapping] = None,
):
# Validation
if (
data_table is None
or (isinstance(data_table, (list, tuple)) and not len(data_table))
or (isinstance(data_table, pd.DataFrame) and not len(data_table.columns))
):
data_table = self._make_blank()
else:
if not isinstance(data_table, pd.DataFrame):
# Rarely if ever needed -- prefer from_rows, from_columns, etc.
data_table = pd.DataFrame(data_table)
if not all(c in data_table.columns for c in self._required_columns):
raise ValueError(
"data table must have at least columns "
+ f"{self._required_columns!r}; got {tuple(data_table.columns)!r}"
)
# Ensure columns are the right type
# (in case they've been automatically converted to the wrong type,
# e.g. chromosome names as integers; genome coordinates as floats)
if len(data_table):
def ok_dtype(col, dtype):
return isinstance(data_table[col].iat[0], dtype)
else:
def ok_dtype(col, dtype):
return data_table[col].dtype == np.dtype(dtype)
recast_cols = {
col: dtype
for col, dtype in zip(self._required_columns, self._required_dtypes)
if not ok_dtype(col, dtype)
}
if recast_cols:
data_table = data_table.astype(recast_cols)
self.data = data_table
self.meta = dict(meta_dict) if meta_dict is not None and len(meta_dict) else {}
@classmethod
def _make_blank(cls) -> pd.DataFrame:
"""Create an empty dataframe with the columns required by this class."""
spec = list(zip(cls._required_columns, cls._required_dtypes))
try:
arr = np.zeros(0, dtype=spec)
return pd.DataFrame(arr)
except TypeError as exc:
raise TypeError(r"{exc}: {spec}") from exc
@classmethod
def from_columns(
cls, columns: Mapping[str, Iterable], meta_dict: Optional[Mapping] = None
):
"""Create a new instance from column arrays, given as a dict."""
table = pd.DataFrame.from_dict(columns)
ary = cls(table, meta_dict)
ary.sort_columns()
return ary
@classmethod
def from_rows(
cls,
rows: Iterable,
columns: Optional[Sequence[str]] = None,
meta_dict: Optional[Mapping] = None,
):
"""Create a new instance from a list of rows, as tuples or arrays."""
if columns is None:
columns = cls._required_columns
table = pd.DataFrame.from_records(rows, columns=columns)
return cls(table, meta_dict)
def as_columns(self, **columns):
"""Wrap the named columns in this instance's metadata."""
return self.__class__.from_columns(columns, self.meta)
# return self.__class__(self.data.loc[:, columns], self.meta.copy())
def as_dataframe(self, dframe: pd.DataFrame, reset_index: bool = False):
"""Wrap the given pandas DataFrame in this instance's metadata."""
if reset_index:
dframe = dframe.reset_index(drop=True)
return self.__class__(dframe, self.meta.copy())
def as_series(self, arraylike: Iterable) -> pd.Series:
"""Coerce `arraylike` to a Series with this instance's index."""
return pd.Series(arraylike, index=self.data.index)
def as_rows(self, rows: Iterable):
"""Wrap the given rows in this instance's metadata."""
try:
out = self.from_rows(rows, columns=self.data.columns, meta_dict=self.meta)
except AssertionError as exc:
columns = self.data.columns.tolist()
firstrow = next(iter(rows))
raise RuntimeError(
f"Passed {len(columns)} columns {columns!r}, but "
f"{len(firstrow)} elements in first row: {firstrow}"
) from exc
return out
# Container behaviour
def __bool__(self) -> bool:
return bool(len(self.data))
def __eq__(self, other) -> bool:
return isinstance(other, self.__class__) and self.data.equals(other.data)
def __len__(self) -> int:
return len(self.data)
def __contains__(self, key) -> bool:
return key in self.data.columns
def __getitem__(self, index) -> Union[pd.Series, pd.DataFrame]:
"""Access a portion of the data.
Cases:
- single integer: a row, as pd.Series
- string row name: a column, as pd.Series
- a boolean array: masked rows, as_dataframe
- tuple of integers: selected rows, as_dataframe
"""
if isinstance(index, int):
# A single row
return self.data.iloc[index]
# return self.as_dataframe(self.data.iloc[index:index+1])
if isinstance(index, str):
# A column, by name
return self.data[index]
if (
isinstance(index, tuple)
and len(index) == 2
and index[1] in self.data.columns
):
# Row index, column index -> cell value
return self.data.loc[index]
if isinstance(index, slice):
# return self.as_dataframe(self.data.take(index))
return self.as_dataframe(self.data[index])
# Iterable -- selected row indices or boolean array, probably
try:
if isinstance(index, type(None)) or len(index) == 0:
empty = pd.DataFrame(columns=self.data.columns)
return self.as_dataframe(empty)
except TypeError as exc:
raise TypeError(
f"object of type {type(index)!r} "
f"cannot be used as an index into a {self.__class__.__name__}"
) from exc
return self.as_dataframe(self.data[index])
# return self.as_dataframe(self.data.take(index))
def __setitem__(self, index, value):
"""Assign to a portion of the data."""
if isinstance(index, int):
self.data.iloc[index] = value
elif isinstance(index, str):
self.data[index] = value
elif (
isinstance(index, tuple)
and len(index) == 2
and index[1] in self.data.columns
):
self.data.loc[index] = value
else:
assert isinstance(index, slice) or len(index) > 0
self.data[index] = value
def __delitem__(self, index):
return NotImplemented
def __iter__(self):
return self.data.itertuples(index=False)
__next__ = next
@property
def chromosome(self) -> pd.Series:
"""Get column 'chromosome'."""
return self.data["chromosome"]
@property
def start(self) -> pd.Series:
"""Get column 'start'."""
return self.data["start"]
@property
def end(self) -> pd.Series:
"""Get column 'end'."""
return self.data["end"]
@property
def sample_id(self) -> pd.Series:
"""Get metadata field 'sample_id'."""
return self.meta.get("sample_id")
# Traversal
def autosomes(self, also=None):
"""Select chromosomes w/ integer names, ignoring any 'chr' prefixes."""
is_auto = self.chromosome.str.match(r"(chr)?\d+$", na=False)
if not is_auto.any():
# The autosomes, if any, are not named with plain integers
return self
if also is not None:
if isinstance(also, pd.Series):
is_auto |= also
else:
# The assumption is that `also` is a single chromosome name or an iterable thereof.
if isinstance(also, str):
also = [also]
for a_chrom in also:
is_auto |= self.chromosome == a_chrom
return self[is_auto]
def by_arm(self, min_gap_size: Union[int, float] = 1e5, min_arm_bins: int = 50):
"""Iterate over bins grouped by chromosome arm (inferred)."""
# ENH:
# - Accept GArray of actual centromere regions as input
# -> find largest gap (any size) within cmere region, split there
# - Cache centromere locations once found
self.data.chromosome = self.data.chromosome.astype(str)
for chrom, subtable in self.data.groupby("chromosome", sort=False):
margin = max(min_arm_bins, int(round(0.1 * len(subtable))))
if len(subtable) > 2 * margin + 1:
# Found a candidate centromere
gaps = (
subtable.start.values[margin + 1 : -margin]
- subtable.end.values[margin : -margin - 1]
)
cmere_idx = gaps.argmax() + margin + 1
cmere_size = gaps[cmere_idx - margin - 1]
else:
cmere_idx = 0
cmere_size = 0
if cmere_idx and cmere_size >= min_gap_size:
logging.debug(
"%s centromere at %d of %d bins (size %s)",
chrom,
cmere_idx,
len(subtable),
cmere_size,
)
p_arm = subtable.index[:cmere_idx]
yield chrom, self.as_dataframe(subtable.loc[p_arm, :])
q_arm = subtable.index[cmere_idx:]
yield chrom, self.as_dataframe(subtable.loc[q_arm, :])
else:
# No centromere found -- emit the whole chromosome
if cmere_idx:
logging.debug(
"%s: Ignoring centromere at %d of %d bins (size %s)",
chrom,
cmere_idx,
len(subtable),
cmere_size,
)
else:
logging.debug("%s: Skipping centromere search, too small", chrom)
yield chrom, self.as_dataframe(subtable)
def by_chromosome(self) -> Iterator:
"""Iterate over bins grouped by chromosome name."""
for chrom, subtable in self.data.groupby("chromosome", sort=False):
yield chrom, self.as_dataframe(subtable)
def by_ranges(
self, other, mode: str = "outer", keep_empty: bool = True
) -> Iterator:
"""Group rows by another GenomicArray's bin coordinate ranges.
For example, this can be used to group SNVs by CNV segments.
Bins in this array that fall outside the other array's bins are skipped.
Parameters
----------
other : GenomicArray
Another GA instance.
mode : string
Determines what to do with bins that overlap a boundary of the
selection. Possible values are:
- ``inner``: Drop the bins on the selection boundary, don't emit them.
- ``outer``: Keep/emit those bins as they are.
- ``trim``: Emit those bins but alter their boundaries to match the
selection; the bin start or end position is replaced with the
selection boundary position.
keep_empty : bool
Whether to also yield `other` bins with no overlapping bins in
`self`, or to skip them when iterating.
Yields
------
tuple
(other bin, GenomicArray of overlapping rows in self)
"""
for bin_row, subrange in by_ranges(self.data, other.data, mode, keep_empty):
if len(subrange):
yield bin_row, self.as_dataframe(subrange)
elif keep_empty:
yield bin_row, self.as_rows(subrange)
def coords(self, also: Union[str, Iterable[str]] = ()):
"""Iterate over plain coordinates of each bin: chromosome, start, end.
Parameters
----------
also : str, or iterable of strings
Also include these columns from `self`, in addition to chromosome,
start, and end.
Example, yielding rows in BED format:
>>> probes.coords(also=["gene", "strand"])
"""
cols = list(GenomicArray._required_columns)
if also:
if isinstance(also, str):
cols.append(also)
else:
cols.extend(also)
coordframe = self.data.loc[:, cols]
return coordframe.itertuples(index=False)
def labels(self) -> pd.Series:
"""Get chromosomal coordinates as genomic range labels."""
return self.data.apply(to_label, axis=1)
def in_range(
self,
chrom: Optional[str] = None,
start: Optional[Numeric] = None,
end: Optional[Numeric] = None,
mode: str = "outer",
):
"""Get the GenomicArray portion within the given genomic range.
Parameters
----------
chrom : str or None
Chromosome name to select. Use None if `self` has only one
chromosome.
start : int or None
Start coordinate of range to select, in 0-based coordinates.
If None, start from 0.
end : int or None
End coordinate of range to select. If None, select to the end of the
chromosome.
mode : str
As in `by_ranges`: ``outer`` includes bins straddling the range
boundaries, ``trim`` additionally alters the straddling bins'
endpoints to match the range boundaries, and ``inner`` excludes
those bins.
Returns
-------
GenomicArray
The subset of `self` enclosed by the specified range.
"""
starts = [int(start)] if start is not None else None
ends = [int(end)] if end is not None else None
results = iter_ranges(self.data, chrom, starts, ends, mode)
return self.as_dataframe(next(results))
def in_ranges(
self,
chrom: Optional[str] = None,
starts: Optional[Sequence[Numeric]] = None,
ends: Optional[Sequence[Numeric]] = None,
mode: str = "outer",
):
"""Get the GenomicArray portion within the specified ranges.
Similar to `in_ranges`, but concatenating the selections of all the
regions specified by the `starts` and `ends` arrays.
Parameters
----------
chrom : str or None
Chromosome name to select. Use None if `self` has only one
chromosome.
starts : int array, or None
Start coordinates of ranges to select, in 0-based coordinates.
If None, start from 0.
ends : int array, or None
End coordinates of ranges to select. If None, select to the end of the
chromosome. If `starts` and `ends` are both specified, they must be
arrays of equal length.
mode : str
As in `by_ranges`: ``outer`` includes bins straddling the range
boundaries, ``trim`` additionally alters the straddling bins'
endpoints to match the range boundaries, and ``inner`` excludes
those bins.
Returns
-------
GenomicArray
Concatenation of all the subsets of `self` enclosed by the specified
ranges.
"""
table = pd.concat(iter_ranges(self.data, chrom, starts, ends, mode), sort=False)
return self.as_dataframe(table)
def into_ranges(
self, other, column: str, default, summary_func: Optional[Callable] = None
):
"""Re-bin values from `column` into the corresponding ranges in `other`.
Match overlapping/intersecting rows from `other` to each row in `self`.
Then, within each range in `other`, extract the value(s) from `column`
in `self`, using the function `summary_func` to produce a single value
if multiple bins in `self` map to a single range in `other`.
For example, group SNVs (self) by CNV segments (other) and calculate the
median (summary_func) of each SNV group's allele frequencies.
Parameters
----------
other : GenomicArray
Ranges into which the overlapping values of `self` will be
summarized.
column : string
Column name in `self` to extract values from.
default
Value to assign to indices in `other` that do not overlap any bins in
`self`. Type should be the same as or compatible with the output
field specified by `column`, or the output of `summary_func`.
summary_func : callable, dict of string-to-callable, or None
Specify how to reduce 1 or more `other` rows into a single value for
the corresponding row in `self`.
- If callable, apply to the `column` field each group of rows in
`other` column.
- If a single-element dict of column name to callable, apply to that
field in `other` instead of `column`.
- If None, use an appropriate summarizing function for the datatype
of the `column` column in `other` (e.g. median of numbers,
concatenation of strings).
- If some other value, assign that value to `self` wherever there is
an overlap.
Returns
-------
pd.Series
The extracted and summarized values from `self` corresponding to
other's genomic ranges, the same length as `other`.
"""
if column not in self:
logging.warning("No '%s' column available for summary calculation", column)
return pd.Series(np.repeat(default, len(other)))
return into_ranges(self.data, other.data, column, default, summary_func)
def iter_ranges_of(
self, other, column: str, mode: str = "outer", keep_empty: bool = True
):
"""Group rows by another GenomicArray's bin coordinate ranges.
For example, this can be used to group SNVs by CNV segments.
Bins in this array that fall outside the other array's bins are skipped.
Parameters
----------
other : GenomicArray
Another GA instance.
column : string
Column name in `self` to extract values from.
mode : string
Determines what to do with bins that overlap a boundary of the
selection. Possible values are:
- ``inner``: Drop the bins on the selection boundary, don't emit them.
- ``outer``: Keep/emit those bins as they are.
- ``trim``: Emit those bins but alter their boundaries to match the
selection; the bin start or end position is replaced with the
selection boundary position.
keep_empty : bool
Whether to also yield `other` bins with no overlapping bins in
`self`, or to skip them when iterating.
Yields
------
tuple
(other bin, GenomicArray of overlapping rows in self)
"""
if column not in self.data.columns:
raise ValueError(f"No column named {column!r} in this object")
ser = self.data[column]
for slc in iter_slices(self.data, other.data, mode, keep_empty):
yield ser[slc]
# Modification
def add(self, other):
"""Combine this array's data with another GenomicArray (in-place).
Any optional columns must match between both arrays.
"""
if not isinstance(other, self.__class__):
raise ValueError(
f"Argument (type {type(other)}) is not a {self.__class__} instance"
)
if len(other.data):
self.data = pd.concat([self.data, other.data], ignore_index=True)
self.sort()
def concat(self, others):
"""Concatenate several GenomicArrays, keeping this array's metadata.
This array's data table is not implicitly included in the result.
"""
table = pd.concat([otr.data for otr in others], ignore_index=True)
result = self.as_dataframe(table)
result.sort()
return result
def copy(self):
"""Create an independent copy of this object."""
return self.as_dataframe(self.data.copy())
def add_columns(self, **columns):
"""Add the given columns to a copy of this GenomicArray.
Parameters
----------
**columns : array
Keyword arguments where the key is the new column's name and the
value is an array of the same length as `self` which will be the new
column's values.
Returns
-------
GenomicArray or subclass
A new instance of `self` with the given columns included in the
underlying dataframe.
"""
return self.as_dataframe(self.data.assign(**columns))
def keep_columns(self, colnames):
"""Extract a subset of columns, reusing this instance's metadata."""
colnames = self.data.columns.intersection(colnames)
return self.__class__(self.data.loc[:, colnames], self.meta.copy())
def drop_extra_columns(self):
"""Remove any optional columns from this GenomicArray.
Returns
-------
GenomicArray or subclass
A new copy with only the minimal set of columns required by the
class (e.g. chromosome, start, end for GenomicArray; may be more for
subclasses).
"""
table = self.data.loc[:, self._required_columns]
return self.as_dataframe(table)
def filter(self, func=None, **kwargs):
"""Take a subset of rows where the given condition is true.
Parameters
----------
func : callable
A boolean function which will be applied to each row to keep rows
where the result is True.
**kwargs : string
Keyword arguments like ``chromosome="chr7"`` or
``gene="Antitarget"``, which will keep rows where the keyed field
equals the specified value.
Return
------
GenomicArray
Subset of `self` where the specified condition is True.
"""
table = self.data
if func is not None:
table = table[table.apply(func, axis=1)]
for key, val in list(kwargs.items()):
assert key in self
table = table[table[key] == val]
return self.as_dataframe(table)
    def shuffle(self):
        """Randomize the order of bins in this array (in-place).

        Returns
        -------
        np.ndarray
            The permutation applied to the row positions, so a caller can
            replay or invert the shuffle.
        """
        order = np.arange(len(self.data))
        # NOTE(review): reseeds numpy's *global* RNG with a fixed constant, so
        # every call yields the same permutation and affects other np.random
        # users in the process — presumably deliberate for reproducibility;
        # confirm before changing.
        np.random.seed(0xA5EED)
        np.random.shuffle(order)
        self.data = self.data.iloc[order]
        return order
def sort(self):
"""Sort this array's bins in-place, with smart chromosome ordering."""
sort_key = self.data.chromosome.apply(sorter_chrom)
self.data = (
self.data.assign(_sort_key_=sort_key)
.sort_values(by=["_sort_key_", "start", "end"], kind="mergesort")
.drop("_sort_key_", axis=1)
.reset_index(drop=True)
)
def sort_columns(self):
"""Sort this array's columns in-place, per class definition."""
extra_cols = []
for col in self.data.columns:
if col not in self._required_columns:
extra_cols.append(col)
sorted_colnames = list(self._required_columns) + sorted(extra_cols)
assert len(sorted_colnames) == len(self.data.columns)
self.data = self.data.reindex(columns=sorted_colnames)
# Genome arithmetic
def cut(self, other, combine=None):
"""Split this array's regions at the boundaries in `other`."""
# TODO
return NotImplemented
def flatten(
self,
combine: Optional[Dict[str, Callable]] = None,
split_columns: Optional[Iterable[str]] = None,
):
"""Split this array's regions where they overlap."""
return self.as_dataframe(
flatten(self.data, combine=combine, split_columns=split_columns)
)
def intersection(self, other, mode: str = "outer"):
"""Select the bins in `self` that overlap the regions in `other`.
The extra fields of `self`, but not `other`, are retained in the output.
"""
# TODO options for which extra fields to keep
# by default, keep just the fields in 'table'
if mode == "trim":
# Slower
chunks = [
chunk.data
for _, chunk in self.by_ranges(other, mode=mode, keep_empty=False)
]
return self.as_dataframe(pd.concat(chunks))
# Faster
slices = iter_slices(self.data, other.data, mode, False)
indices = np.concatenate(list(slices))
return self.as_dataframe(self.data.loc[indices])
def merge(
self,
bp: int = 0,
stranded: bool = False,
combine: Optional[Dict[str, Callable]] = None,
):
"""Merge adjacent or overlapping regions into single rows.
Similar to 'bedtools merge'.
"""
return self.as_dataframe(merge(self.data, bp, stranded, combine))
def resize_ranges(self, bp: int, chrom_sizes: Optional[Mapping[str, Numeric]] = None):
"""Resize each genomic bin by a fixed number of bases at each end.
Bin 'start' values have a minimum of 0, and `chrom_sizes` can
specify each chromosome's maximum 'end' value.
Similar to 'bedtools slop'.
Parameters
----------
bp : int
Number of bases in each direction to expand or shrink each bin.
Applies to 'start' and 'end' values symmetrically, and may be
positive (expand) or negative (shrink).
chrom_sizes : dict of string-to-int
Chromosome name to length in base pairs. If given, all chromosomes
in `self` must be included.
"""
table = self.data
limits = {"lower": 0}
if chrom_sizes:
limits["upper"] = self.chromosome.replace(chrom_sizes)
table = table.assign(
start=(table["start"] - bp).clip(**limits),
end=(table["end"] + bp).clip(**limits),
)
if bp < 0:
# Drop any bins that now have zero or negative size
ok_size = table["end"] - table["start"] > 0
logging.debug("Dropping %d bins with size <= 0", (~ok_size).sum())
table = table[ok_size]
# Don't modify the original
return self.as_dataframe(table.copy())
def squash(self, combine=None):
"""Combine some groups of rows, by some criteria, into single rows."""
# TODO
return NotImplemented
def subdivide(self, avg_size: int, min_size: int = 0, verbose: bool = False):
"""Split this array's regions into roughly equal-sized sub-regions."""
return self.as_dataframe(subdivide(self.data, avg_size, min_size, verbose))
def subtract(self, other):
"""Remove the overlapping regions in `other` from this array."""
return self.as_dataframe(subtract(self.data, other.data))
    def total_range_size(self) -> int:
        """Total number of bases covered by all (merged) regions.

        Bins are first combined via the project helper ``merge(..., bp=1)`` —
        presumably joining bins within 1 bp so shared bases are not
        double-counted (TODO: confirm against merge's docs).

        Returns
        -------
        int
            Sum of (end - start) over the merged regions; 0 if the array is
            empty.
        """
        if not len(self):
            return 0
        regions = merge(self.data, bp=1)
        return regions.end.sum() - regions.start.sum()
def _get_gene_map(self) -> OrderedDict:
"""Map unique gene names to their indices in this array.
Returns
-------
OrderedDict
An (ordered) dictionary of unique gene names and the data indices of
their segments in the order of occurrence (genomic order).
"""
if "gene" not in self.data:
return OrderedDict()
genes: OrderedDict = OrderedDict()
for idx, genestr in self.data["gene"].items():
if pd.isnull(genestr):
continue
for gene in genestr.split(","):
if gene not in genes:
genes[gene] = []
genes[gene].append(idx)
return genes
|
[
19,
20,
39,
41,
55
] |
2,430 |
5debc97e99bbd78b17e545896d718d4b0eac8519
|
<mask token>
|
<mask token>
app_name = 'cae_web_audio_visual'
urlpatterns = []
|
<mask token>
from django.conf.urls import url
from . import views
app_name = 'cae_web_audio_visual'
urlpatterns = []
|
"""
Urls for CAE_Web Audio_Visual app.
"""
from django.conf.urls import url
from . import views
app_name = 'cae_web_audio_visual'
urlpatterns = [
]
| null |
[
0,
1,
2,
3
] |
2,431 |
af9adc0faad4fc1426a2bd75c1c77e23e37b60bf
|
<mask token>
def f(a):
list1 = []
dict1 = {(1): 'one', (2): 'two', (3): 'three', (4): 'four', (5): 'five',
(6): 'six', (7): 'seven', (8): 'eight', (9): 'nine', (0): 'zero'}
for i in list(a):
list1.append(dict1[int(i)])
print('-'.join(list1))
<mask token>
def fa(x):
dict2 = {(1): 'one', (2): 'two', (3): 'three', (4): 'four', (5): 'five',
(6): 'six', (7): 'seven', (8): 'eight', (9): 'nine', (0): 'zero'}
return dict2[int(x)]
<mask token>
|
<mask token>
for i in range(1, n + 1):
factorial = i * factorial
sum += factorial
print(f'阶乘之和{sum}')
<mask token>
for i in range(1, n + 1):
F = math.factorial(i)
sum1 += F
print(f'阶乘之和{sum1}')
<mask token>
def f(a):
list1 = []
dict1 = {(1): 'one', (2): 'two', (3): 'three', (4): 'four', (5): 'five',
(6): 'six', (7): 'seven', (8): 'eight', (9): 'nine', (0): 'zero'}
for i in list(a):
list1.append(dict1[int(i)])
print('-'.join(list1))
f(str)
<mask token>
def fa(x):
dict2 = {(1): 'one', (2): 'two', (3): 'three', (4): 'four', (5): 'five',
(6): 'six', (7): 'seven', (8): 'eight', (9): 'nine', (0): 'zero'}
return dict2[int(x)]
<mask token>
print('-'.join(r))
<mask token>
for x in follow_list['data']['follow_list']:
if x['is_vip'] == 1:
print(f"土豪{x['nickname']},我关注了你,给我打赏吧")
for x in follow_list['data']['follow_list']:
if x['is_vip'] == 1 and x['is_friend'] == 0:
print(f"土豪{x['nickname']},我关注了你,给个好友位吧")
|
<mask token>
n = 10
factorial = 1
sum = 0
for i in range(1, n + 1):
factorial = i * factorial
sum += factorial
print(f'阶乘之和{sum}')
sum1 = 0
n = 10
for i in range(1, n + 1):
F = math.factorial(i)
sum1 += F
print(f'阶乘之和{sum1}')
<mask token>
str = '13543897565'
def f(a):
list1 = []
dict1 = {(1): 'one', (2): 'two', (3): 'three', (4): 'four', (5): 'five',
(6): 'six', (7): 'seven', (8): 'eight', (9): 'nine', (0): 'zero'}
for i in list(a):
list1.append(dict1[int(i)])
print('-'.join(list1))
f(str)
str1 = '13543897565'
def fa(x):
dict2 = {(1): 'one', (2): 'two', (3): 'three', (4): 'four', (5): 'five',
(6): 'six', (7): 'seven', (8): 'eight', (9): 'nine', (0): 'zero'}
return dict2[int(x)]
r = map(fa, list(str1))
print('-'.join(r))
<mask token>
follow_list = {'status': 'ok', 'data': {'follow_list': [{'user_id':
'32804516', 'nickname': '羽秋璃1111233', 'is_friend': 0, 'is_vip': 1}, {
'user_id': '35742446', 'nickname': '我是你的宝贝哦', 'is_friend': 1, 'is_vip':
1}, {'user_id': '264844', 'nickname': '大鱼噢大鱼', 'is_friend': 0, 'is_vip':
1}, {'user_id': '34362681', 'nickname': '薛一十三', 'is_friend': 0,
'is_vip': 0}]}}
for x in follow_list['data']['follow_list']:
if x['is_vip'] == 1:
print(f"土豪{x['nickname']},我关注了你,给我打赏吧")
for x in follow_list['data']['follow_list']:
if x['is_vip'] == 1 and x['is_friend'] == 0:
print(f"土豪{x['nickname']},我关注了你,给个好友位吧")
|
import math
<mask token>
n = 10
factorial = 1
sum = 0
for i in range(1, n + 1):
factorial = i * factorial
sum += factorial
print(f'阶乘之和{sum}')
sum1 = 0
n = 10
for i in range(1, n + 1):
F = math.factorial(i)
sum1 += F
print(f'阶乘之和{sum1}')
<mask token>
str = '13543897565'
def f(a):
list1 = []
dict1 = {(1): 'one', (2): 'two', (3): 'three', (4): 'four', (5): 'five',
(6): 'six', (7): 'seven', (8): 'eight', (9): 'nine', (0): 'zero'}
for i in list(a):
list1.append(dict1[int(i)])
print('-'.join(list1))
f(str)
str1 = '13543897565'
def fa(x):
dict2 = {(1): 'one', (2): 'two', (3): 'three', (4): 'four', (5): 'five',
(6): 'six', (7): 'seven', (8): 'eight', (9): 'nine', (0): 'zero'}
return dict2[int(x)]
r = map(fa, list(str1))
print('-'.join(r))
<mask token>
follow_list = {'status': 'ok', 'data': {'follow_list': [{'user_id':
'32804516', 'nickname': '羽秋璃1111233', 'is_friend': 0, 'is_vip': 1}, {
'user_id': '35742446', 'nickname': '我是你的宝贝哦', 'is_friend': 1, 'is_vip':
1}, {'user_id': '264844', 'nickname': '大鱼噢大鱼', 'is_friend': 0, 'is_vip':
1}, {'user_id': '34362681', 'nickname': '薛一十三', 'is_friend': 0,
'is_vip': 0}]}}
for x in follow_list['data']['follow_list']:
if x['is_vip'] == 1:
print(f"土豪{x['nickname']},我关注了你,给我打赏吧")
for x in follow_list['data']['follow_list']:
if x['is_vip'] == 1 and x['is_friend'] == 0:
print(f"土豪{x['nickname']},我关注了你,给个好友位吧")
|
# -*- coding: utf-8 -*-
# @Time : 2020/6/12 20:19
# @Author : damon
# @Site :
# @File : work0612
# @Software: PyCharm
import math
"""
1、给定n=10,计算1! + 2! + 3! + ... + n!的值
"""
# 解法1:
n = 10
factorial = 1
sum = 0
for i in range(1, n+1):
factorial = i * factorial
sum += factorial
print(f"阶乘之和{sum}")
# 解法2:
sum1 = 0
n = 10
for i in range(1, n + 1):
F = math.factorial(i)
sum1 += F
print(f"阶乘之和{sum1}")
"""
2、给一个数字字符串13543897565,把每一位对应的数字转换成英文数字(例如:“123” -> "one-two-three")
"""
str = '13543897565'
def f(a):
list1 = []
dict1 = {1: "one", 2: "two", 3: "three", 4: "four", 5: "five", 6: "six", 7: "seven", 8: "eight", 9: "nine", 0: "zero"}
for i in list(a):
list1.append(dict1[int(i)])
print("-".join(list1))
f(str)
str1 = '13543897565'
def fa(x):
dict2 = {1: "one", 2: "two", 3: "three", 4: "four", 5: "five",
6: "six", 7: "seven", 8: "eight", 9: "nine", 0: "zero"}
return dict2[int(x)]
r = map(fa, list(str1))
print('-'.join(r))
"""
3、我的关注列表follow_list = {"status":"ok","data":{"follow_list":[{"user_id":"32804516","nickname":"羽秋璃1111233","is_friend":0,"is_vip":1},{"user_id":"35742446","nickname":"我是你的宝贝哦","is_friend":1,"is_vip":1},{"user_id":"264844","nickname":"大鱼噢大鱼","is_friend":0,"is_vip":1},{"user_id":"34362681","nickname":"薛一十三","is_friend":0,"is_vip":0}]}}
(1)如果用户是vip,对用户说“土豪xxx,我关注了你,给个打赏吧”(xxx是用户昵称)
(2)如果用户不是好友关系但是vip(is_friend=0, is_vip=1),对用户说“土豪xxx,我关注了你,给个好友位吧”
"""
follow_list = {"status":"ok","data":{"follow_list":[
{"user_id":"32804516","nickname":"羽秋璃1111233","is_friend":0,"is_vip":1},
{"user_id":"35742446","nickname":"我是你的宝贝哦","is_friend":1,"is_vip":1},
{"user_id":"264844","nickname":"大鱼噢大鱼","is_friend":0,"is_vip":1},
{"user_id":"34362681","nickname":"薛一十三","is_friend":0,"is_vip":0}]}}
for x in follow_list['data']['follow_list']:
if x['is_vip'] == 1:
print(f"土豪{x['nickname']},我关注了你,给我打赏吧")
for x in follow_list['data']['follow_list']:
if x['is_vip'] == 1 and x['is_friend'] == 0:
print(f"土豪{x['nickname']},我关注了你,给个好友位吧")
|
[
2,
3,
4,
5,
6
] |
2,432 |
2ee5991e2b6de6ee48c8207f2b78574fc8a02fc0
|
#! /usr/bin/python
# Project Euler problem 21
"""Let d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n).
If d(a) = b and d(b) = a, where a != b, then a and b are an amicable pair and each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10,000."""
import math
# This is inefficient.
def get_divs(n):
divs = [1]
check = 2
rootn = math.sqrt(n)
while check < rootn:
if n % check == 0:
divs.append(check)
divs.append(n / check)
check += 1
if rootn == check:
divs.append(check)
divs.sort()
return divs
def amicable(a):
b = sum(get_divs(a))
if a == b: return 0
sum_b_divs = sum(get_divs(b))
if a == sum_b_divs:
return b
return 0
print sum([amicable(i) for i in range(1, 10000)])
| null | null | null | null |
[
0
] |
2,433 |
6216a5e45fee8ade5ec9072c42c1b08f3b0f4c65
|
<mask token>
|
class Solution:
<mask token>
|
class Solution:
def validIPAddress(self, IP):
"""
:type IP: str
:rtype: str
"""
def validateIPv4(IP):
digits = IP.split('.')
if len(digits) != 4:
return False
for digitstr in digits:
if len(digitstr) > 3 or len(digitstr) <= 0:
return False
try:
digit = int(digitstr)
except:
return False
if digit > 255 or digit < 0:
return False
if len(str(digit)) != len(digitstr):
return False
return True
def validateIPv6(IP):
hexDigits = IP.split(':')
if len(hexDigits) != 8:
return False
for hexDigitStr in hexDigits:
if len(hexDigitStr) > 4 or len(hexDigitStr) <= 0:
return False
for char in hexDigitStr:
try:
int(char)
except:
if ord(char.lower()) - ord('a') < 0 or ord(char.lower()
) - ord('a') > 5:
return False
return True
if validateIPv4(IP):
return 'IPv4'
elif validateIPv6(IP):
return 'IPv6'
else:
return 'Neither'
|
class Solution:
def validIPAddress(self, IP):
"""
:type IP: str
:rtype: str
"""
def validateIPv4(IP):
digits = IP.split('.')
if len(digits) != 4:
return False
for digitstr in digits:
if len(digitstr) > 3 or len(digitstr) <= 0:
return False
try:
digit = int(digitstr)
except:
return False
# check range
if digit > 255 or digit < 0:
return False
# check leading 0s
if len(str(digit)) != len(digitstr):
return False
return True
def validateIPv6(IP):
hexDigits = IP.split(':')
if len(hexDigits) != 8:
return False
for hexDigitStr in hexDigits:
if len(hexDigitStr) > 4 or len(hexDigitStr) <= 0:
return False
for char in hexDigitStr:
# check hexadecimal digit
try:
int(char)
except:
if ord(char.lower()) - ord('a') < 0 or \
ord(char.lower()) - ord('a') > 5:
return False
return True
if validateIPv4(IP):
return 'IPv4'
elif validateIPv6(IP):
return 'IPv6'
else:
return 'Neither'
# print(Solution().validIPAddress("172.16.254.1"))
# print(Solution().validIPAddress("2001:0db8:85a3:0:0:8A2E:0370:7334"))
# print(Solution().validIPAddress("256.256.256.256"))
# print(Solution().validIPAddress("172.16.254.01"))
# print(Solution().validIPAddress("2001:db8:85a3:0:0:8A2E:0370:7334"))
# print(Solution().validIPAddress("2001:0db8:85a3::8A2E:0370:7334"))
# print(Solution().validIPAddress("10:0df8:85a3:0:0:8a2e:037:7334"))
# print(Solution().validIPAddress("120.25.2.10"))
| null |
[
0,
1,
2,
3
] |
2,434 |
59338170b44be037f749790a7942c2bcca1fc078
|
#!/usr/bin/env python
###############################################################################
#
#
# Project:
# Purpose:
#
#
# Author: Massimo Di Stefano , [email protected]
#
###############################################################################
# Copyright (c) 2009, Massimo Di Stefano <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
__author__ = "Massimo Di Stefano"
__copyright__ = "Copyright 2009, gfoss.it"
__credits__ = [""]
__license__ = "GPL V3"
__version__ = "1.0.0"
__maintainer__ = "Massimo Di Stefano"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = ""
try:
from osgeo import osr, ogr, gdal
except ImportError:
import osr, ogr, gdal
import string
import sys
def GeomType2Name(type):
if type == ogr.wkbUnknown:
return 'wkbUnknown'
elif type == ogr.wkbPoint:
return 'wkbPoint'
elif type == ogr.wkbLineString:
return 'wkbLineString'
elif type == ogr.wkbPolygon:
return 'wkbPolygon'
elif type == ogr.wkbMultiPoint:
return 'wkbMultiPoint'
elif type == ogr.wkbMultiLineString:
return 'wkbMultiLineString'
elif type == ogr.wkbMultiPolygon:
return 'wkbMultiPolygon'
elif type == ogr.wkbGeometryCollection:
return 'wkbGeometryCollection'
elif type == ogr.wkbNone:
return 'wkbNone'
elif type == ogr.wkbLinearRing:
return 'wkbLinearRing'
else:
return 'wkbUnknown'
def Esc(x):
return gdal.EscapeString(x, gdal.CPLES_XML)
def makestile(outfile, brush, pen, size, fill, thickness):
brush = brush.split(',')
pen = pen.split(',')
size = size.split(',')
outfile = outfile.replace('.vrt', '')
outfile = outfile + '.omd'
omd = '// vector file rendering options\n'
omd += 'brush_color: %s %s %s \n' % (brush[0], brush[1], brush[2])
omd += 'pen_color: %s %s %s \n' % (pen[0], pen[1], pen[2])
omd += 'point_width_height: %s %s \n' % (size[0], size[1])
omd += 'fill_flag: %s \n' % (fill)
omd += 'thickness: %s \n' % (thickness)
open(outfile, 'w').write(omd)
def ogrvrt(infile, outfile):
layer_list = []
relative = "0"
schema = 0
print infile
src_ds = ogr.Open(infile, update=0)
if len(layer_list) == 0:
for layer in src_ds:
layer_list.append(layer.GetLayerDefn().GetName())
vrt = '<OGRVRTDataSource>\n'
for name in layer_list:
layer = src_ds.GetLayerByName(name)
layerdef = layer.GetLayerDefn()
vrt += ' <OGRVRTLayer name="%s">\n' % Esc(name)
vrt += ' <SrcDataSource relativeToVRT="%s" shared="%d">%s</SrcDataSource>\n' \
% (relative, not schema, Esc(infile))
if schema:
vrt += ' <SrcLayer>@dummy@</SrcLayer>\n'
else:
vrt += ' <SrcLayer>%s</SrcLayer>\n' % Esc(name)
vrt += ' <GeometryType>%s</GeometryType>\n' \
% GeomType2Name(layerdef.GetGeomType())
srs = layer.GetSpatialRef()
if srs is not None:
vrt += ' <LayerSRS>%s</LayerSRS>\n' \
% (Esc(srs.ExportToWkt()))
# Process all the fields.
for fld_index in range(layerdef.GetFieldCount()):
src_fd = layerdef.GetFieldDefn(fld_index)
if src_fd.GetType() == ogr.OFTInteger:
type = 'Integer'
elif src_fd.GetType() == ogr.OFTString:
type = 'String'
elif src_fd.GetType() == ogr.OFTReal:
type = 'Real'
elif src_fd.GetType() == ogr.OFTStringList:
type = 'StringList'
elif src_fd.GetType() == ogr.OFTIntegerList:
type = 'IntegerList'
elif src_fd.GetType() == ogr.OFTRealList:
type = 'RealList'
elif src_fd.GetType() == ogr.OFTBinary:
type = 'Binary'
elif src_fd.GetType() == ogr.OFTDate:
type = 'Date'
elif src_fd.GetType() == ogr.OFTTime:
type = 'Time'
elif src_fd.GetType() == ogr.OFTDateTime:
type = 'DateTime'
else:
type = 'String'
vrt += ' <Field name="%s" type="%s"' \
% (Esc(src_fd.GetName()), type)
if not schema:
vrt += ' src="%s"' % Esc(src_fd.GetName())
if src_fd.GetWidth() > 0:
vrt += ' width="%d"' % src_fd.GetWidth()
if src_fd.GetPrecision() > 0:
vrt += ' precision="%d"' % src_fd.GetPrecision()
vrt += '/>\n'
vrt += ' </OGRVRTLayer>\n'
vrt += '</OGRVRTDataSource>\n'
file = open(outfile, 'w')
file.write(vrt)
file.close()
print 'vrt wroted'
| null | null | null | null |
[
0
] |
2,435 |
e2f134f5ff00405396b8bbf4edc263b70ef5d972
|
<mask token>
class BytesWriter:
<mask token>
class BytesReader:
def read(self, n: int=...) ->bytes:
...
def seek(self, offset: int, whence: int=...) ->int:
...
<mask token>
|
<mask token>
class BytesWriter:
def write(self, data: bytes) ->None:
...
class BytesReader:
def read(self, n: int=...) ->bytes:
...
def seek(self, offset: int, whence: int=...) ->int:
...
<mask token>
|
<mask token>
if sys.version_info >= (3, 11):
from typing import assert_type
else:
from typing_extensions import assert_type
str_path: str
pathlib_path: pathlib.Path
str_file: IO[str]
bytes_file: IO[bytes]
npz_file: np.lib.npyio.NpzFile
AR_i8: npt.NDArray[np.int64]
AR_LIKE_f8: list[float]
class BytesWriter:
def write(self, data: bytes) ->None:
...
class BytesReader:
def read(self, n: int=...) ->bytes:
...
def seek(self, offset: int, whence: int=...) ->int:
...
bytes_writer: BytesWriter
bytes_reader: BytesReader
assert_type(npz_file.zip, zipfile.ZipFile)
assert_type(npz_file.fid, None | IO[str])
assert_type(npz_file.files, list[str])
assert_type(npz_file.allow_pickle, bool)
assert_type(npz_file.pickle_kwargs, None | Mapping[str, Any])
assert_type(npz_file.f, BagObj[np.lib.npyio.NpzFile])
assert_type(npz_file['test'], npt.NDArray[Any])
assert_type(len(npz_file), int)
with npz_file as f:
assert_type(f, np.lib.npyio.NpzFile)
assert_type(np.load(bytes_file), Any)
assert_type(np.load(pathlib_path, allow_pickle=True), Any)
assert_type(np.load(str_path, encoding='bytes'), Any)
assert_type(np.load(bytes_reader), Any)
assert_type(np.save(bytes_file, AR_LIKE_f8), None)
assert_type(np.save(pathlib_path, AR_i8, allow_pickle=True), None)
assert_type(np.save(str_path, AR_LIKE_f8), None)
assert_type(np.save(bytes_writer, AR_LIKE_f8), None)
assert_type(np.savez(bytes_file, AR_LIKE_f8), None)
assert_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)
assert_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez_compressed(bytes_file, AR_LIKE_f8), None)
assert_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)
assert_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.loadtxt(bytes_file), npt.NDArray[np.float64])
assert_type(np.loadtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])
assert_type(np.loadtxt(str_path, dtype=str, skiprows=2), npt.NDArray[Any])
assert_type(np.loadtxt(str_file, comments='test'), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_file, comments=None), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_path, delimiter='\n'), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64])
assert_type(np.loadtxt(['1', '2', '3']), npt.NDArray[np.float64])
assert_type(np.fromregex(bytes_file, 'test', np.float64), npt.NDArray[np.
float64])
assert_type(np.fromregex(str_file, b'test', dtype=float), npt.NDArray[Any])
assert_type(np.fromregex(str_path, re.compile('test'), dtype=np.str_,
encoding='utf8'), npt.NDArray[np.str_])
assert_type(np.fromregex(pathlib_path, 'test', np.float64), npt.NDArray[np.
float64])
assert_type(np.fromregex(bytes_reader, 'test', np.float64), npt.NDArray[np.
float64])
assert_type(np.genfromtxt(bytes_file), npt.NDArray[Any])
assert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])
assert_type(np.genfromtxt(str_path, dtype=str, skip_header=2), npt.NDArray[Any]
)
assert_type(np.genfromtxt(str_file, comments='test'), npt.NDArray[Any])
assert_type(np.genfromtxt(str_path, delimiter='\n'), npt.NDArray[Any])
assert_type(np.genfromtxt(str_path, ndmin=2), npt.NDArray[Any])
assert_type(np.genfromtxt(['1', '2', '3'], ndmin=2), npt.NDArray[Any])
|
import re
import sys
import zipfile
import pathlib
from typing import IO, Any
from collections.abc import Mapping
import numpy.typing as npt
import numpy as np
from numpy.lib._npyio_impl import BagObj
if sys.version_info >= (3, 11):
from typing import assert_type
else:
from typing_extensions import assert_type
str_path: str
pathlib_path: pathlib.Path
str_file: IO[str]
bytes_file: IO[bytes]
npz_file: np.lib.npyio.NpzFile
AR_i8: npt.NDArray[np.int64]
AR_LIKE_f8: list[float]
class BytesWriter:
def write(self, data: bytes) ->None:
...
class BytesReader:
def read(self, n: int=...) ->bytes:
...
def seek(self, offset: int, whence: int=...) ->int:
...
bytes_writer: BytesWriter
bytes_reader: BytesReader
assert_type(npz_file.zip, zipfile.ZipFile)
assert_type(npz_file.fid, None | IO[str])
assert_type(npz_file.files, list[str])
assert_type(npz_file.allow_pickle, bool)
assert_type(npz_file.pickle_kwargs, None | Mapping[str, Any])
assert_type(npz_file.f, BagObj[np.lib.npyio.NpzFile])
assert_type(npz_file['test'], npt.NDArray[Any])
assert_type(len(npz_file), int)
with npz_file as f:
assert_type(f, np.lib.npyio.NpzFile)
assert_type(np.load(bytes_file), Any)
assert_type(np.load(pathlib_path, allow_pickle=True), Any)
assert_type(np.load(str_path, encoding='bytes'), Any)
assert_type(np.load(bytes_reader), Any)
assert_type(np.save(bytes_file, AR_LIKE_f8), None)
assert_type(np.save(pathlib_path, AR_i8, allow_pickle=True), None)
assert_type(np.save(str_path, AR_LIKE_f8), None)
assert_type(np.save(bytes_writer, AR_LIKE_f8), None)
assert_type(np.savez(bytes_file, AR_LIKE_f8), None)
assert_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)
assert_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez_compressed(bytes_file, AR_LIKE_f8), None)
assert_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)
assert_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.loadtxt(bytes_file), npt.NDArray[np.float64])
assert_type(np.loadtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])
assert_type(np.loadtxt(str_path, dtype=str, skiprows=2), npt.NDArray[Any])
assert_type(np.loadtxt(str_file, comments='test'), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_file, comments=None), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_path, delimiter='\n'), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64])
assert_type(np.loadtxt(['1', '2', '3']), npt.NDArray[np.float64])
assert_type(np.fromregex(bytes_file, 'test', np.float64), npt.NDArray[np.
float64])
assert_type(np.fromregex(str_file, b'test', dtype=float), npt.NDArray[Any])
assert_type(np.fromregex(str_path, re.compile('test'), dtype=np.str_,
encoding='utf8'), npt.NDArray[np.str_])
assert_type(np.fromregex(pathlib_path, 'test', np.float64), npt.NDArray[np.
float64])
assert_type(np.fromregex(bytes_reader, 'test', np.float64), npt.NDArray[np.
float64])
assert_type(np.genfromtxt(bytes_file), npt.NDArray[Any])
assert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])
assert_type(np.genfromtxt(str_path, dtype=str, skip_header=2), npt.NDArray[Any]
)
assert_type(np.genfromtxt(str_file, comments='test'), npt.NDArray[Any])
assert_type(np.genfromtxt(str_path, delimiter='\n'), npt.NDArray[Any])
assert_type(np.genfromtxt(str_path, ndmin=2), npt.NDArray[Any])
assert_type(np.genfromtxt(['1', '2', '3'], ndmin=2), npt.NDArray[Any])
|
import re
import sys
import zipfile
import pathlib
from typing import IO, Any
from collections.abc import Mapping
import numpy.typing as npt
import numpy as np
from numpy.lib._npyio_impl import BagObj
if sys.version_info >= (3, 11):
from typing import assert_type
else:
from typing_extensions import assert_type
str_path: str
pathlib_path: pathlib.Path
str_file: IO[str]
bytes_file: IO[bytes]
npz_file: np.lib.npyio.NpzFile
AR_i8: npt.NDArray[np.int64]
AR_LIKE_f8: list[float]
class BytesWriter:
def write(self, data: bytes) -> None: ...
class BytesReader:
def read(self, n: int = ...) -> bytes: ...
def seek(self, offset: int, whence: int = ...) -> int: ...
bytes_writer: BytesWriter
bytes_reader: BytesReader
assert_type(npz_file.zip, zipfile.ZipFile)
assert_type(npz_file.fid, None | IO[str])
assert_type(npz_file.files, list[str])
assert_type(npz_file.allow_pickle, bool)
assert_type(npz_file.pickle_kwargs, None | Mapping[str, Any])
assert_type(npz_file.f, BagObj[np.lib.npyio.NpzFile])
assert_type(npz_file["test"], npt.NDArray[Any])
assert_type(len(npz_file), int)
with npz_file as f:
assert_type(f, np.lib.npyio.NpzFile)
assert_type(np.load(bytes_file), Any)
assert_type(np.load(pathlib_path, allow_pickle=True), Any)
assert_type(np.load(str_path, encoding="bytes"), Any)
assert_type(np.load(bytes_reader), Any)
assert_type(np.save(bytes_file, AR_LIKE_f8), None)
assert_type(np.save(pathlib_path, AR_i8, allow_pickle=True), None)
assert_type(np.save(str_path, AR_LIKE_f8), None)
assert_type(np.save(bytes_writer, AR_LIKE_f8), None)
assert_type(np.savez(bytes_file, AR_LIKE_f8), None)
assert_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)
assert_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez_compressed(bytes_file, AR_LIKE_f8), None)
assert_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)
assert_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.loadtxt(bytes_file), npt.NDArray[np.float64])
assert_type(np.loadtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])
assert_type(np.loadtxt(str_path, dtype=str, skiprows=2), npt.NDArray[Any])
assert_type(np.loadtxt(str_file, comments="test"), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_file, comments=None), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_path, delimiter="\n"), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64])
assert_type(np.loadtxt(["1", "2", "3"]), npt.NDArray[np.float64])
assert_type(np.fromregex(bytes_file, "test", np.float64), npt.NDArray[np.float64])
assert_type(np.fromregex(str_file, b"test", dtype=float), npt.NDArray[Any])
assert_type(np.fromregex(str_path, re.compile("test"), dtype=np.str_, encoding="utf8"), npt.NDArray[np.str_])
assert_type(np.fromregex(pathlib_path, "test", np.float64), npt.NDArray[np.float64])
assert_type(np.fromregex(bytes_reader, "test", np.float64), npt.NDArray[np.float64])
assert_type(np.genfromtxt(bytes_file), npt.NDArray[Any])
assert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])
assert_type(np.genfromtxt(str_path, dtype=str, skip_header=2), npt.NDArray[Any])
assert_type(np.genfromtxt(str_file, comments="test"), npt.NDArray[Any])
assert_type(np.genfromtxt(str_path, delimiter="\n"), npt.NDArray[Any])
assert_type(np.genfromtxt(str_path, ndmin=2), npt.NDArray[Any])
assert_type(np.genfromtxt(["1", "2", "3"], ndmin=2), npt.NDArray[Any])
|
[
4,
5,
6,
7,
8
] |
2,436 |
bbb23d606b081d2591699cb6b9336c8766eea5b2
|
<mask token>
|
<mask token>
for i in s:
if i.isupper():
u += 1
elif i.islower():
l += 1
print(u, l, end='')
|
s = input('enter a string')
u = 0
l = 0
for i in s:
if i.isupper():
u += 1
elif i.islower():
l += 1
print(u, l, end='')
|
s=input("enter a string")
u=0
l=0
for i in s:
if i.isupper():
u+=1
elif i.islower():
l+=1
print(u,l,end="")
| null |
[
0,
1,
2,
3
] |
2,437 |
fdb680f12dfb4b29f25cfe4f7af80469dc4294cf
|
<mask token>
|
COG_QUOTAS = (30, 25, 20, 15, 10, 5, 2, 1), (45, 40, 35, 30, 25, 20, 15, 10)
COG_UNSEEN = 1
COG_BATTLED = 2
COG_DEFEATED = 3
COG_COMPLETE1 = 4
COG_COMPLETE2 = 5
|
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.shtiker.CogPageGlobals
COG_QUOTAS = ((30, 25, 20, 15, 10, 5, 2, 1), (45, 40, 35, 30, 25, 20, 15, 10))
COG_UNSEEN = 1
COG_BATTLED = 2
COG_DEFEATED = 3
COG_COMPLETE1 = 4
COG_COMPLETE2 = 5
| null | null |
[
0,
1,
2
] |
2,438 |
0b0282ade565eb4031cef3a2fa8605249f104d9d
|
<mask token>
def main(argc, argv, envir):
raw_samples = np.array([[0, 0, 3], [1, 1, 0], [0, 2, 1], [1, 0, 2]])
deal_with_ohe(raw_samples)
ohe = sp.OneHotEncoder(sparse=False, dtype=int)
ohe_samples = ohe.fit_transform(raw_samples)
print(ohe_samples)
return 0
<mask token>
|
<mask token>
def deal_with_ohe(raw_sample):
ohe_samples = []
copy_sample = raw_sample
cols = copy_sample.shape[1]
for col in range(cols):
col_sample = copy_sample[:, col]
type = np.unique(col_sample).size
ohe = []
for raw in col_sample:
sample = np.zeros(type)
sample[raw] = 1
ohe.append(sample)
ohe_samples.append(ohe)
print(np.array(ohe_samples).T)
def main(argc, argv, envir):
raw_samples = np.array([[0, 0, 3], [1, 1, 0], [0, 2, 1], [1, 0, 2]])
deal_with_ohe(raw_samples)
ohe = sp.OneHotEncoder(sparse=False, dtype=int)
ohe_samples = ohe.fit_transform(raw_samples)
print(ohe_samples)
return 0
<mask token>
|
<mask token>
def deal_with_ohe(raw_sample):
ohe_samples = []
copy_sample = raw_sample
cols = copy_sample.shape[1]
for col in range(cols):
col_sample = copy_sample[:, col]
type = np.unique(col_sample).size
ohe = []
for raw in col_sample:
sample = np.zeros(type)
sample[raw] = 1
ohe.append(sample)
ohe_samples.append(ohe)
print(np.array(ohe_samples).T)
def main(argc, argv, envir):
raw_samples = np.array([[0, 0, 3], [1, 1, 0], [0, 2, 1], [1, 0, 2]])
deal_with_ohe(raw_samples)
ohe = sp.OneHotEncoder(sparse=False, dtype=int)
ohe_samples = ohe.fit_transform(raw_samples)
print(ohe_samples)
return 0
if __name__ == '__main__':
sys.exit(main(len(sys.argv), sys.argv, os.environ))
|
import os
import sys
import platform
import numpy as np
import sklearn.preprocessing as sp
def deal_with_ohe(raw_sample):
ohe_samples = []
copy_sample = raw_sample
cols = copy_sample.shape[1]
for col in range(cols):
col_sample = copy_sample[:, col]
type = np.unique(col_sample).size
ohe = []
for raw in col_sample:
sample = np.zeros(type)
sample[raw] = 1
ohe.append(sample)
ohe_samples.append(ohe)
print(np.array(ohe_samples).T)
def main(argc, argv, envir):
raw_samples = np.array([[0, 0, 3], [1, 1, 0], [0, 2, 1], [1, 0, 2]])
deal_with_ohe(raw_samples)
ohe = sp.OneHotEncoder(sparse=False, dtype=int)
ohe_samples = ohe.fit_transform(raw_samples)
print(ohe_samples)
return 0
if __name__ == '__main__':
sys.exit(main(len(sys.argv), sys.argv, os.environ))
|
import os
import sys
import platform
import numpy as np
import sklearn.preprocessing as sp
def deal_with_ohe(raw_sample):
# --------------------#
# 10 100 0001 #
# 01 010 1000 #
# 10 001 0100 #
# 01 100 0010 #
# --------------------#
ohe_samples = []
copy_sample = raw_sample
cols = copy_sample.shape[1]
for col in range(cols):
col_sample = copy_sample[:,col]
type = np.unique(col_sample).size
ohe = []
for raw in col_sample:
sample = np.zeros(type)
sample[raw] = 1
ohe.append(sample)
ohe_samples.append(ohe)
print(np.array(ohe_samples).T)
def main(argc, argv, envir):
raw_samples = np.array([
[0, 0, 3],
[1, 1, 0],
[0, 2, 1],
[1, 0, 2]])
deal_with_ohe(raw_samples)
ohe = sp.OneHotEncoder(sparse=False, dtype=int)
ohe_samples = ohe.fit_transform(raw_samples)
print(ohe_samples)
return 0
if __name__ == "__main__":
sys.exit(main(len(sys.argv),sys.argv, os.environ))
|
[
1,
2,
3,
4,
5
] |
2,439 |
7c2349810fc757848eeb5bddef4640d87d5f9ab9
|
<mask token>
class Helper(object):
<mask token>
def base_dir(self, filePath, folder='data'):
"""
返回公共路径
:parameter folder:文件夹
:parameter filePath:文件名称
"""
return os.path.join(os.path.dirname(os.path.dirname(__file__)),
folder, filePath)
<mask token>
def getUrl(self, rowx):
"""
获取请求地址
:parameter rowx:在excel中的行数
"""
return self.readExcel(rowx)[1]
def getData(self, rowx):
"""
获取数据并且返回
:parameter rowx:在excel中的行数
"""
return json.loads(self.readExcel(rowx)[2])
|
<mask token>
class Helper(object):
<mask token>
def base_dir(self, filePath, folder='data'):
"""
返回公共路径
:parameter folder:文件夹
:parameter filePath:文件名称
"""
return os.path.join(os.path.dirname(os.path.dirname(__file__)),
folder, filePath)
def readExcel(self, rowx, filePath='data.xlsx'):
"""
读取excel中数据并且返回
:parameter filePath:xlsx文件名称
:parameter rowx:在excel中的行数
"""
book = xlrd.open_workbook(self.base_dir(filePath))
sheet = book.sheet_by_index(0)
return sheet.row_values(rowx)
def getUrl(self, rowx):
"""
获取请求地址
:parameter rowx:在excel中的行数
"""
return self.readExcel(rowx)[1]
def getData(self, rowx):
"""
获取数据并且返回
:parameter rowx:在excel中的行数
"""
return json.loads(self.readExcel(rowx)[2])
|
<mask token>
class Helper(object):
"""公共方法"""
def base_dir(self, filePath, folder='data'):
"""
返回公共路径
:parameter folder:文件夹
:parameter filePath:文件名称
"""
return os.path.join(os.path.dirname(os.path.dirname(__file__)),
folder, filePath)
def readExcel(self, rowx, filePath='data.xlsx'):
"""
读取excel中数据并且返回
:parameter filePath:xlsx文件名称
:parameter rowx:在excel中的行数
"""
book = xlrd.open_workbook(self.base_dir(filePath))
sheet = book.sheet_by_index(0)
return sheet.row_values(rowx)
def getUrl(self, rowx):
"""
获取请求地址
:parameter rowx:在excel中的行数
"""
return self.readExcel(rowx)[1]
def getData(self, rowx):
"""
获取数据并且返回
:parameter rowx:在excel中的行数
"""
return json.loads(self.readExcel(rowx)[2])
|
import os
import xlrd
import json
class Helper(object):
"""公共方法"""
def base_dir(self, filePath, folder='data'):
"""
返回公共路径
:parameter folder:文件夹
:parameter filePath:文件名称
"""
return os.path.join(os.path.dirname(os.path.dirname(__file__)),
folder, filePath)
def readExcel(self, rowx, filePath='data.xlsx'):
"""
读取excel中数据并且返回
:parameter filePath:xlsx文件名称
:parameter rowx:在excel中的行数
"""
book = xlrd.open_workbook(self.base_dir(filePath))
sheet = book.sheet_by_index(0)
return sheet.row_values(rowx)
def getUrl(self, rowx):
"""
获取请求地址
:parameter rowx:在excel中的行数
"""
return self.readExcel(rowx)[1]
def getData(self, rowx):
"""
获取数据并且返回
:parameter rowx:在excel中的行数
"""
return json.loads(self.readExcel(rowx)[2])
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
#author:wuya
import os
import xlrd
import json
class Helper(object):
'''公共方法'''
def base_dir(self,filePath,folder='data'):
'''
返回公共路径
:parameter folder:文件夹
:parameter filePath:文件名称
'''
return os.path.join(
os.path.dirname(
os.path.dirname(__file__)),
folder,filePath)
def readExcel(self,rowx,filePath='data.xlsx'):
'''
读取excel中数据并且返回
:parameter filePath:xlsx文件名称
:parameter rowx:在excel中的行数
'''
book=xlrd.open_workbook(self.base_dir(filePath))
sheet=book.sheet_by_index(0)
return sheet.row_values(rowx)
def getUrl(self,rowx):
'''
获取请求地址
:parameter rowx:在excel中的行数
'''
return self.readExcel(rowx)[1]
def getData(self,rowx):
'''
获取数据并且返回
:parameter rowx:在excel中的行数
'''
return json.loads(self.readExcel(rowx)[2])
|
[
4,
5,
6,
7,
8
] |
2,440 |
9de2589cfb5bebba789ece8df9a0fcfbedb01173
|
#!/usr/bin/env python
import sys, re, urllib, urllib2, string, time, os
from urllib2 import Request, urlopen, URLError, HTTPError
from urlparse import urlparse
joomla_version="undefined" #used for joomla veersin info
provided_url="" #the selected provided url
verbose_flag = 0 # If set to 1, prints verbose information
default_input_path = "" # The default input file path
default_output_path = "" # The default output file path
if os.name == "nt":
path_slash = "\\"
else:
path_slash = "/"
# Prints usage
def print_usage():
"""
print_usage()
Prints help screen and exits.
"""
print ""
print ""
print " JoomFind v0.1"
print ""
print " Script made by Jasdev Singh"
print ""
print " This script is made only for educational and offline self-testing "
print " purposes. The creator is not responsible or accountable for any "
print " damage or loss caused that you perform with this script. "
print ""
print " Usage example:"
print '\tpython joomfind.py -f filepath | -v'
print ""
print " Put URL(s) to scan in a newline delimited file"
print " URL(s) must point to homepage of the CMS "
print ""
print " Options:"
print " -f filename (specify input file)"
print " -v, --verbose (show detailed output)"
print " --help (displays this help text)"
print ""
return
# Testing if URL is reachable, with error handling
def test_url():
"""
test_url()
Checks whether URL is rechable. Prints relevant infomation.
"""
global provided_url
global verbose_flag
# extracting url
provided_url = urlparse(provided_url).scheme+"://"+urlparse(provided_url).netloc
print provided_url
if verbose_flag: print "\t[.] Checking if connection can be established...",# + provided_url
try:
response = urllib2.urlopen(provided_url)
except HTTPError, e:
if verbose_flag: print "[!] Failed"
return 0
except URLError, e:
if verbose_flag: print "[!] Failed"
return 0
else:
valid_target = 1
if verbose_flag: print "Success"
return 1
# Scans for the HTML meta tag information
def scan_target_metatag():
"""
scan_target_metatag()
Scans the meta-tag of the website.
The meta-tag has information that can lead to the detection of Joomla.
"""
target_meta_url=provided_url+"/index.php"
if verbose_flag: print "\t[.] Trying to access meta tag information...", #+ target_meta_url
try:
response = urllib2.urlopen(target_meta_url)
html = response.read(2000)
#print html
# Now extract the interesting information
get_metatag = string.find(html, "Joomla! - Open Source Content Management")
# If the target is not vulnerable exit
if get_metatag == -1:
meta_flag=0
if verbose_flag: print "Failed"
else:
meta_flag=1
if verbose_flag: print "Success"
#print "meta flag="+str(meta_flag)
return meta_flag
except:
if verbose_flag: print "Failed"
# Tests whether the URL has a '/administrator' login page
def scan_admin_url():
"""
scan_admin_url()
Scans the administrator URL of the website.
The administrator URL, if reachable, is a clue that Joomla is being used.
"""
target_admin_url=provided_url+"/administrator/index.php"
if verbose_flag: print "\t[.] Trying to access admin login page...", #+ target_admin_url
try:
response = urllib2.urlopen(target_admin_url)
except HTTPError, e:
admin_flag=0
#print "admin flag="+str(admin_flag)
if verbose_flag: print "Failed"
return admin_flag
else:
admin_flag=1
#print "admin flag="+str(admin_flag)
if verbose_flag: print "Success"
return admin_flag
# Scans content of 'com_content' component
def scan_com_content():
"""
scan_com_content()
Scans the content.xml file of the default component of the website.
The content.xml file, if readable, is a clue that Joomla is being used.
"""
target_com_content=provided_url+"/administrator/components/com_content/content.xml"
if verbose_flag: print "\t[.] Trying to access com_content component...", #+ target_com_content
try:
response = urllib2.urlopen(target_com_content)
html = response.read()
get_com = string.find(html, "Joomla")
except HTTPError, e:
com_component_flag=0
#print "com_component flag="+str(com_component_flag)
if verbose_flag: print "Failed"
return com_component_flag
else:
if get_com==-1:
com_component_flag=0
if verbose_flag: print "Failed"
else:
com_component_flag=1
if verbose_flag: print "Success"
#print "com_component flag="+str(com_component_flag)
return com_component_flag
# Scans the robots.txt file
def scan_robots_txt():
"""
scan_robots_txt()
Scans the robots.txt file of website.
The robots.txt file, if readable, has clues that Joomla is being used.
"""
target_robots_txt=provided_url+"/robots.txt"
if verbose_flag: print "\t[.] Trying to access robots.txt file...",#+target_robots_txt
try:
response = urllib2.urlopen(target_robots_txt)
html = response.read()
get_robots = string.find(html, "Joomla")
except HTTPError, e:
robots_flag=0
#print "robots flag="+str(robots_flag)
if verbose_flag: print "Failed"
return robots_flag
else:
if get_robots==-1:
robots_flag=0
if verbose_flag: print "Failed"
else:
robots_flag=1
if verbose_flag: print "Success"
#print "robots flag="+str(robots_flag)
return robots_flag
# Scans the htaccess.txt file
def scan_htaccess():
"""
scan_htaccess()
Scans the htaccess file of website.
The htaccess file, if readable, has clues that Joomla is being used.
"""
target_htacess=provided_url+"/htaccess.txt"
if verbose_flag: print "\t[.] Trying to access htaccess file...",#+target_htacess
try:
response = urllib2.urlopen(target_htacess)
html = response.read()
get_htaccess = string.find(html, "Joomla")
except HTTPError, e:
htaccess_flag=0
#print "htaccess flag="+str(htaccess_flag)
if verbose_flag: print "Failed"
return htaccess_flag
else:
if get_htaccess==-1:
htaccess_flag=0
if verbose_flag: print "Failed"
else:
htaccess_flag=1
if verbose_flag: print "Success"
#print "htaccess flag="+str(htaccess_flag)
return htaccess_flag
# Scans the system.css file
def scan_system_css():
"""
scan_system_css()
Scans the system.css file of website.
The system.css file, if readable, has clues that Joomla is being used.
"""
pass
# Scans the MooTools.js file
def scan_mootools():
"""
scan_mootools()
Scans the mootools.js file of website.
The mootools.js file, if readable, has clues that Joomla is being used.
"""
target_mootools=provided_url+"/media/system/js/mootools-more.js"
if verbose_flag: print "\t[.] Trying to access MooTools file...", #+ target_mootools
try:
response = urllib2.urlopen(target_mootools)
html = response.read(3300)
#print html
get_mootools = string.find(html, 'MooTools.More={version:"1.4.0.1"')
except HTTPError, e:
mootools_flag=0
#print "mootools flag="+str(mootools_flag)
if verbose_flag: print "Failed"
return mootools_flag
else:
if get_mootools==-1:
mootools_flag=0
if verbose_flag: print "Failed"
else:
mootools_flag=1
if verbose_flag: print "Success"
joomla_version="2.x or 3.x"
#print "mootools flag="+str(mootools_flag)
return mootools_flag
# Scans the en-GB.xml file
def scan_engb_ini():
"""
scan_engb_ini()
Scans the en-GB.ini file of website.
The en-GB.ini file, if readable, has clues that Joomla is being used.
"""
target_engb=provided_url+"/language/en-GB/en-GB.xml"
if verbose_flag: print "\t[.] Trying to access en-GB file...", #+ target_engb
try:
response = urllib2.urlopen(target_engb)
html = response.read(200)
#print html
t1 = string.find(html, '<version>')
target_engb = html[t1+9:t1+14]
except HTTPError, e:
engb_flag=0
#print "engb flag="+str(engb_flag)
if verbose_flag: print "Failed"
return engb_flag
else:
if t1==-1:
engb_flag=0
if verbose_flag: print "Failed"
else:
engb_flag=1
if verbose_flag: print "Success"
global joomla_version
joomla_version=target_engb
#print "engb flag="+str(engb_flag)
return engb_flag
# Computes the result of the scans
def compute_result(a,b,c,d,e,f,g):
"""
compute_result()
Computes the final result.
"""
if (a or b or c or d or e or f or g)and ((a+b+c+d+e+f+g)>=3):
return 1
else:
return 0
# Reads URL's from an input file and processes them
def process_from_file():
"""
process_from_file()
Starts processing the URL's from the input file.
"""
global default_input_path
print "JoomFind v 1.0"
print "\n\nTrying to read URL(s) form " + default_input_path + " file...\n"
try:
if not default_input_path:
f = open("urls.txt")
else:
f=open(default_input_path)
cwd=os.getcwd()
file_path = cwd + path_slash + f.name
# extracting url's to list from file
start_urls = [url.strip() for url in f.readlines() if url[0] not in ['#',' ',"\n"]]
if not start_urls:
print "File is empty. Add some URL(s) first.\n"
f.close()
return 0
except:
print "File not found. Make sure it exists.\n"
return 0
#print start_urls
num=str(len(start_urls))
print "Found " + num + " URL(s) on " + time.asctime(time.localtime(time.time())) + "\n"
of=open(default_output_path,'a+')
of.write("\n\n\tScanning " + num + " URL(s) ")
of.write("\n\n\tDate\Time : " + time.asctime(time.localtime(time.time())) )
of.write("\n\n\tInput file path : " + default_input_path + "\n\n")
of.close()
for url in start_urls:
global provided_url
provided_url=url
print "\nWorking on URL " + str(start_urls.index(url)+1) + ": " + provided_url
processing()
print "\nAll done! Check '" + default_output_path +"' file for results.\n"
# Calls other scans and writes results to output file
def processing():
"""
processing()
Calls other helper functions.
"""
err=test_url()
of=open(default_output_path,'a+')
if err!=0:
metaf=scan_target_metatag()
adminf=scan_admin_url()
comf=scan_com_content()
robotsf=scan_robots_txt()
htf=scan_htaccess()
moof=scan_mootools()
engbf=scan_engb_ini()
result=compute_result(metaf,adminf,comf,robotsf,htf,moof,engbf)
if result==1:
#print "THE TARGET IS USING JOOMLA CMS"
#print "Joomla version is " + joomla_version
of.write("\nJOOMLA USED (version : " + joomla_version + ") --> " + provided_url + "\n")
else:
#print "JOOMLA NOT USED"
of.write("\nJOMLA NOT USED --> " + provided_url + "\n")
else:
of.write("\nBAD URL --> " + provided_url + "\n")
of.close()
return 0
# main method
def main():
"""
main()
Starting point of program execution.
"""
# Checking if argument was provided
if len(sys.argv) <=1:
print_usage()
sys.exit(1)
for arg in sys.argv:
# Checking if help was called
if arg == "-h" or arg == "--help":
print_usage()
sys.exit(1)
# Checking for verbose mode
if arg == "-v" or arg == "--verbose":
global verbose_flag
verbose_flag=1
# Checking for input file
if arg == "-f" or arg == "--file":
global default_input_path
global default_output_path
default_input_path = sys.argv[2]
default_output_path=default_input_path[:-4] + "_results.txt"
#if arg == "-u" or arg == "--url":
# input_url = sys.argv[2]
if os.name == "nt":
os.system('cls')
else:
os.system('clear')
process_from_file()
if __name__=="__main__":
main()
#EOF
| null | null | null | null |
[
0
] |
2,441 |
f2397ba3fe1452238f251111f35b06b4a93e0359
|
<mask token>
class TestModel(tl.LightningModule):
def __init__(self):
super().__init__()
self.model = tf.keras.Sequential([tf.keras.layers.Dense(5), tf.
keras.layers.Dense(2)])
<mask token>
<mask token>
def training_step(self, batch, batch_idx, optimizer_idx):
pred = self(batch)
loss = tf.reduce_mean(pred)
log = {'batch_idx': batch_idx, 'tr_loss': loss}
result = tl.TrainResult(loss, self.model.trainable_variables, log=log)
return result
<mask token>
<mask token>
class TestDataLoader(tl.LightningDataModule):
def __init__(self):
self.batch_size = 32
def setup(self):
self.tr_dataset = tf.random.normal((256, 7))
self.val_dataset = tf.random.normal((64, 7))
def train_dataloader(self):
dataset = tf.data.Dataset.from_tensor_slices(self.tr_dataset).batch(
self.batch_size)
return dataset
def val_dataloader(self):
dataset = tf.data.Dataset.from_tensor_slices(self.val_dataset).batch(
self.batch_size)
return dataset
<mask token>
|
<mask token>
class TestModel(tl.LightningModule):
def __init__(self):
super().__init__()
self.model = tf.keras.Sequential([tf.keras.layers.Dense(5), tf.
keras.layers.Dense(2)])
def call(self, dataset):
return self.model(dataset)
def configure_optimizers(self):
return tf.keras.optimizers.Adam(0.1),
def training_step(self, batch, batch_idx, optimizer_idx):
pred = self(batch)
loss = tf.reduce_mean(pred)
log = {'batch_idx': batch_idx, 'tr_loss': loss}
result = tl.TrainResult(loss, self.model.trainable_variables, log=log)
return result
<mask token>
<mask token>
class TestDataLoader(tl.LightningDataModule):
def __init__(self):
self.batch_size = 32
def setup(self):
self.tr_dataset = tf.random.normal((256, 7))
self.val_dataset = tf.random.normal((64, 7))
def train_dataloader(self):
dataset = tf.data.Dataset.from_tensor_slices(self.tr_dataset).batch(
self.batch_size)
return dataset
def val_dataloader(self):
dataset = tf.data.Dataset.from_tensor_slices(self.val_dataset).batch(
self.batch_size)
return dataset
<mask token>
|
<mask token>
class TestModel(tl.LightningModule):
def __init__(self):
super().__init__()
self.model = tf.keras.Sequential([tf.keras.layers.Dense(5), tf.
keras.layers.Dense(2)])
def call(self, dataset):
return self.model(dataset)
def configure_optimizers(self):
return tf.keras.optimizers.Adam(0.1),
def training_step(self, batch, batch_idx, optimizer_idx):
pred = self(batch)
loss = tf.reduce_mean(pred)
log = {'batch_idx': batch_idx, 'tr_loss': loss}
result = tl.TrainResult(loss, self.model.trainable_variables, log=log)
return result
def validation_step(self, batch, batch_idx, optimizer_idx):
pred = self(batch)
loss = tf.reduce_mean(pred)
log = {'batch_idx': batch_idx, 'val_loss': loss}
result = tl.EvalResult(loss, log=log)
return result
def checkpointer(self):
return tf.train.Checkpoint(m=self.model, opt0=self.optimizer_0)
class TestDataLoader(tl.LightningDataModule):
def __init__(self):
self.batch_size = 32
def setup(self):
self.tr_dataset = tf.random.normal((256, 7))
self.val_dataset = tf.random.normal((64, 7))
def train_dataloader(self):
dataset = tf.data.Dataset.from_tensor_slices(self.tr_dataset).batch(
self.batch_size)
return dataset
def val_dataloader(self):
dataset = tf.data.Dataset.from_tensor_slices(self.val_dataset).batch(
self.batch_size)
return dataset
<mask token>
|
import tf_lightning as tl
import tensorflow as tf
class TestModel(tl.LightningModule):
def __init__(self):
super().__init__()
self.model = tf.keras.Sequential([tf.keras.layers.Dense(5), tf.
keras.layers.Dense(2)])
def call(self, dataset):
return self.model(dataset)
def configure_optimizers(self):
return tf.keras.optimizers.Adam(0.1),
def training_step(self, batch, batch_idx, optimizer_idx):
pred = self(batch)
loss = tf.reduce_mean(pred)
log = {'batch_idx': batch_idx, 'tr_loss': loss}
result = tl.TrainResult(loss, self.model.trainable_variables, log=log)
return result
def validation_step(self, batch, batch_idx, optimizer_idx):
pred = self(batch)
loss = tf.reduce_mean(pred)
log = {'batch_idx': batch_idx, 'val_loss': loss}
result = tl.EvalResult(loss, log=log)
return result
def checkpointer(self):
return tf.train.Checkpoint(m=self.model, opt0=self.optimizer_0)
class TestDataLoader(tl.LightningDataModule):
def __init__(self):
self.batch_size = 32
def setup(self):
self.tr_dataset = tf.random.normal((256, 7))
self.val_dataset = tf.random.normal((64, 7))
def train_dataloader(self):
dataset = tf.data.Dataset.from_tensor_slices(self.tr_dataset).batch(
self.batch_size)
return dataset
def val_dataloader(self):
dataset = tf.data.Dataset.from_tensor_slices(self.val_dataset).batch(
self.batch_size)
return dataset
if __name__ == '__main__':
model = TestModel()
dataloader = TestDataLoader()
trainer = tl.Trainer()
trainer.fit(model, dataloader)
|
# __author__ = 'Vasudev Gupta'
import tf_lightning as tl
import tensorflow as tf
class TestModel(tl.LightningModule):
# just a random model with random dataset
def __init__(self):
# simple test model
super().__init__()
self.model = tf.keras.Sequential([
tf.keras.layers.Dense(5),
tf.keras.layers.Dense(2)
])
def call(self, dataset):
return self.model(dataset)
def configure_optimizers(self):
return tf.keras.optimizers.Adam(0.1),
def training_step(self, batch, batch_idx, optimizer_idx):
pred = self(batch)
loss = tf.reduce_mean(pred)
log = {'batch_idx': batch_idx, 'tr_loss': loss}
result = tl.TrainResult(
loss, self.model.trainable_variables, log=log)
return result
def validation_step(self, batch, batch_idx, optimizer_idx):
pred = self(batch)
loss = tf.reduce_mean(pred)
log = {'batch_idx': batch_idx, 'val_loss': loss}
result = tl.EvalResult(loss, log=log)
return result
def checkpointer(self):
return tf.train.Checkpoint(m=self.model,
opt0=self.optimizer_0)
class TestDataLoader(tl.LightningDataModule):
# using random dataset
def __init__(self):
self.batch_size = 32
def setup(self):
self.tr_dataset = tf.random.normal((256, 7))
self.val_dataset = tf.random.normal((64, 7))
def train_dataloader(self):
dataset = tf.data.Dataset.from_tensor_slices(
self.tr_dataset).batch(self.batch_size)
return dataset
def val_dataloader(self):
dataset = tf.data.Dataset.from_tensor_slices(
self.val_dataset).batch(self.batch_size)
return dataset
if __name__ == '__main__':
model = TestModel()
dataloader = TestDataLoader()
trainer = tl.Trainer()
trainer.fit(model, dataloader)
|
[
8,
10,
12,
14,
15
] |
2,442 |
4a63431aa71ca3f4b75fcd89a50bf599e7717645
|
<mask token>
|
<mask token>
def main(dir_train, C, gamma, number_partitions, do_subsampling, write_labels):
hlp.setup_logging()
if number_partitions is None or number_partitions == 0:
do_concat = False
partitions_from_files = True
early_subsampling = False
late_subsampling = True
else:
do_concat = True
partitions_from_files = False
early_subsampling = True
late_subsampling = False
if not do_subsampling:
early_subsampling = late_subsampling = False
X, y = pre.get_multiple_data_and_targets(dir_filepath=dir_train,
do_subsampling=early_subsampling, do_concat=do_concat)
clf = s.get_svclassifier(C=C, gamma=gamma)
scores, y_pred = s.get_crossval_scores_prediction(X, y, n_folds=
number_partitions, clf=clf, files_as_folds=partitions_from_files,
do_subsampling=late_subsampling)
evaluation = s.get_eval_report(scores)
hlp.log(scores)
hlp.log(evaluation)
if write_labels:
dbg.write_list_to_dir(dir_train, y_pred, 'y_pred.txt')
if do_concat:
dbg.write_list_to_dir(dir_train, y, 'y_true.txt')
<mask token>
|
<mask token>
def main(dir_train, C, gamma, number_partitions, do_subsampling, write_labels):
hlp.setup_logging()
if number_partitions is None or number_partitions == 0:
do_concat = False
partitions_from_files = True
early_subsampling = False
late_subsampling = True
else:
do_concat = True
partitions_from_files = False
early_subsampling = True
late_subsampling = False
if not do_subsampling:
early_subsampling = late_subsampling = False
X, y = pre.get_multiple_data_and_targets(dir_filepath=dir_train,
do_subsampling=early_subsampling, do_concat=do_concat)
clf = s.get_svclassifier(C=C, gamma=gamma)
scores, y_pred = s.get_crossval_scores_prediction(X, y, n_folds=
number_partitions, clf=clf, files_as_folds=partitions_from_files,
do_subsampling=late_subsampling)
evaluation = s.get_eval_report(scores)
hlp.log(scores)
hlp.log(evaluation)
if write_labels:
dbg.write_list_to_dir(dir_train, y_pred, 'y_pred.txt')
if do_concat:
dbg.write_list_to_dir(dir_train, y, 'y_true.txt')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=
'Print evaluation metrics for cross validating an HSV classifier.')
parser.add_argument('dir_train', help=
'Directory containing all feature XMLs and label CSVs for cross validating the classifier. CSVs need to have the same file name as their corresponding XML.'
)
parser.add_argument('-c', '--C_value', help=
'Omit the grid search and directly specify a C value.', type=float)
parser.add_argument('-g', '--gamma_value', help=
'Omit the grid search and directly specify a gamma value.', type=float)
parser.add_argument('-p', '--number_partitions', help=
'Set the number of partitions for cross validation. If omitted, take each file as a partition.'
, type=int)
parser.add_argument('-s', '--subsampling', help=
'Subsample majority class', action='store_true')
parser.add_argument('-wl', '--write_labels', help=
'Write both true and predicted labels of the eval file(s) to TXT files.'
, action='store_true')
args = parser.parse_args()
main(args.dir_train, args.C_value, args.gamma_value, args.
number_partitions, args.subsampling, args.write_labels)
|
import argparse
import debug.debug as dbg
import helper.helper as hlp
import prep.preprocessor as pre
import sample.sample as s
def main(dir_train, C, gamma, number_partitions, do_subsampling, write_labels):
hlp.setup_logging()
if number_partitions is None or number_partitions == 0:
do_concat = False
partitions_from_files = True
early_subsampling = False
late_subsampling = True
else:
do_concat = True
partitions_from_files = False
early_subsampling = True
late_subsampling = False
if not do_subsampling:
early_subsampling = late_subsampling = False
X, y = pre.get_multiple_data_and_targets(dir_filepath=dir_train,
do_subsampling=early_subsampling, do_concat=do_concat)
clf = s.get_svclassifier(C=C, gamma=gamma)
scores, y_pred = s.get_crossval_scores_prediction(X, y, n_folds=
number_partitions, clf=clf, files_as_folds=partitions_from_files,
do_subsampling=late_subsampling)
evaluation = s.get_eval_report(scores)
hlp.log(scores)
hlp.log(evaluation)
if write_labels:
dbg.write_list_to_dir(dir_train, y_pred, 'y_pred.txt')
if do_concat:
dbg.write_list_to_dir(dir_train, y, 'y_true.txt')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=
'Print evaluation metrics for cross validating an HSV classifier.')
parser.add_argument('dir_train', help=
'Directory containing all feature XMLs and label CSVs for cross validating the classifier. CSVs need to have the same file name as their corresponding XML.'
)
parser.add_argument('-c', '--C_value', help=
'Omit the grid search and directly specify a C value.', type=float)
parser.add_argument('-g', '--gamma_value', help=
'Omit the grid search and directly specify a gamma value.', type=float)
parser.add_argument('-p', '--number_partitions', help=
'Set the number of partitions for cross validation. If omitted, take each file as a partition.'
, type=int)
parser.add_argument('-s', '--subsampling', help=
'Subsample majority class', action='store_true')
parser.add_argument('-wl', '--write_labels', help=
'Write both true and predicted labels of the eval file(s) to TXT files.'
, action='store_true')
args = parser.parse_args()
main(args.dir_train, args.C_value, args.gamma_value, args.
number_partitions, args.subsampling, args.write_labels)
|
import argparse
import debug.debug as dbg
import helper.helper as hlp
import prep.preprocessor as pre
import sample.sample as s
def main(dir_train, C, gamma, number_partitions, do_subsampling, write_labels):
hlp.setup_logging()
# Files as folds?
if number_partitions is None or number_partitions == 0: # Yes
do_concat = False
partitions_from_files = True
early_subsampling = False
late_subsampling = True
else: # No
do_concat = True
partitions_from_files = False
early_subsampling = True
late_subsampling = False
if not do_subsampling:
early_subsampling = late_subsampling = False
X, y = pre.get_multiple_data_and_targets(dir_filepath=dir_train, do_subsampling=early_subsampling,
do_concat=do_concat)
clf = s.get_svclassifier(C=C, gamma=gamma)
scores, y_pred = s.get_crossval_scores_prediction(X, y, n_folds=number_partitions, clf=clf,
files_as_folds=partitions_from_files, do_subsampling=late_subsampling)
evaluation = s.get_eval_report(scores)
hlp.log(scores)
hlp.log(evaluation)
if write_labels:
dbg.write_list_to_dir(dir_train, y_pred, "y_pred.txt")
if do_concat:
dbg.write_list_to_dir(dir_train, y, "y_true.txt")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Print evaluation metrics for cross validating an HSV classifier.")
parser.add_argument("dir_train",
help="Directory containing all feature XMLs and label CSVs for cross validating the "
"classifier. CSVs need to have the same file name as their corresponding XML.")
parser.add_argument("-c", "--C_value", help="Omit the grid search and directly specify a C value.", type=float)
parser.add_argument("-g", "--gamma_value", help="Omit the grid search and directly specify a gamma value.",
type=float)
parser.add_argument("-p", "--number_partitions",
help="Set the number of partitions for cross validation. If omitted, take each file "
"as a partition.", type=int)
parser.add_argument("-s", "--subsampling", help="Subsample majority class", action="store_true")
parser.add_argument("-wl", "--write_labels",
help="Write both true and predicted labels of the eval file(s) to TXT files.",
action="store_true")
args = parser.parse_args()
main(args.dir_train, args.C_value, args.gamma_value, args.number_partitions, args.subsampling, args.write_labels)
|
[
0,
1,
2,
3,
4
] |
2,443 |
8ede786526f4b730173777d9d3b9c7e4554fc887
|
<mask token>
|
config_info = {'n_input': 1, 'num_layers': 1, 'features': 20,
'sequence_length': 1344, 'num_steps': None, 'lstm_size': None,
'batch_size': None, 'init_learning_rate': None, 'learning_rate_decay':
None, 'init_epoch': None, 'max_epoch': None, 'dropout_rate': None}
|
config_info = {
'n_input': 1,
'num_layers': 1,
'features': 20,
'sequence_length': 1344,
'num_steps' : None,
'lstm_size' : None,
'batch_size' : None,
'init_learning_rate' : None,
'learning_rate_decay' : None,
'init_epoch' : None,
'max_epoch' : None,
'dropout_rate' : None
}
| null | null |
[
0,
1,
2
] |
2,444 |
624ecf743d5be1acc33df14bd721b3103d232f0e
|
#!/bin/usr/python
'''
Author: SaiKumar Immadi
Basic DBSCAN clustering algorithm written in python
5th Semester @ IIIT Guwahati
'''
# You can use this code for free. Just don't plagiarise it for your lab assignments
import sys
from math import sqrt
from random import randint
import matplotlib.pyplot as plt
def main(argv):
global e,mainList,minPts,clusters,outliers
mainList=[]
clusters=[]
outliers=[]
if(len(argv)!=3):
print "The Format is <dbscan.py minPts e data.txt>"
return 0
minPts=int(argv[0])
e=float(argv[1])
if(minPts<2 or e<=0):
print "minPts should be greater than or equal to 2"
print "e should be greater than 0"
return 0
filename=argv[2]
file=open(filename,"r")
for line in file:
lineStripped=line.strip().split('\t')
mainList.append((float(lineStripped[0]),float(lineStripped[1])))
file.close()
while(len(mainList)>0):
point=mainList.pop(0)
mainEneigh=calcEneigh(point,1,[])
outEneigh=calcEneigh(point,2,[])
if(len(mainEneigh+outEneigh)>=minPts):
cluster=calcCluster(point)
clusters.append(cluster)
else:
outliers.append(point)
fig=plt.figure()
cluster_count=0
for cluster in clusters:
cluster_count+=1
x_coordinates=[]
y_coordinates=[]
for point in cluster:
x_coordinates.append(point[0])
y_coordinates.append(point[1])
label_name="Cluster : %.d" % (cluster_count)
plt.scatter(x_coordinates,y_coordinates,s=5,label=label_name)
x_out_coordinates=[]
y_out_coordinates=[]
for outlier in outliers:
x_out_coordinates.append(outlier[0])
y_out_coordinates.append(outlier[1])
plt.scatter(x_out_coordinates,y_out_coordinates,s=5,label='outliers')
plt.title('DBSCAN Clustering')
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.legend()
fig.savefig('output.jpg')
print len(clusters),"clusters"
plt.show()
return 0
def calcEneigh(p,opt,optList):
global e,mainList,minPts,clusters,outliers
if(opt==1):
list=mainList
elif(opt==2):
list=outliers
elif(opt==3):
list=optList
eneigh=[]
for point in list:
x1=p[0]
y1=p[1]
x2=point[0]
y2=point[1]
dist = sqrt((x2 - x1)**2 + (y2 - y1)**2)
if(dist<=e):
eneigh.append(point)
return eneigh
def calcCluster(p):
global e,mainList,minPts,clusters,outliers
cluster=[]
tempList=[]
tempList.append(p)
while(len(tempList)>0):
point=tempList.pop(0)
mainEneigh=calcEneigh(point,1,[])
outEneigh=calcEneigh(point,2,[])
clusterEneigh=calcEneigh(point,3,cluster+tempList)
cluster.append(point)
for x in mainEneigh:
mainList.remove(x)
for x in outEneigh:
outliers.remove(x)
if(len(mainEneigh+outEneigh+clusterEneigh)>=minPts):
tempList=tempList+mainEneigh+outEneigh
else:
cluster=cluster+mainEneigh+outEneigh
return cluster
if __name__ == "__main__":
main(sys.argv[1:])
| null | null | null | null |
[
0
] |
2,445 |
aeef27d667f95e3818f73533439385ea949b96a4
|
<mask token>
class MainHandler(webapp2.RequestHandler):
<mask token>
def get(self):
""" Show home page """
user = users.get_current_user()
if user:
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result:
contact = result[0]
greeting = 'Welcome %s!' % (contact.name,)
else:
contact = 'Invalid dhs.sg user'
greeting = ''
else:
url = users.create_login_url(self.request.uri)
url_linktext = 'Login'
contact = 'Not authorised'
greeting = 'You need to'
template_values = {'contact': contact, 'greeting': greeting, 'url':
url, 'url_linktext': url_linktext}
template = jinja_environment.get_template('index.html')
self.response.out.write(template.render(template_values))
class Submit(webapp2.RequestHandler):
""" Submit form """
def post(self):
if self.request.get('submit'):
updated_handphone = self.request.get('handphone')
updated_tickets_csjh = self.request.get('tickets_csjh')
updated_tickets_edssh = self.request.get('tickets_edssh')
updated_remark = self.request.get('remark')
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
user = users.get_current_user()
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result:
contact = result[0]
greeting = 'User: %s' % (contact.name,)
contact.handphone = updated_handphone
contact.tickets_csjh = updated_tickets_csjh
contact.tickets_edssh = updated_tickets_edssh
contact.remark = db.Text(updated_remark)
contact.put()
else:
self.response.out.write('Reservation failed!')
template_values = {'contact': contact, 'greeting': greeting, 'url':
url, 'url_linktext': url_linktext, 'contact.handphone':
updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,
'contact.tickets_edssh': updated_tickets_edssh,
'contact.remark': updated_remark}
template = jinja_environment.get_template('submit.html')
self.response.out.write(template.render(template_values))
<mask token>
|
<mask token>
class Contact(db.Expando):
<mask token>
pid = db.StringProperty(required=True)
name = db.StringProperty(required=True)
class12 = db.StringProperty(required=True)
email = db.EmailProperty(required=True)
handphone = db.StringProperty(required=False)
tickets_csjh = db.StringProperty(required=False)
tickets_edssh = db.StringProperty(required=False)
remark = db.TextProperty()
class MainHandler(webapp2.RequestHandler):
""" Home page handler """
def get(self):
""" Show home page """
user = users.get_current_user()
if user:
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result:
contact = result[0]
greeting = 'Welcome %s!' % (contact.name,)
else:
contact = 'Invalid dhs.sg user'
greeting = ''
else:
url = users.create_login_url(self.request.uri)
url_linktext = 'Login'
contact = 'Not authorised'
greeting = 'You need to'
template_values = {'contact': contact, 'greeting': greeting, 'url':
url, 'url_linktext': url_linktext}
template = jinja_environment.get_template('index.html')
self.response.out.write(template.render(template_values))
class Submit(webapp2.RequestHandler):
""" Submit form """
def post(self):
if self.request.get('submit'):
updated_handphone = self.request.get('handphone')
updated_tickets_csjh = self.request.get('tickets_csjh')
updated_tickets_edssh = self.request.get('tickets_edssh')
updated_remark = self.request.get('remark')
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
user = users.get_current_user()
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result:
contact = result[0]
greeting = 'User: %s' % (contact.name,)
contact.handphone = updated_handphone
contact.tickets_csjh = updated_tickets_csjh
contact.tickets_edssh = updated_tickets_edssh
contact.remark = db.Text(updated_remark)
contact.put()
else:
self.response.out.write('Reservation failed!')
template_values = {'contact': contact, 'greeting': greeting, 'url':
url, 'url_linktext': url_linktext, 'contact.handphone':
updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,
'contact.tickets_edssh': updated_tickets_edssh,
'contact.remark': updated_remark}
template = jinja_environment.get_template('submit.html')
self.response.out.write(template.render(template_values))
<mask token>
|
<mask token>
class Contact(db.Expando):
""" User data model """
pid = db.StringProperty(required=True)
name = db.StringProperty(required=True)
class12 = db.StringProperty(required=True)
email = db.EmailProperty(required=True)
handphone = db.StringProperty(required=False)
tickets_csjh = db.StringProperty(required=False)
tickets_edssh = db.StringProperty(required=False)
remark = db.TextProperty()
class MainHandler(webapp2.RequestHandler):
""" Home page handler """
def get(self):
""" Show home page """
user = users.get_current_user()
if user:
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result:
contact = result[0]
greeting = 'Welcome %s!' % (contact.name,)
else:
contact = 'Invalid dhs.sg user'
greeting = ''
else:
url = users.create_login_url(self.request.uri)
url_linktext = 'Login'
contact = 'Not authorised'
greeting = 'You need to'
template_values = {'contact': contact, 'greeting': greeting, 'url':
url, 'url_linktext': url_linktext}
template = jinja_environment.get_template('index.html')
self.response.out.write(template.render(template_values))
class Submit(webapp2.RequestHandler):
""" Submit form """
def post(self):
if self.request.get('submit'):
updated_handphone = self.request.get('handphone')
updated_tickets_csjh = self.request.get('tickets_csjh')
updated_tickets_edssh = self.request.get('tickets_edssh')
updated_remark = self.request.get('remark')
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
user = users.get_current_user()
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result:
contact = result[0]
greeting = 'User: %s' % (contact.name,)
contact.handphone = updated_handphone
contact.tickets_csjh = updated_tickets_csjh
contact.tickets_edssh = updated_tickets_edssh
contact.remark = db.Text(updated_remark)
contact.put()
else:
self.response.out.write('Reservation failed!')
template_values = {'contact': contact, 'greeting': greeting, 'url':
url, 'url_linktext': url_linktext, 'contact.handphone':
updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,
'contact.tickets_edssh': updated_tickets_edssh,
'contact.remark': updated_remark}
template = jinja_environment.get_template('submit.html')
self.response.out.write(template.render(template_values))
<mask token>
|
import webapp2
import jinja2
import os
import csv
from google.appengine.api import users
from google.appengine.ext import db
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.
path.dirname(__file__)))
class Contact(db.Expando):
""" User data model """
pid = db.StringProperty(required=True)
name = db.StringProperty(required=True)
class12 = db.StringProperty(required=True)
email = db.EmailProperty(required=True)
handphone = db.StringProperty(required=False)
tickets_csjh = db.StringProperty(required=False)
tickets_edssh = db.StringProperty(required=False)
remark = db.TextProperty()
class MainHandler(webapp2.RequestHandler):
""" Home page handler """
def get(self):
""" Show home page """
user = users.get_current_user()
if user:
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result:
contact = result[0]
greeting = 'Welcome %s!' % (contact.name,)
else:
contact = 'Invalid dhs.sg user'
greeting = ''
else:
url = users.create_login_url(self.request.uri)
url_linktext = 'Login'
contact = 'Not authorised'
greeting = 'You need to'
template_values = {'contact': contact, 'greeting': greeting, 'url':
url, 'url_linktext': url_linktext}
template = jinja_environment.get_template('index.html')
self.response.out.write(template.render(template_values))
class Submit(webapp2.RequestHandler):
""" Submit form """
def post(self):
if self.request.get('submit'):
updated_handphone = self.request.get('handphone')
updated_tickets_csjh = self.request.get('tickets_csjh')
updated_tickets_edssh = self.request.get('tickets_edssh')
updated_remark = self.request.get('remark')
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
user = users.get_current_user()
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result:
contact = result[0]
greeting = 'User: %s' % (contact.name,)
contact.handphone = updated_handphone
contact.tickets_csjh = updated_tickets_csjh
contact.tickets_edssh = updated_tickets_edssh
contact.remark = db.Text(updated_remark)
contact.put()
else:
self.response.out.write('Reservation failed!')
template_values = {'contact': contact, 'greeting': greeting, 'url':
url, 'url_linktext': url_linktext, 'contact.handphone':
updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,
'contact.tickets_edssh': updated_tickets_edssh,
'contact.remark': updated_remark}
template = jinja_environment.get_template('submit.html')
self.response.out.write(template.render(template_values))
contact2 = Contact(pid='lim.ahseng', name='Lim Ah Seng', class12='5C99',
email='[email protected]', handphone='', tickets_csjh='', tickets_edssh
='', remark='')
contact2.put()
app = webapp2.WSGIApplication([('/', MainHandler), ('/submit', Submit)],
debug=True)
|
#!/usr/bin/env python
import webapp2 # web application framework
import jinja2 # template engine
import os # access file system
import csv
from google.appengine.api import users # Google account authentication
from google.appengine.ext import db # datastore
# initialise template
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
class Contact(db.Expando): # allows for different number of fields
''' User data model '''
pid = db.StringProperty(required=True) # string = 500 char, allow field to be indexed, perform faster
name = db.StringProperty(required=True)
class12 = db.StringProperty(required=True)
email = db.EmailProperty(required=True)
handphone = db.StringProperty(required=False)
tickets_csjh = db.StringProperty(required=False)
tickets_edssh = db.StringProperty(required=False)
remark = db.TextProperty()
class MainHandler(webapp2.RequestHandler):
''' Home page handler '''
def get(self):
''' Show home page '''
# import data
# check if valid Google account
# school_register = csv.reader(open('data.csv'),delimiter=',')
# found = False
user = users.get_current_user()
# for student in school_register: # if valid logged in user
# if student[0] == self.request.get('pid'):
# contact = student
# found = True
# break
if user:
# logout link
url = users.create_logout_url(self.request.uri)
# logout text
url_linktext = 'Logout'
# retrieve user record from datastore
# may get multiple records, so in order to get one record:
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result: #if user record found
contact = result[0]
greeting = ("Welcome %s!" % (contact.name,)) #1 item in couple = put comma
else: #not found
contact = "Invalid dhs.sg user"
greeting = ""
else: # not logged in
# login link
url = users.create_login_url(self.request.uri)
# login text
url_linktext = 'Login'
contact = "Not authorised"
greeting = "You need to"
template_values = {
'contact': contact,
'greeting': greeting,
'url': url,
'url_linktext': url_linktext,
}
# create index.html template
template = jinja_environment.get_template('index.html')
# associate template values with template
self.response.out.write(template.render(template_values))
class Submit(webapp2.RequestHandler):
''' Submit form '''
def post(self):
if self.request.get('submit'):
updated_handphone = self.request.get('handphone')
updated_tickets_csjh = self.request.get('tickets_csjh')
updated_tickets_edssh = self.request.get('tickets_edssh')
updated_remark = self.request.get('remark')
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
user = users.get_current_user()
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result:
contact = result[0]
greeting = ("User: %s" % (contact.name,))
contact.handphone = updated_handphone
contact.tickets_csjh = updated_tickets_csjh
contact.tickets_edssh = updated_tickets_edssh
contact.remark = db.Text(updated_remark)
contact.put()
else:
self.response.out.write('Reservation failed!')
template_values = {
'contact': contact,
'greeting': greeting,
'url': url,
'url_linktext': url_linktext,
'contact.handphone': updated_handphone,
'contact.tickets_csjh': updated_tickets_csjh,
'contact.tickets_edssh': updated_tickets_edssh,
'contact.remark': updated_remark,
}
template = jinja_environment.get_template('submit.html')
self.response.out.write(template.render(template_values))
# main
contact2 = Contact(pid = 'lim.ahseng', name = 'Lim Ah Seng', class12 = '5C99', email = '[email protected]', handphone = '', tickets_csjh = '', tickets_edssh = '', remark = '')
contact2.put()
app = webapp2.WSGIApplication([('/', MainHandler), ('/submit', Submit)],
debug=True)
|
[
5,
8,
9,
12,
13
] |
2,446 |
7f5f16ea10980e0ade7357cdae38f47f8d7cdf01
|
<mask token>
|
<mask token>
def count_words(sentence):
sentence = re.findall("\\b[\\w'-]+\\b", sentence.lower().replace('_', ' '))
counts = defaultdict(lambda : 0)
for word in sentence:
counts[word] += 1
return counts
|
import re
from collections import defaultdict
def count_words(sentence):
sentence = re.findall("\\b[\\w'-]+\\b", sentence.lower().replace('_', ' '))
counts = defaultdict(lambda : 0)
for word in sentence:
counts[word] += 1
return counts
|
import re
from collections import defaultdict
def count_words(sentence):
# extract all the words as per definition
sentence = re.findall(r"\b[\w'-]+\b", sentence.lower().replace('_', ' '))
counts = defaultdict(lambda: 0)
# Counting the frequency of each words
for word in sentence:
counts[word] += 1
return counts
| null |
[
0,
1,
2,
3
] |
2,447 |
f26e6164fc4c07fd3339171e316b3a1f7a4be669
|
<mask token>
def eval_ground_scores(gt_relations, pred_relations, tiou_threshold):
"""
:param gt_relations:
:param pred_relations:
:param tiou_threshold:
:return:
"""
relation_num = len(gt_relations)
predict, predict_sub, predict_obj = 0, 0, 0
for relation, pred_trajs in pred_relations.items():
pred_sub = pred_trajs['sub']
pred_obj = pred_trajs['obj']
flag, flag_s, flag_o = False, False, False
gt_trajs = gt_relations[relation]
for gt_traj in gt_trajs:
gt_sub = gt_traj['sub']
gt_obj = gt_traj['obj']
s_tiou = tiou(pred_sub, gt_sub)
o_tiou = tiou(pred_obj, gt_obj)
r_iou = min(s_tiou, o_tiou)
if r_iou >= tiou_threshold:
flag = True
if s_tiou >= tiou_threshold:
flag_s = True
if o_tiou >= tiou_threshold:
flag_o = True
if flag:
predict += 1
if flag_s:
predict_sub += 1
if flag_o:
predict_obj += 1
predict = predict / relation_num
predict_sub = predict_sub / relation_num
predict_obj = predict_obj / relation_num
return predict, predict_sub, predict_obj, relation_num
<mask token>
|
<mask token>
def eval_ground_scores(gt_relations, pred_relations, tiou_threshold):
"""
:param gt_relations:
:param pred_relations:
:param tiou_threshold:
:return:
"""
relation_num = len(gt_relations)
predict, predict_sub, predict_obj = 0, 0, 0
for relation, pred_trajs in pred_relations.items():
pred_sub = pred_trajs['sub']
pred_obj = pred_trajs['obj']
flag, flag_s, flag_o = False, False, False
gt_trajs = gt_relations[relation]
for gt_traj in gt_trajs:
gt_sub = gt_traj['sub']
gt_obj = gt_traj['obj']
s_tiou = tiou(pred_sub, gt_sub)
o_tiou = tiou(pred_obj, gt_obj)
r_iou = min(s_tiou, o_tiou)
if r_iou >= tiou_threshold:
flag = True
if s_tiou >= tiou_threshold:
flag_s = True
if o_tiou >= tiou_threshold:
flag_o = True
if flag:
predict += 1
if flag_s:
predict_sub += 1
if flag_o:
predict_obj += 1
predict = predict / relation_num
predict_sub = predict_sub / relation_num
predict_obj = predict_obj / relation_num
return predict, predict_sub, predict_obj, relation_num
def evaluate(groundtruth, prediction, tiou_threshold=0.5):
""" evaluate visual relation detection and visual
relation tagging.
"""
video_num = len(groundtruth)
print('Computing grounding accuracy over {} videos...'.format(video_num))
acc, acc_sub, acc_obj = 0.0, 0.0, 0.0
gt_rnum = 0
for qid, relation_gt in groundtruth.items():
if qid not in prediction:
continue
relation_pred = prediction[qid]
if len(relation_pred) == 0:
continue
video_acc, video_acc_sub, video_acc_obj, relation_num = (
eval_ground_scores(relation_gt, relation_pred, tiou_threshold))
acc += video_acc
acc_sub += video_acc_sub
acc_obj += video_acc_obj
gt_rnum += relation_num
acc /= video_num
acc_sub /= video_num
acc_obj /= video_num
print('Acc_S\t Acc_O\t Acc_R')
print('{:.2f}\t {:.2f}\t {:.2f}'.format(acc_sub * 100, acc_obj * 100,
acc * 100))
def main():
groundtruth_dir = 'dataset/vidvrd/'
gt_file = osp.join(groundtruth_dir, 'gt_relation_frame.json')
result_dir = 'results/'
res_file = osp.join(result_dir, 'test_viterbi_1gap_04_batch.json')
if not osp.exists(res_file):
print('Generating ...')
generate_track_link.main(res_file)
grountruth = load_file(gt_file)
prediction = load_file(res_file)
evaluate(grountruth, prediction)
<mask token>
|
<mask token>
def eval_ground_scores(gt_relations, pred_relations, tiou_threshold):
"""
:param gt_relations:
:param pred_relations:
:param tiou_threshold:
:return:
"""
relation_num = len(gt_relations)
predict, predict_sub, predict_obj = 0, 0, 0
for relation, pred_trajs in pred_relations.items():
pred_sub = pred_trajs['sub']
pred_obj = pred_trajs['obj']
flag, flag_s, flag_o = False, False, False
gt_trajs = gt_relations[relation]
for gt_traj in gt_trajs:
gt_sub = gt_traj['sub']
gt_obj = gt_traj['obj']
s_tiou = tiou(pred_sub, gt_sub)
o_tiou = tiou(pred_obj, gt_obj)
r_iou = min(s_tiou, o_tiou)
if r_iou >= tiou_threshold:
flag = True
if s_tiou >= tiou_threshold:
flag_s = True
if o_tiou >= tiou_threshold:
flag_o = True
if flag:
predict += 1
if flag_s:
predict_sub += 1
if flag_o:
predict_obj += 1
predict = predict / relation_num
predict_sub = predict_sub / relation_num
predict_obj = predict_obj / relation_num
return predict, predict_sub, predict_obj, relation_num
def evaluate(groundtruth, prediction, tiou_threshold=0.5):
""" evaluate visual relation detection and visual
relation tagging.
"""
video_num = len(groundtruth)
print('Computing grounding accuracy over {} videos...'.format(video_num))
acc, acc_sub, acc_obj = 0.0, 0.0, 0.0
gt_rnum = 0
for qid, relation_gt in groundtruth.items():
if qid not in prediction:
continue
relation_pred = prediction[qid]
if len(relation_pred) == 0:
continue
video_acc, video_acc_sub, video_acc_obj, relation_num = (
eval_ground_scores(relation_gt, relation_pred, tiou_threshold))
acc += video_acc
acc_sub += video_acc_sub
acc_obj += video_acc_obj
gt_rnum += relation_num
acc /= video_num
acc_sub /= video_num
acc_obj /= video_num
print('Acc_S\t Acc_O\t Acc_R')
print('{:.2f}\t {:.2f}\t {:.2f}'.format(acc_sub * 100, acc_obj * 100,
acc * 100))
def main():
groundtruth_dir = 'dataset/vidvrd/'
gt_file = osp.join(groundtruth_dir, 'gt_relation_frame.json')
result_dir = 'results/'
res_file = osp.join(result_dir, 'test_viterbi_1gap_04_batch.json')
if not osp.exists(res_file):
print('Generating ...')
generate_track_link.main(res_file)
grountruth = load_file(gt_file)
prediction = load_file(res_file)
evaluate(grountruth, prediction)
if __name__ == '__main__':
main()
|
import os.path as osp
from evaluations.common import tiou
from evaluations.util import load_file
import generate_track_link
def eval_ground_scores(gt_relations, pred_relations, tiou_threshold):
"""
:param gt_relations:
:param pred_relations:
:param tiou_threshold:
:return:
"""
relation_num = len(gt_relations)
predict, predict_sub, predict_obj = 0, 0, 0
for relation, pred_trajs in pred_relations.items():
pred_sub = pred_trajs['sub']
pred_obj = pred_trajs['obj']
flag, flag_s, flag_o = False, False, False
gt_trajs = gt_relations[relation]
for gt_traj in gt_trajs:
gt_sub = gt_traj['sub']
gt_obj = gt_traj['obj']
s_tiou = tiou(pred_sub, gt_sub)
o_tiou = tiou(pred_obj, gt_obj)
r_iou = min(s_tiou, o_tiou)
if r_iou >= tiou_threshold:
flag = True
if s_tiou >= tiou_threshold:
flag_s = True
if o_tiou >= tiou_threshold:
flag_o = True
if flag:
predict += 1
if flag_s:
predict_sub += 1
if flag_o:
predict_obj += 1
predict = predict / relation_num
predict_sub = predict_sub / relation_num
predict_obj = predict_obj / relation_num
return predict, predict_sub, predict_obj, relation_num
def evaluate(groundtruth, prediction, tiou_threshold=0.5):
""" evaluate visual relation detection and visual
relation tagging.
"""
video_num = len(groundtruth)
print('Computing grounding accuracy over {} videos...'.format(video_num))
acc, acc_sub, acc_obj = 0.0, 0.0, 0.0
gt_rnum = 0
for qid, relation_gt in groundtruth.items():
if qid not in prediction:
continue
relation_pred = prediction[qid]
if len(relation_pred) == 0:
continue
video_acc, video_acc_sub, video_acc_obj, relation_num = (
eval_ground_scores(relation_gt, relation_pred, tiou_threshold))
acc += video_acc
acc_sub += video_acc_sub
acc_obj += video_acc_obj
gt_rnum += relation_num
acc /= video_num
acc_sub /= video_num
acc_obj /= video_num
print('Acc_S\t Acc_O\t Acc_R')
print('{:.2f}\t {:.2f}\t {:.2f}'.format(acc_sub * 100, acc_obj * 100,
acc * 100))
def main():
groundtruth_dir = 'dataset/vidvrd/'
gt_file = osp.join(groundtruth_dir, 'gt_relation_frame.json')
result_dir = 'results/'
res_file = osp.join(result_dir, 'test_viterbi_1gap_04_batch.json')
if not osp.exists(res_file):
print('Generating ...')
generate_track_link.main(res_file)
grountruth = load_file(gt_file)
prediction = load_file(res_file)
evaluate(grountruth, prediction)
if __name__ == '__main__':
main()
|
import os.path as osp
from evaluations.common import tiou
from evaluations.util import load_file
import generate_track_link
def eval_ground_scores(gt_relations, pred_relations, tiou_threshold):
"""
:param gt_relations:
:param pred_relations:
:param tiou_threshold:
:return:
"""
# pred_relations = sorted(pred_relations, key=lambda x: x['score'], reverse=True)
relation_num = len(gt_relations)
predict, predict_sub, predict_obj = 0, 0, 0
for relation, pred_trajs in pred_relations.items():
pred_sub = pred_trajs['sub']
pred_obj = pred_trajs['obj']
flag, flag_s, flag_o = False, False, False
gt_trajs = gt_relations[relation]
# print(relation)
for gt_traj in gt_trajs:
gt_sub = gt_traj['sub']
gt_obj = gt_traj['obj']
s_tiou = tiou(pred_sub, gt_sub)
o_tiou = tiou(pred_obj, gt_obj)
r_iou = min(s_tiou, o_tiou)
if r_iou >= tiou_threshold:
flag = True
if s_tiou >= tiou_threshold:
flag_s = True
if o_tiou >= tiou_threshold:
flag_o = True
if flag:
predict += 1
if flag_s:
predict_sub += 1
if flag_o:
predict_obj += 1
predict = predict / relation_num
predict_sub = predict_sub /relation_num
predict_obj = predict_obj /relation_num
return predict, predict_sub, predict_obj, relation_num
def evaluate(groundtruth, prediction, tiou_threshold=0.5):
""" evaluate visual relation detection and visual
relation tagging.
"""
video_num = len(groundtruth)
print('Computing grounding accuracy over {} videos...'.format(video_num))
acc, acc_sub, acc_obj = 0.0, 0.0, 0.0
gt_rnum = 0
for qid, relation_gt in groundtruth.items():
if qid not in prediction:
continue
relation_pred = prediction[qid]
if len(relation_pred) == 0:
continue
video_acc, video_acc_sub, video_acc_obj, relation_num = eval_ground_scores(relation_gt, relation_pred, tiou_threshold)
acc += video_acc
acc_sub += video_acc_sub
acc_obj += video_acc_obj
gt_rnum += relation_num
acc /= video_num
acc_sub /= video_num
acc_obj /= video_num
print("Acc_S\t Acc_O\t Acc_R")
print('{:.2f}\t {:.2f}\t {:.2f}'.format(acc_sub*100, acc_obj*100, acc*100))
def main():
groundtruth_dir = 'dataset/vidvrd/'
gt_file = osp.join(groundtruth_dir, 'gt_relation_frame.json')
result_dir = 'results/'
res_file = osp.join(result_dir, 'test_viterbi_1gap_04_batch.json')
if not osp.exists(res_file):
print('Generating ...')
generate_track_link.main(res_file)
grountruth = load_file(gt_file)
prediction = load_file(res_file)
evaluate(grountruth, prediction)
if __name__ == "__main__":
main()
|
[
1,
3,
4,
5,
6
] |
2,448 |
9555e5f75e3045afff6da9228764fca542caf539
|
<mask token>
|
<mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
|
<mask token>
class Migration(migrations.Migration):
dependencies = []
operations = [migrations.CreateModel(name='Beach', fields=[('id',
models.AutoField(verbose_name='ID', serialize=False, auto_created=
True, primary_key=True)), ('name', models.CharField(max_length=128)
)]), migrations.CreateModel(name='SelectedBeach', fields=[('id',
models.AutoField(verbose_name='ID', serialize=False, auto_created=
True, primary_key=True)), ('json_beach', models.ForeignKey(
related_name='json', blank=True, to='testapp.Beach', null=True)), (
'rest_framework_beach', models.ForeignKey(related_name='rest',
blank=True, to='testapp.Beach', null=True)), (
'tastypie_beach_contains', models.ForeignKey(related_name=
'tp_contains', blank=True, to='testapp.Beach', null=True)), (
'tastypie_beach_starts', models.ForeignKey(related_name='tp_starts',
blank=True, to='testapp.Beach', null=True))])]
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = []
operations = [migrations.CreateModel(name='Beach', fields=[('id',
models.AutoField(verbose_name='ID', serialize=False, auto_created=
True, primary_key=True)), ('name', models.CharField(max_length=128)
)]), migrations.CreateModel(name='SelectedBeach', fields=[('id',
models.AutoField(verbose_name='ID', serialize=False, auto_created=
True, primary_key=True)), ('json_beach', models.ForeignKey(
related_name='json', blank=True, to='testapp.Beach', null=True)), (
'rest_framework_beach', models.ForeignKey(related_name='rest',
blank=True, to='testapp.Beach', null=True)), (
'tastypie_beach_contains', models.ForeignKey(related_name=
'tp_contains', blank=True, to='testapp.Beach', null=True)), (
'tastypie_beach_starts', models.ForeignKey(related_name='tp_starts',
blank=True, to='testapp.Beach', null=True))])]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Beach',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=128)),
],
),
migrations.CreateModel(
name='SelectedBeach',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('json_beach', models.ForeignKey(related_name='json', blank=True, to='testapp.Beach', null=True)),
('rest_framework_beach', models.ForeignKey(related_name='rest', blank=True, to='testapp.Beach', null=True)),
('tastypie_beach_contains', models.ForeignKey(related_name='tp_contains', blank=True, to='testapp.Beach', null=True)),
('tastypie_beach_starts', models.ForeignKey(related_name='tp_starts', blank=True, to='testapp.Beach', null=True)),
],
),
]
|
[
0,
1,
2,
3,
4
] |
2,449 |
97ea837961c92b5c92a93ec33ac016de7ff1e876
|
<mask token>
class simpleLSTM:
<mask token>
def create_dataset(self, dataset, look_back=4):
dataX, dataY = [], []
for i in range(len(dataset) - look_back - 1):
a = dataset.iloc[i:i + look_back]
dataX.append(a)
dataY.append(dataset.iloc[i + look_back])
return np.array(dataX), np.array(dataY)
<mask token>
<mask token>
def LSTM_CNN(self, stock_h):
num_of_features = 4
dataset = self.get_features(stock_h, num_of_features=num_of_features)
train, test = self.split_dataset(dataset, '2019-01-01')
train, val = self.split_dataset(train, '2014-01-01')
batch_size = 1
look_back = 3
EPOCHS = 100
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,
trainX.shape[1]))
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])
)
testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.
shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
SAVE = False
if os.path.exists(self.MODEL_PATH) and SAVE:
model = tensorflow.keras.models.load_model(self.MODEL_PATH)
print('[INFO] MODEL LOADED...')
else:
input_shape = num_of_features, look_back
model = Sequential()
model.add(LSTM(32, activation='relu', input_shape=input_shape))
model.add(Dropout(0.2))
model.add(Dense(num_of_features, activation='relu'))
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)
history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1,
validation_data=(valX, valY))
model.save(self.MODEL_PATH)
print('[INFO] MODEL SAVED...')
trainPredict = model.predict(trainX)
valPredict = model.predict(valX)
testPredict = model.predict(testX)
testR2 = r2_score(testY, testPredict)
print('Test R2: %.2f ' % testR2)
valR2 = r2_score(valY, valPredict)
print('Val R2: %.2f ' % valR2)
trainR2 = r2_score(trainY, trainPredict)
print('Train R2: %.2f ' % trainR2)
feature_i = 0
plt.plot(test.index[look_back + 1:], testY[:, feature_i].ravel(),
label='Test_obs')
plt.plot(test.index[look_back + 1:], testPredict[:, feature_i].
ravel(), label='Test_pred')
plt.plot(val.index[look_back + 1:], valY[:, feature_i].ravel(),
label='Val_obs')
plt.plot(val.index[look_back + 1:], valPredict[:, feature_i].ravel(
), label='Val_pred')
plt.plot(train.index[look_back + 1:], trainY[:, feature_i].ravel(),
label='Train_obs')
plt.plot(train.index[look_back + 1:], trainPredict[:, feature_i].
ravel(), label='Train_pred')
plt.xticks(rotation=45)
plt.legend()
plt.show()
<mask token>
def statefulLSTM(self, stock_h):
dataset = self.get_features(stock_h, num_of_features=1)
train, test = self.split_dataset(dataset, '2017-01-01')
val, test = self.split_dataset(test, '2019-01-01')
batch_size = 1
look_back = 3
EPOCHS = 25
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
if os.path.exists('models\\stateful_lstm.h5'):
model = tensorflow.keras.models.load_model(
'models\\stateful_lstm.h5')
else:
model = Sequential()
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),
stateful=True, return_sequences=True))
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),
stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam',
metrics=['accuracy'])
for i in range(EPOCHS):
print(f'[INFO] EPOCH: {i}/{EPOCHS}')
model.fit(trainX, trainY, epochs=1, batch_size=batch_size,
verbose=2, shuffle=False, validation_data=(valX, valY))
model.save('models\\stateful_lstm.h5')
trainPredict = model.predict(trainX, batch_size=batch_size)
testPredict = model.predict(testX, batch_size=batch_size)
trainScore = math.sqrt(mean_squared_error(trainY[:, 0],
trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % trainScore)
testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:,
0]))
print('Test Score: %.2f RMSE' % testScore)
plt.plot(testY)
plt.plot(testPredict)
plt.show()
|
<mask token>
class simpleLSTM:
def __init__(self):
self.MODEL_PATH = 'models\\basic_lstm.h5'
def create_dataset(self, dataset, look_back=4):
dataX, dataY = [], []
for i in range(len(dataset) - look_back - 1):
a = dataset.iloc[i:i + look_back]
dataX.append(a)
dataY.append(dataset.iloc[i + look_back])
return np.array(dataX), np.array(dataY)
<mask token>
def split_dataset(self, dataset, split_date, initial_data_cut=None,
type='start'):
if initial_data_cut != None:
split_date_old = pd.Timestamp(initial_data_cut + ' 00:00:00')
if type == 'start':
dataset = dataset.loc[split_date_old:]
if type == 'end':
dataset = dataset.loc[:split_date_old]
split_date = pd.Timestamp(split_date + ' 00:00:00')
train = dataset.loc[:split_date]
test = dataset.loc[split_date:]
print(f'Train: {len(train)}, Test: {len(test)}')
return train, test
def LSTM_CNN(self, stock_h):
num_of_features = 4
dataset = self.get_features(stock_h, num_of_features=num_of_features)
train, test = self.split_dataset(dataset, '2019-01-01')
train, val = self.split_dataset(train, '2014-01-01')
batch_size = 1
look_back = 3
EPOCHS = 100
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,
trainX.shape[1]))
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])
)
testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.
shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
SAVE = False
if os.path.exists(self.MODEL_PATH) and SAVE:
model = tensorflow.keras.models.load_model(self.MODEL_PATH)
print('[INFO] MODEL LOADED...')
else:
input_shape = num_of_features, look_back
model = Sequential()
model.add(LSTM(32, activation='relu', input_shape=input_shape))
model.add(Dropout(0.2))
model.add(Dense(num_of_features, activation='relu'))
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)
history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1,
validation_data=(valX, valY))
model.save(self.MODEL_PATH)
print('[INFO] MODEL SAVED...')
trainPredict = model.predict(trainX)
valPredict = model.predict(valX)
testPredict = model.predict(testX)
testR2 = r2_score(testY, testPredict)
print('Test R2: %.2f ' % testR2)
valR2 = r2_score(valY, valPredict)
print('Val R2: %.2f ' % valR2)
trainR2 = r2_score(trainY, trainPredict)
print('Train R2: %.2f ' % trainR2)
feature_i = 0
plt.plot(test.index[look_back + 1:], testY[:, feature_i].ravel(),
label='Test_obs')
plt.plot(test.index[look_back + 1:], testPredict[:, feature_i].
ravel(), label='Test_pred')
plt.plot(val.index[look_back + 1:], valY[:, feature_i].ravel(),
label='Val_obs')
plt.plot(val.index[look_back + 1:], valPredict[:, feature_i].ravel(
), label='Val_pred')
plt.plot(train.index[look_back + 1:], trainY[:, feature_i].ravel(),
label='Train_obs')
plt.plot(train.index[look_back + 1:], trainPredict[:, feature_i].
ravel(), label='Train_pred')
plt.xticks(rotation=45)
plt.legend()
plt.show()
def basicLSTM(self, stock_h):
num_of_features = 4
dataset = self.get_features(stock_h, num_of_features=num_of_features)
train, test = self.split_dataset(dataset, '2016-01-01',
initial_data_cut='2019-01-01')
train, val = self.split_dataset(train, '2012-01-01')
look_back = 5
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,
trainX.shape[1]))
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])
)
testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.
shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
SAVE = True
if os.path.exists(self.MODEL_PATH) and SAVE:
model = tensorflow.keras.models.load_model(self.MODEL_PATH)
else:
model = Sequential()
model.add(LSTM(32, input_shape=(num_of_features, look_back)))
model.add(Dropout(0.3))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam',
metrics=['accuracy'])
model.fit(trainX, trainY, epochs=25, batch_size=1, verbose=2,
validation_data=(valX, valY), callbacks=[early_stop])
model.save(self.MODEL_PATH)
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
trainScore = r2_score(trainY, trainPredict)
print('R2 Train Score: %.2f' % trainScore)
testScore = r2_score(testY, testPredict)
print('R2 Test Score: %.2f' % testScore)
plt.plot(testY)
plt.plot(testPredict)
plt.show()
def statefulLSTM(self, stock_h):
dataset = self.get_features(stock_h, num_of_features=1)
train, test = self.split_dataset(dataset, '2017-01-01')
val, test = self.split_dataset(test, '2019-01-01')
batch_size = 1
look_back = 3
EPOCHS = 25
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
if os.path.exists('models\\stateful_lstm.h5'):
model = tensorflow.keras.models.load_model(
'models\\stateful_lstm.h5')
else:
model = Sequential()
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),
stateful=True, return_sequences=True))
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),
stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam',
metrics=['accuracy'])
for i in range(EPOCHS):
print(f'[INFO] EPOCH: {i}/{EPOCHS}')
model.fit(trainX, trainY, epochs=1, batch_size=batch_size,
verbose=2, shuffle=False, validation_data=(valX, valY))
model.save('models\\stateful_lstm.h5')
trainPredict = model.predict(trainX, batch_size=batch_size)
testPredict = model.predict(testX, batch_size=batch_size)
trainScore = math.sqrt(mean_squared_error(trainY[:, 0],
trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % trainScore)
testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:,
0]))
print('Test Score: %.2f RMSE' % testScore)
plt.plot(testY)
plt.plot(testPredict)
plt.show()
|
<mask token>
class simpleLSTM:
def __init__(self):
self.MODEL_PATH = 'models\\basic_lstm.h5'
def create_dataset(self, dataset, look_back=4):
dataX, dataY = [], []
for i in range(len(dataset) - look_back - 1):
a = dataset.iloc[i:i + look_back]
dataX.append(a)
dataY.append(dataset.iloc[i + look_back])
return np.array(dataX), np.array(dataY)
def get_features(self, stock_h, num_of_features=1):
if num_of_features == 1:
dataset = stock_h[['Close']]
elif num_of_features == 2:
dataset = stock_h[['Close', 'Open']]
elif num_of_features == 4:
dataset = stock_h[['Close', 'Open', 'Low', 'High']]
elif num_of_features == 5:
dataset = stock_h[['Close', 'Open', 'Low', 'High', 'Volume']]
return dataset
def split_dataset(self, dataset, split_date, initial_data_cut=None,
type='start'):
if initial_data_cut != None:
split_date_old = pd.Timestamp(initial_data_cut + ' 00:00:00')
if type == 'start':
dataset = dataset.loc[split_date_old:]
if type == 'end':
dataset = dataset.loc[:split_date_old]
split_date = pd.Timestamp(split_date + ' 00:00:00')
train = dataset.loc[:split_date]
test = dataset.loc[split_date:]
print(f'Train: {len(train)}, Test: {len(test)}')
return train, test
def LSTM_CNN(self, stock_h):
num_of_features = 4
dataset = self.get_features(stock_h, num_of_features=num_of_features)
train, test = self.split_dataset(dataset, '2019-01-01')
train, val = self.split_dataset(train, '2014-01-01')
batch_size = 1
look_back = 3
EPOCHS = 100
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,
trainX.shape[1]))
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])
)
testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.
shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
SAVE = False
if os.path.exists(self.MODEL_PATH) and SAVE:
model = tensorflow.keras.models.load_model(self.MODEL_PATH)
print('[INFO] MODEL LOADED...')
else:
input_shape = num_of_features, look_back
model = Sequential()
model.add(LSTM(32, activation='relu', input_shape=input_shape))
model.add(Dropout(0.2))
model.add(Dense(num_of_features, activation='relu'))
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)
history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1,
validation_data=(valX, valY))
model.save(self.MODEL_PATH)
print('[INFO] MODEL SAVED...')
trainPredict = model.predict(trainX)
valPredict = model.predict(valX)
testPredict = model.predict(testX)
testR2 = r2_score(testY, testPredict)
print('Test R2: %.2f ' % testR2)
valR2 = r2_score(valY, valPredict)
print('Val R2: %.2f ' % valR2)
trainR2 = r2_score(trainY, trainPredict)
print('Train R2: %.2f ' % trainR2)
feature_i = 0
plt.plot(test.index[look_back + 1:], testY[:, feature_i].ravel(),
label='Test_obs')
plt.plot(test.index[look_back + 1:], testPredict[:, feature_i].
ravel(), label='Test_pred')
plt.plot(val.index[look_back + 1:], valY[:, feature_i].ravel(),
label='Val_obs')
plt.plot(val.index[look_back + 1:], valPredict[:, feature_i].ravel(
), label='Val_pred')
plt.plot(train.index[look_back + 1:], trainY[:, feature_i].ravel(),
label='Train_obs')
plt.plot(train.index[look_back + 1:], trainPredict[:, feature_i].
ravel(), label='Train_pred')
plt.xticks(rotation=45)
plt.legend()
plt.show()
def basicLSTM(self, stock_h):
num_of_features = 4
dataset = self.get_features(stock_h, num_of_features=num_of_features)
train, test = self.split_dataset(dataset, '2016-01-01',
initial_data_cut='2019-01-01')
train, val = self.split_dataset(train, '2012-01-01')
look_back = 5
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,
trainX.shape[1]))
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])
)
testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.
shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
SAVE = True
if os.path.exists(self.MODEL_PATH) and SAVE:
model = tensorflow.keras.models.load_model(self.MODEL_PATH)
else:
model = Sequential()
model.add(LSTM(32, input_shape=(num_of_features, look_back)))
model.add(Dropout(0.3))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam',
metrics=['accuracy'])
model.fit(trainX, trainY, epochs=25, batch_size=1, verbose=2,
validation_data=(valX, valY), callbacks=[early_stop])
model.save(self.MODEL_PATH)
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
trainScore = r2_score(trainY, trainPredict)
print('R2 Train Score: %.2f' % trainScore)
testScore = r2_score(testY, testPredict)
print('R2 Test Score: %.2f' % testScore)
plt.plot(testY)
plt.plot(testPredict)
plt.show()
def statefulLSTM(self, stock_h):
dataset = self.get_features(stock_h, num_of_features=1)
train, test = self.split_dataset(dataset, '2017-01-01')
val, test = self.split_dataset(test, '2019-01-01')
batch_size = 1
look_back = 3
EPOCHS = 25
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
if os.path.exists('models\\stateful_lstm.h5'):
model = tensorflow.keras.models.load_model(
'models\\stateful_lstm.h5')
else:
model = Sequential()
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),
stateful=True, return_sequences=True))
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),
stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam',
metrics=['accuracy'])
for i in range(EPOCHS):
print(f'[INFO] EPOCH: {i}/{EPOCHS}')
model.fit(trainX, trainY, epochs=1, batch_size=batch_size,
verbose=2, shuffle=False, validation_data=(valX, valY))
model.save('models\\stateful_lstm.h5')
trainPredict = model.predict(trainX, batch_size=batch_size)
testPredict = model.predict(testX, batch_size=batch_size)
trainScore = math.sqrt(mean_squared_error(trainY[:, 0],
trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % trainScore)
testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:,
0]))
print('Test Score: %.2f RMSE' % testScore)
plt.plot(testY)
plt.plot(testPredict)
plt.show()
|
import numpy as np
import pandas as pd
import math
import sklearn
import sklearn.preprocessing
import datetime
import os
import matplotlib.pyplot as plt
import yfinance as yf
import math
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import LSTM
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, r2_score
import tensorflow
class simpleLSTM:
def __init__(self):
self.MODEL_PATH = 'models\\basic_lstm.h5'
def create_dataset(self, dataset, look_back=4):
dataX, dataY = [], []
for i in range(len(dataset) - look_back - 1):
a = dataset.iloc[i:i + look_back]
dataX.append(a)
dataY.append(dataset.iloc[i + look_back])
return np.array(dataX), np.array(dataY)
def get_features(self, stock_h, num_of_features=1):
if num_of_features == 1:
dataset = stock_h[['Close']]
elif num_of_features == 2:
dataset = stock_h[['Close', 'Open']]
elif num_of_features == 4:
dataset = stock_h[['Close', 'Open', 'Low', 'High']]
elif num_of_features == 5:
dataset = stock_h[['Close', 'Open', 'Low', 'High', 'Volume']]
return dataset
def split_dataset(self, dataset, split_date, initial_data_cut=None,
type='start'):
if initial_data_cut != None:
split_date_old = pd.Timestamp(initial_data_cut + ' 00:00:00')
if type == 'start':
dataset = dataset.loc[split_date_old:]
if type == 'end':
dataset = dataset.loc[:split_date_old]
split_date = pd.Timestamp(split_date + ' 00:00:00')
train = dataset.loc[:split_date]
test = dataset.loc[split_date:]
print(f'Train: {len(train)}, Test: {len(test)}')
return train, test
def LSTM_CNN(self, stock_h):
num_of_features = 4
dataset = self.get_features(stock_h, num_of_features=num_of_features)
train, test = self.split_dataset(dataset, '2019-01-01')
train, val = self.split_dataset(train, '2014-01-01')
batch_size = 1
look_back = 3
EPOCHS = 100
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,
trainX.shape[1]))
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])
)
testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.
shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
SAVE = False
if os.path.exists(self.MODEL_PATH) and SAVE:
model = tensorflow.keras.models.load_model(self.MODEL_PATH)
print('[INFO] MODEL LOADED...')
else:
input_shape = num_of_features, look_back
model = Sequential()
model.add(LSTM(32, activation='relu', input_shape=input_shape))
model.add(Dropout(0.2))
model.add(Dense(num_of_features, activation='relu'))
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)
history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1,
validation_data=(valX, valY))
model.save(self.MODEL_PATH)
print('[INFO] MODEL SAVED...')
trainPredict = model.predict(trainX)
valPredict = model.predict(valX)
testPredict = model.predict(testX)
testR2 = r2_score(testY, testPredict)
print('Test R2: %.2f ' % testR2)
valR2 = r2_score(valY, valPredict)
print('Val R2: %.2f ' % valR2)
trainR2 = r2_score(trainY, trainPredict)
print('Train R2: %.2f ' % trainR2)
feature_i = 0
plt.plot(test.index[look_back + 1:], testY[:, feature_i].ravel(),
label='Test_obs')
plt.plot(test.index[look_back + 1:], testPredict[:, feature_i].
ravel(), label='Test_pred')
plt.plot(val.index[look_back + 1:], valY[:, feature_i].ravel(),
label='Val_obs')
plt.plot(val.index[look_back + 1:], valPredict[:, feature_i].ravel(
), label='Val_pred')
plt.plot(train.index[look_back + 1:], trainY[:, feature_i].ravel(),
label='Train_obs')
plt.plot(train.index[look_back + 1:], trainPredict[:, feature_i].
ravel(), label='Train_pred')
plt.xticks(rotation=45)
plt.legend()
plt.show()
def basicLSTM(self, stock_h):
num_of_features = 4
dataset = self.get_features(stock_h, num_of_features=num_of_features)
train, test = self.split_dataset(dataset, '2016-01-01',
initial_data_cut='2019-01-01')
train, val = self.split_dataset(train, '2012-01-01')
look_back = 5
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,
trainX.shape[1]))
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])
)
testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.
shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
SAVE = True
if os.path.exists(self.MODEL_PATH) and SAVE:
model = tensorflow.keras.models.load_model(self.MODEL_PATH)
else:
model = Sequential()
model.add(LSTM(32, input_shape=(num_of_features, look_back)))
model.add(Dropout(0.3))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam',
metrics=['accuracy'])
model.fit(trainX, trainY, epochs=25, batch_size=1, verbose=2,
validation_data=(valX, valY), callbacks=[early_stop])
model.save(self.MODEL_PATH)
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
trainScore = r2_score(trainY, trainPredict)
print('R2 Train Score: %.2f' % trainScore)
testScore = r2_score(testY, testPredict)
print('R2 Test Score: %.2f' % testScore)
plt.plot(testY)
plt.plot(testPredict)
plt.show()
def statefulLSTM(self, stock_h):
dataset = self.get_features(stock_h, num_of_features=1)
train, test = self.split_dataset(dataset, '2017-01-01')
val, test = self.split_dataset(test, '2019-01-01')
batch_size = 1
look_back = 3
EPOCHS = 25
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
if os.path.exists('models\\stateful_lstm.h5'):
model = tensorflow.keras.models.load_model(
'models\\stateful_lstm.h5')
else:
model = Sequential()
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),
stateful=True, return_sequences=True))
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),
stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam',
metrics=['accuracy'])
for i in range(EPOCHS):
print(f'[INFO] EPOCH: {i}/{EPOCHS}')
model.fit(trainX, trainY, epochs=1, batch_size=batch_size,
verbose=2, shuffle=False, validation_data=(valX, valY))
model.save('models\\stateful_lstm.h5')
trainPredict = model.predict(trainX, batch_size=batch_size)
testPredict = model.predict(testX, batch_size=batch_size)
trainScore = math.sqrt(mean_squared_error(trainY[:, 0],
trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % trainScore)
testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:,
0]))
print('Test Score: %.2f RMSE' % testScore)
plt.plot(testY)
plt.plot(testPredict)
plt.show()
|
import numpy as np
import pandas as pd
import math
import sklearn
import sklearn.preprocessing
import datetime
import os
import matplotlib.pyplot as plt
import yfinance as yf
import math
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import LSTM
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, r2_score
import tensorflow
class simpleLSTM:
def __init__(self):
self.MODEL_PATH = r"models\basic_lstm.h5"
def create_dataset(self, dataset, look_back=4):
dataX, dataY = [], []
for i in range(len(dataset) - look_back - 1):
a = dataset.iloc[i:(i + look_back)]
dataX.append(a)
dataY.append(dataset.iloc[i + look_back])
# dataY.append(dataset.iloc[i + look_back][0])
return np.array(dataX), np.array(dataY)
def get_features(self, stock_h, num_of_features=1):
if num_of_features == 1:
dataset = stock_h[["Close"]]
elif num_of_features == 2:
dataset = stock_h[["Close", "Open"]]
elif num_of_features == 4:
dataset = stock_h[["Close", "Open", "Low", "High"]]
elif num_of_features == 5:
dataset = stock_h[["Close", "Open", "Low", "High", "Volume"]]
return dataset
def split_dataset(self, dataset, split_date, initial_data_cut=None, type="start"):
if initial_data_cut != None:
split_date_old = pd.Timestamp(initial_data_cut + ' 00:00:00')
if type == "start":
dataset = dataset.loc[split_date_old:]
if type == "end":
dataset = dataset.loc[:split_date_old]
split_date = pd.Timestamp(split_date + ' 00:00:00')
train = dataset.loc[:split_date]
test = dataset.loc[split_date:]
# train_size = int(len(dataset) * 0.67)
# test_size = len(dataset) - train_size
# train = dataset[0:train_size, :]
# test = dataset[train_size:len(dataset), :]
# print(len(train), len(test))
print(f"Train: {len(train)}, Test: {len(test)}")
return train, test
def LSTM_CNN(self, stock_h):
num_of_features = 4
dataset = self.get_features(stock_h, num_of_features=num_of_features)
# train, test = self.split_dataset(dataset, "2020-09-01", initial_data_cut="2020-01-01", type="start")
# train, test = self.split_dataset(dataset, "2017-02-01")
# val, test = self.split_dataset(test, "2021-01-01")
# train, test = self.split_dataset(dataset, "2017-01-01", initial_data_cut="2019-01-01", type="end")
train, test = self.split_dataset(dataset, "2019-01-01")
train, val = self.split_dataset(train, "2014-01-01")
batch_size = 1
look_back = 3
EPOCHS = 100
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], num_of_features, trainX.shape[1]))
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
SAVE = False
# It can be used to reconstruct the model identically.
if os.path.exists(self.MODEL_PATH) and SAVE:
model = tensorflow.keras.models.load_model(self.MODEL_PATH)
print("[INFO] MODEL LOADED...")
else:
# input_shape = (look_back, 1)
input_shape = (num_of_features, look_back)
model = Sequential()
model.add(
LSTM(32, activation="relu", input_shape=input_shape))
# model.add(
# Conv1D(filters=32, kernel_size=5, strides=1, padding="same", activation="relu",
# input_shape=input_shape))
# lstm_model.add(Dropout(0.1))
model.add(Dropout(0.2))
model.add(Dense(num_of_features, activation='relu'))
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)
# callbacks=[early_stop]
history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1, validation_data=(valX, valY))
model.save(self.MODEL_PATH)
print("[INFO] MODEL SAVED...")
trainPredict = model.predict(trainX)
valPredict = model.predict(valX)
testPredict = model.predict(testX)
# testR2 = r2_score(testY[:, 0], testPredict[:, 0])
# print('Test R2: %.2f ' % (testR2))
# valR2 = r2_score(valY[:, 0], valPredict[:, 0])
# print('Val R2: %.2f ' % (valR2))
# trainR2 = r2_score(trainY[:, 0], trainPredict[:, 0])
# print('Train R2: %.2f ' % (trainR2))
testR2 = r2_score(testY, testPredict)
print('Test R2: %.2f ' % (testR2))
valR2 = r2_score(valY, valPredict)
print('Val R2: %.2f ' % (valR2))
trainR2 = r2_score(trainY, trainPredict)
print('Train R2: %.2f ' % (trainR2))
feature_i = 0
plt.plot(test.index[look_back+1:], testY[:, feature_i].ravel(), label="Test_obs")
plt.plot(test.index[look_back+1:], testPredict[:, feature_i].ravel(), label="Test_pred")
plt.plot(val.index[look_back+1:], valY[:, feature_i].ravel(), label="Val_obs")
plt.plot(val.index[look_back+1:], valPredict[:, feature_i].ravel(), label="Val_pred")
plt.plot(train.index[look_back+1:], trainY[:, feature_i].ravel(), label="Train_obs")
plt.plot(train.index[look_back+1:], trainPredict[:, feature_i].ravel(), label="Train_pred")
plt.xticks(rotation=45)
plt.legend()
plt.show()
def basicLSTM(self, stock_h):
num_of_features = 4
dataset = self.get_features(stock_h, num_of_features=num_of_features)
train, test = self.split_dataset(dataset, "2016-01-01", initial_data_cut="2019-01-01")
# train, test = self.split_dataset(dataset, "2018-01-01")
train, val = self.split_dataset(train, "2012-01-01")
look_back = 5
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], num_of_features, trainX.shape[1]))
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
SAVE = True
if os.path.exists(self.MODEL_PATH) and SAVE:
model = tensorflow.keras.models.load_model(self.MODEL_PATH)
else:
model = Sequential()
model.add(LSTM(32, input_shape=(num_of_features, look_back)))
model.add(Dropout(0.3))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
model.fit(trainX, trainY, epochs=25, batch_size=1, verbose=2, validation_data=(valX, valY),
callbacks=[early_stop])
model.save(self.MODEL_PATH)
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# trainScore = math.sqrt(mean_squared_error(trainY, trainPredict))
# print('Train Score: %.2f RMSE' % (trainScore))
# testScore = math.sqrt(mean_squared_error(testY, testPredict))
# print('Test Score: %.2f RMSE' % (testScore))
trainScore = r2_score(trainY, trainPredict)
print('R2 Train Score: %.2f' % (trainScore))
testScore = r2_score(testY, testPredict)
print('R2 Test Score: %.2f' % (testScore))
plt.plot(testY)
plt.plot(testPredict)
plt.show()
def statefulLSTM(self, stock_h):
dataset = self.get_features(stock_h, num_of_features=1)
# train, test = split_dataset(dataset, "2019-01-01", initial_data_cut="2018-01-01")
train, test = self.split_dataset(dataset, "2017-01-01")
val, test = self.split_dataset(test, "2019-01-01")
batch_size = 1
look_back = 3
EPOCHS = 25
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
# reshape input to be [samples, time steps, features]
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
# trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
# valX = np.reshape(valX, (valX.shape[0], 1, valX.shape[1]))
# testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
# It can be used to reconstruct the model identically.
if os.path.exists("models\stateful_lstm.h5"):
model = tensorflow.keras.models.load_model("models\stateful_lstm.h5")
else:
model = Sequential()
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True, return_sequences=True))
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
for i in range(EPOCHS):
print(f"[INFO] EPOCH: {i}/{EPOCHS}")
model.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=2, shuffle=False, validation_data=(valX, valY))
# model.reset_states()
model.save("models\stateful_lstm.h5")
# model.save("stateful_lstm")
# model.fit(trainX, trainY, epochs=200, batch_size=1, verbose=2, validation_data=(valX, valY),
# callbacks=[early_stop])
trainPredict = model.predict(trainX, batch_size=batch_size)
# model.reset_states()
testPredict = model.predict(testX, batch_size=batch_size)
# trainPredict = model.predict(trainX)
# testPredict = model.predict(testX)
# trainScore = math.sqrt(mean_squared_error(trainY, trainPredict))
# print('Train Score: %.2f RMSE' % (trainScore))
# testScore = math.sqrt(mean_squared_error(testY, testPredict))
# print('Test Score: %.2f RMSE' % (testScore))
#
trainScore = math.sqrt(mean_squared_error(trainY[:, 0], trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:, 0]))
print('Test Score: %.2f RMSE' % (testScore))
plt.plot(testY)
plt.plot(testPredict)
plt.show()
# # shift train predictions for plotting
# trainPredictPlot = np.empty_like(dataset)
# trainPredictPlot[:, :] = np.nan
# trainPredictPlot[look_back:len(trainPredict) + look_back, :] = trainPredict
# # shift test predictions for plotting
# testPredictPlot = np.empty_like(dataset)
# testPredictPlot[:, :] = np.nan
# testPredictPlot[len(trainPredict) + (look_back * 2) + 1:len(dataset) - 1, :] = testPredict
# # plot baseline and predictions
# # plt.plot(scaler.inverse_transform(dataset))
# plt.plot(trainPredictPlot)
# plt.plot(testPredictPlot)
# plt.show()
|
[
4,
7,
8,
9,
10
] |
2,450 |
ed7fa6e6f30eb06400cb38128617967a597f6c04
|
<mask token>
def greedy_motif_search(dnas, k, t):
best_motifs = [dna[:k] for dna in dnas]
best_score = score_motifs(best_motifs)
for i in range(len(dnas[0]) - k + 1):
print(i)
motifs = [dnas[0][i:i + k]]
for j in range(1, t):
motifs.append(profile_most_probable(dnas[j], k, form_profile(
motifs)))
score = score_motifs(motifs)
if score < best_score:
best_motifs = motifs
best_score = score
return best_motifs
def form_profile(motifs):
profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)
for motif in motifs:
for i, base in enumerate(motif):
profile.loc[base, i] += 1
return profile / len(motifs)
def score_motifs(motifs):
profile = form_profile(motifs)
consensus = ''.join(profile.idxmax())
return sum(hamming_distance(motif, consensus) for motif in motifs)
<mask token>
|
<mask token>
def greedy_motif_search(dnas, k, t):
best_motifs = [dna[:k] for dna in dnas]
best_score = score_motifs(best_motifs)
for i in range(len(dnas[0]) - k + 1):
print(i)
motifs = [dnas[0][i:i + k]]
for j in range(1, t):
motifs.append(profile_most_probable(dnas[j], k, form_profile(
motifs)))
score = score_motifs(motifs)
if score < best_score:
best_motifs = motifs
best_score = score
return best_motifs
def form_profile(motifs):
profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)
for motif in motifs:
for i, base in enumerate(motif):
profile.loc[base, i] += 1
return profile / len(motifs)
def score_motifs(motifs):
profile = form_profile(motifs)
consensus = ''.join(profile.idxmax())
return sum(hamming_distance(motif, consensus) for motif in motifs)
def main():
with open(filename) as f:
k, t = list(map(int, f.readline().strip().split()))
dnas = [line.strip() for line in f.readlines()]
for motif in greedy_motif_search(dnas, k, t):
print(motif)
if __name__ == '__main__':
main()
|
<mask token>
filename = 'rosalind_ba2d.txt'
BASES = ['A', 'C', 'G', 'T']
def greedy_motif_search(dnas, k, t):
best_motifs = [dna[:k] for dna in dnas]
best_score = score_motifs(best_motifs)
for i in range(len(dnas[0]) - k + 1):
print(i)
motifs = [dnas[0][i:i + k]]
for j in range(1, t):
motifs.append(profile_most_probable(dnas[j], k, form_profile(
motifs)))
score = score_motifs(motifs)
if score < best_score:
best_motifs = motifs
best_score = score
return best_motifs
def form_profile(motifs):
profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)
for motif in motifs:
for i, base in enumerate(motif):
profile.loc[base, i] += 1
return profile / len(motifs)
def score_motifs(motifs):
profile = form_profile(motifs)
consensus = ''.join(profile.idxmax())
return sum(hamming_distance(motif, consensus) for motif in motifs)
def main():
with open(filename) as f:
k, t = list(map(int, f.readline().strip().split()))
dnas = [line.strip() for line in f.readlines()]
for motif in greedy_motif_search(dnas, k, t):
print(motif)
if __name__ == '__main__':
main()
|
<mask token>
import pandas as pd
from ba1g import hamming_distance
from ba2c import profile_most_probable
filename = 'rosalind_ba2d.txt'
BASES = ['A', 'C', 'G', 'T']
def greedy_motif_search(dnas, k, t):
best_motifs = [dna[:k] for dna in dnas]
best_score = score_motifs(best_motifs)
for i in range(len(dnas[0]) - k + 1):
print(i)
motifs = [dnas[0][i:i + k]]
for j in range(1, t):
motifs.append(profile_most_probable(dnas[j], k, form_profile(
motifs)))
score = score_motifs(motifs)
if score < best_score:
best_motifs = motifs
best_score = score
return best_motifs
def form_profile(motifs):
profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)
for motif in motifs:
for i, base in enumerate(motif):
profile.loc[base, i] += 1
return profile / len(motifs)
def score_motifs(motifs):
profile = form_profile(motifs)
consensus = ''.join(profile.idxmax())
return sum(hamming_distance(motif, consensus) for motif in motifs)
def main():
with open(filename) as f:
k, t = list(map(int, f.readline().strip().split()))
dnas = [line.strip() for line in f.readlines()]
for motif in greedy_motif_search(dnas, k, t):
print(motif)
if __name__ == '__main__':
main()
|
'''
Implement GreedyMotifSearch
http://rosalind.info/problems/ba2d/
Given: Integers k and t, followed by a collection of strings Dna.
Return: A collection of strings BestMotifs resulting from running GreedyMotifSearch(Dna, k, t). If at any step you find more than one Profile-most probable k-mer in a given string, use the one occurring first.
'''
import pandas as pd
from ba1g import hamming_distance
from ba2c import profile_most_probable
filename = 'rosalind_ba2d.txt'
BASES = ['A', 'C', 'G', 'T']
def greedy_motif_search(dnas, k, t):
# took ~4 min to run on test dataset but seems to be the correct algorithm
# based on pseudocode (and other peoples' submissions)
best_motifs = [dna[:k] for dna in dnas]
best_score = score_motifs(best_motifs)
for i in range(len(dnas[0]) - k + 1):
print(i)
motifs = [dnas[0][i:i+k]]
for j in range(1, t):
motifs.append(profile_most_probable(dnas[j], k, form_profile(motifs)))
score = score_motifs(motifs)
if score < best_score:
best_motifs = motifs
best_score = score
return best_motifs
def form_profile(motifs):
    """Build a 4 x k DataFrame of base frequencies (rows indexed by BASES)."""
    profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)
    for motif in motifs:
        for i, base in enumerate(motif):
            profile.loc[base, i] += 1
    # divide the counts by the number of motifs to get frequencies
    return profile / len(motifs)
def score_motifs(motifs):
    """Sum of Hamming distances from each motif to the consensus string."""
    # couldn't figure out what 'score' from pseudocode meant :(
    # had to reference someone else's code:
    # https://github.com/NathanielLovin/Rosalind/blob/master/BA2D.py
    profile = form_profile(motifs)
    # neat df function generates the consensus string
    consensus = ''.join(profile.idxmax())
    return sum(hamming_distance(motif, consensus) for motif in motifs)
def main():
    """Parse k, t and the DNA strings from the input file; print the motifs."""
    with open(filename) as f:
        # first line holds the integers k and t; remaining lines are the DNAs
        k, t = list(map(int, f.readline().strip().split()))
        dnas = [line.strip() for line in f.readlines()]
    for motif in greedy_motif_search(dnas, k, t):
        print(motif)
if __name__ == '__main__':
main()
|
[
3,
5,
6,
7,
8
] |
2,451 |
b46fe26f1a3c9e93e735b752e54132bd95408251
|
<mask token>
class MongoTest:
<mask token>
try:
client = MongoClient(
'mongodb://root:root@localhost:27017/test?authSource=admin')
print('init mongo client:', client)
except Exception as e:
logging.exception(e)
@classmethod
def get_connection(cls) ->MongoClient:
return cls.client or MongoClient(
'mongodb://root:root@localhost:27017/test?authSource=admin')
@classmethod
def insert(cls, db: str, collection: str, data: dict) ->InsertOneResult:
return cls.client.get_database(db).get_collection(collection
).insert_one(data)
<mask token>
<mask token>
@classmethod
def update(cls, db: str, collection: str, condition: dict, update: dict
) ->UpdateResult:
return cls.client.get_database(db).get_collection(collection
).update_one(condition, update)
<mask token>
|
<mask token>
class MongoTest:
client = None
try:
client = MongoClient(
'mongodb://root:root@localhost:27017/test?authSource=admin')
print('init mongo client:', client)
except Exception as e:
logging.exception(e)
@classmethod
def get_connection(cls) ->MongoClient:
return cls.client or MongoClient(
'mongodb://root:root@localhost:27017/test?authSource=admin')
@classmethod
def insert(cls, db: str, collection: str, data: dict) ->InsertOneResult:
return cls.client.get_database(db).get_collection(collection
).insert_one(data)
@classmethod
def find(cls, db: str, collection: str, condition: dict) ->Cursor:
return cls.client.get_database(db).get_collection(collection).find(
condition)
@classmethod
def delete(cls, db: str, collection: str, condition: dict) ->DeleteResult:
return cls.client.get_database(db).get_collection(collection
).delete_one(condition)
@classmethod
def update(cls, db: str, collection: str, condition: dict, update: dict
) ->UpdateResult:
return cls.client.get_database(db).get_collection(collection
).update_one(condition, update)
<mask token>
|
<mask token>
class MongoTest:
client = None
try:
client = MongoClient(
'mongodb://root:root@localhost:27017/test?authSource=admin')
print('init mongo client:', client)
except Exception as e:
logging.exception(e)
@classmethod
def get_connection(cls) ->MongoClient:
return cls.client or MongoClient(
'mongodb://root:root@localhost:27017/test?authSource=admin')
@classmethod
def insert(cls, db: str, collection: str, data: dict) ->InsertOneResult:
return cls.client.get_database(db).get_collection(collection
).insert_one(data)
@classmethod
def find(cls, db: str, collection: str, condition: dict) ->Cursor:
return cls.client.get_database(db).get_collection(collection).find(
condition)
@classmethod
def delete(cls, db: str, collection: str, condition: dict) ->DeleteResult:
return cls.client.get_database(db).get_collection(collection
).delete_one(condition)
@classmethod
def update(cls, db: str, collection: str, condition: dict, update: dict
) ->UpdateResult:
return cls.client.get_database(db).get_collection(collection
).update_one(condition, update)
if __name__ == '__main__':
for result in MongoTest.find('test', 'inventory', {}):
pprint(result)
MongoTest.delete('test', 'inventory', {'item': 'pymongo20201008204049'})
MongoTest.update('test', 'inventory', {'item': 'pymongo'}, {'$set': {
'size.uom': 'cm', 'status': 'P'}, '$currentDate': {'lastModified':
True}})
|
<mask token>
import logging
import time
import traceback
from pprint import pprint
from pymongo import MongoClient
from pymongo.cursor import Cursor
from pymongo.results import DeleteResult, InsertOneResult, UpdateResult
class MongoTest:
    """Thin CRUD wrapper around a shared pymongo ``MongoClient``.

    The connection is attempted once at class-definition (import) time; on
    failure ``client`` stays ``None`` and the error is logged.
    NOTE(review): credentials are hard-coded in the URI — move to config.
    """
    # shared connection used by every classmethod below
    client = None
    try:
        client = MongoClient(
            'mongodb://root:root@localhost:27017/test?authSource=admin')
        print('init mongo client:', client)
    except Exception as e:
        logging.exception(e)
    @classmethod
    def get_connection(cls) ->MongoClient:
        """Return the shared client, or a fresh one if startup failed."""
        return cls.client or MongoClient(
            'mongodb://root:root@localhost:27017/test?authSource=admin')
    @classmethod
    def insert(cls, db: str, collection: str, data: dict) ->InsertOneResult:
        """Insert ``data`` into ``db``.``collection``."""
        return cls.client.get_database(db).get_collection(collection
            ).insert_one(data)
    @classmethod
    def find(cls, db: str, collection: str, condition: dict) ->Cursor:
        """Return a cursor over documents matching ``condition``."""
        return cls.client.get_database(db).get_collection(collection).find(
            condition)
    @classmethod
    def delete(cls, db: str, collection: str, condition: dict) ->DeleteResult:
        """Delete the first document matching ``condition``."""
        return cls.client.get_database(db).get_collection(collection
            ).delete_one(condition)
    @classmethod
    def update(cls, db: str, collection: str, condition: dict, update: dict
        ) ->UpdateResult:
        """Apply the ``update`` document to the first match of ``condition``."""
        return cls.client.get_database(db).get_collection(collection
            ).update_one(condition, update)
if __name__ == '__main__':
for result in MongoTest.find('test', 'inventory', {}):
pprint(result)
MongoTest.delete('test', 'inventory', {'item': 'pymongo20201008204049'})
MongoTest.update('test', 'inventory', {'item': 'pymongo'}, {'$set': {
'size.uom': 'cm', 'status': 'P'}, '$currentDate': {'lastModified':
True}})
|
# -*- coding: utf-8 -*-
"""
测试如何使用python的pymongo模块操作MongoDB
@author: hch
@date : 2020/10/8
"""
import logging
import time
import traceback
from pprint import pprint
from pymongo import MongoClient
from pymongo.cursor import Cursor
from pymongo.results import DeleteResult, InsertOneResult, UpdateResult
class MongoTest:
client = None
try:
client = MongoClient('mongodb://root:root@localhost:27017/test?authSource=admin')
print('init mongo client:', client)
except Exception as e:
# traceback.print_exc()
logging.exception(e)
@classmethod
def get_connection(cls) -> MongoClient:
return cls.client or MongoClient('mongodb://root:root@localhost:27017/test?authSource=admin')
@classmethod
def insert(cls, db: str, collection: str, data: dict) -> InsertOneResult:
return cls.client.get_database(db).get_collection(collection).insert_one(data)
@classmethod
def find(cls, db: str, collection: str, condition: dict) -> Cursor:
return cls.client.get_database(db).get_collection(collection).find(condition)
@classmethod
def delete(cls, db: str, collection: str, condition: dict) -> DeleteResult:
return cls.client.get_database(db).get_collection(collection).delete_one(condition)
@classmethod
def update(cls, db: str, collection: str, condition: dict, update: dict) -> UpdateResult:
return cls.client.get_database(db).get_collection(collection).update_one(condition, update)
if __name__ == '__main__':
# client = MongoTest.get_connection()
# client = MongoClient('mongodb://root@localhost:27017/test?authSource=admin')
# print(client.test.__class__) # <class 'pymongo.database.Database'>
# print(client.test.inventory.__class__) # <class 'pymongo.collection.Collection'>
# client.test.inventory.insert_one(
# {
# "item": "pymongo",
# "qty": 100,
# "tags": ["cotton"],
# "size": {"h": 28, "w": 35.5, "uom": "cm"}
# }
# )
# MongoTest.insert('test', 'inventory',
# {
# "item": "pymongo" + time.strftime('%Y%m%d%H%M%S', time.localtime()),
# "qty": 100,
# "tags": ["cotton"],
# "size": {"h": 28, "w": 35.5, "uom": "cm"}
# }
# )
for result in MongoTest.find('test', 'inventory', {}):
pprint(result)
MongoTest.delete('test', 'inventory', {'item': 'pymongo20201008204049'})
MongoTest.update('test', 'inventory', {"item": "pymongo"},
{"$set": {"size.uom": "cm", "status": "P"},
"$currentDate": {"lastModified": True}})
|
[
4,
7,
8,
9,
10
] |
2,452 |
9583a97ae4b1fbf5ecdf33d848b13bf0b28d2eb4
|
<mask token>
|
<mask token>
add(2, 2)
sub(2, 3)
|
from package.pack import *
add(2, 2)
sub(2, 3)
|
from package.pack import *
add(2,2)
sub(2,3)
| null |
[
0,
1,
2,
3
] |
2,453 |
55cf99e3493c9c94955fc7e75ac428cbd88ac5cf
|
<mask token>
def preProcesar(request):
id_archivo = request.GET.get('id_archivo')
archivo = DataArchivoCargueProcesar.objects.filter(id=id_archivo).last()
valores, columnas = iniPreviw(id_archivo, archivo.
archivocargueprocesararchivo, archivo.
archivocargueprocesararchivotipocargue.id, archivo.
archivocargueprocesararchivoobservacion)
iniPreviws = dict(valores=valores, columnas=columnas.tolist(), id=
id_archivo)
return HttpResponse(json.dumps(iniPreviws), content_type='application/json'
)
def ProcesarArchivo(request, idAsiganacion):
datoUsu = datosUsu(request.user.id)
print('Aca llego', idAsiganacion)
return render(request, 'cargueArchivos/procesandoArchivo.html', {
'datoUsu': datoUsu, 'id_archivo': idAsiganacion})
<mask token>
def getFalabella(request):
datoUsu = datosUsu(request.user.id)
inner_qs = DataAsignacion.objects.filter(asignacion_cliente='Falabella')
lista = []
for x in inner_qs:
lista.append(x.id)
pass
campFalabella = DataAsignacionarchivosStraus.objects.filter(
archivos_asignacion__in=lista)
vista = 0
return render(request, 'cargueArchivos/falabella.html', {
'idAsiganacion': 0, 'idFile': 0, 'campFalabella': campFalabella,
'vista': vista, 'datoUsu': datoUsu})
<mask token>
def crezcamosCampanacrear(request):
datoUsu = datosUsu(request.user.id)
if request.method == 'POST':
strs_nombre = request.POST['archivos_nombre']
porta = getValidate(strs_nombre, 'Crezcamos', request.user.id,
'Crezcamos')
if porta == 'Exite':
messages = 1
form = UploadArchivosAsignacion()
return render(request,
'cargueArchivos/Crezcamos/archivosProcesarCreateCrezcamos.html'
, {'datoUsu': datoUsu, 'messages': messages, 'form': form,
'portafolio': strs_nombre})
else:
form = UploadArchivosAsignacion(request.POST, request.FILES)
if form.is_valid():
idForm = form.save()
valores1, valores2, valores, resultados = (
getCrezcamosClientePreview(porta, 'Crezcamos', idForm.
id, request.user.id))
vista = 1
lista1 = []
lista2 = []
lista3 = []
listaFin = []
for x in valores:
lista1.append(x)
pass
for x in valores1:
lista2.append(x)
pass
for x in valores2:
lista3.append(x)
pass
listaFin.append(dict(lista=lista1, lista1=lista2, lista2=
lista3))
return render(request,
'cargueArchivos/Crezcamos/crezcamos.html', {'datoUsu':
datoUsu, 'idFile': idForm.id, 'lista': listaFin,
'vista': vista, 'idAsiganacion': resultados})
else:
print('No se esta validando el formulario')
pass
pass
else:
form = UploadArchivosAsignacion()
return render(request,
'cargueArchivos/Crezcamos/archivosProcesarCreateCrezcamos.html', {
'datoUsu': datoUsu, 'form': form})
<mask token>
def getUpdateCrezcamos(request):
datoUsu = datosUsu(request.user.id)
clie = DataClientesStraus.objects.filter(cliente_nombre='Crezcamos').last()
inner_qs = DataAsignacion.objects.filter(portafolio_usuario=request.
user.id, portafolio_cliente=clie.id)
if request.is_ajax():
idPortafolio = str(request.GET.get('id', None))
idPortafolioValor = str(request.GET.get('valor', None))
idPortafolioDato = str(request.GET.get('dato', None))
if (idPortafolioDato == '1') | (idPortafolioDato == 1):
DataAsignacion.objects.filter(id=idPortafolio).update(
portafolio_contrapropuesta=idPortafolioValor)
else:
DataAsignacion.objects.filter(id=idPortafolio).update(
portafolio_descuentos=idPortafolioValor)
pass
response = {'tipo': 'ok'}
return HttpResponse(json.dumps(response), content_type=
'application/json')
else:
if request.method == 'POST':
form = UploadArchivos(request.POST, request.FILES)
if form.is_valid():
idForm = form.save()
valores1, valores2, valores = getCrezcamosClientePreviewUdate(
idForm.id)
lista1 = []
lista2 = []
lista3 = []
listaFin = []
for x in valores:
lista1.append(x)
pass
for x in valores1:
lista2.append(x)
pass
for x in valores2:
lista3.append(x)
pass
listaFin.append(dict(lista=lista1, lista1=lista2, lista2=
lista3))
vista = 1
return render(request,
'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html'
, {'idFile': idForm.id, 'lista': listaFin, 'vista':
vista, 'form': form, 'campCrezcamos': inner_qs,
'datoUsu': datoUsu})
else:
print('No se esta validando el formulario')
pass
else:
form = UploadArchivos()
vista = 0
idForm = 0
return render(request,
'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html',
{'idFile': idForm, 'vista': vista, 'form': form,
'campCrezcamos': inner_qs, 'datoUsu': datoUsu})
def trazabilidad(request):
datoUsu = datosUsu(request.user.id)
clie = 'Crezcamos'
inner_qs = DataAsignacion.objects.filter(portafolio_usuario=request.
user.id, portafolio_cliente=clie.id)
if request.method == 'POST':
form = UploadArchivos(request.POST, request.FILES)
print(form)
if form.is_valid():
idForm = form.save()
getCrezcamosClientePreviewUdate(idForm.id)
return render(request,
'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html',
{'form': form, 'campCrezcamos': inner_qs, 'datoUsu': datoUsu})
else:
print('No se esta validando el formulario')
pass
else:
print(2)
form = UploadArchivos()
return render(request,
'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html', {
'form': form, 'campCrezcamos': inner_qs, 'datoUsu': datoUsu})
def limpiar(request):
datoUsu = datosUsu(request.user.id)
deleteDatosOrigen = DataUbicacionInfoOrigen.objects.all()
deleteDatosOrigen.delete()
deleteUbicaEmp = DataUbicacionEmpresa.objects.all()
deleteUbicaEmp.delete()
deleteUbica = DataUbicacion.objects.all()
deleteUbica.delete()
deleteCorreos = DataCorreoelectronico.objects.all()
deleteCorreos.delete()
deleteTele = DataTelefonos.objects.all()
deleteTele.delete()
deleteObliga = DataObligacion.objects.all()
deleteObliga.delete()
deletePersonas = DataPersonas.objects.all()
deletePersonas.delete()
deletePortaArchivoStra = DataAsignacionarchivosStraus.objects.all()
deletePortaArchivoStra.delete()
deleteArchiStra = DataarchivosStraus.objects.all()
deleteArchiStra.delete()
deletePorta = DataAsignacion.objects.all()
deletePorta.delete()
return render(request, 'cargueArchivos/limpiar.html', {'datoUsu': datoUsu})
|
<mask token>
def preProcesar(request):
id_archivo = request.GET.get('id_archivo')
archivo = DataArchivoCargueProcesar.objects.filter(id=id_archivo).last()
valores, columnas = iniPreviw(id_archivo, archivo.
archivocargueprocesararchivo, archivo.
archivocargueprocesararchivotipocargue.id, archivo.
archivocargueprocesararchivoobservacion)
iniPreviws = dict(valores=valores, columnas=columnas.tolist(), id=
id_archivo)
return HttpResponse(json.dumps(iniPreviws), content_type='application/json'
)
def ProcesarArchivo(request, idAsiganacion):
datoUsu = datosUsu(request.user.id)
print('Aca llego', idAsiganacion)
return render(request, 'cargueArchivos/procesandoArchivo.html', {
'datoUsu': datoUsu, 'id_archivo': idAsiganacion})
def ProcesarArchivoUpdate(request, id_archivo):
datoUsu = datosUsu(request.user.id)
return render(request, 'cargueArchivos/procesandoArchivoUpdate.html', {
'datoUsu': datoUsu, 'id_archivo': id_archivo})
def ProcesarArchivoFinal(request, idAsiganacion):
datoUsu = datosUsu(request.user.id)
print('Star')
ejecucionInicial(idAsiganacion)
contexto = {}
return render(request, 'cargueArchivos/procesandoArchivoOk.html', contexto)
<mask token>
def getFalabella(request):
datoUsu = datosUsu(request.user.id)
inner_qs = DataAsignacion.objects.filter(asignacion_cliente='Falabella')
lista = []
for x in inner_qs:
lista.append(x.id)
pass
campFalabella = DataAsignacionarchivosStraus.objects.filter(
archivos_asignacion__in=lista)
vista = 0
return render(request, 'cargueArchivos/falabella.html', {
'idAsiganacion': 0, 'idFile': 0, 'campFalabella': campFalabella,
'vista': vista, 'datoUsu': datoUsu})
def falabellaCampanacrear(request):
print(request.POST)
datoUsu = datosUsu(request.user.id)
if request.method == 'POST':
strs_nombre = request.POST['archivos_nombre']
porta = getValidate(strs_nombre, 'Falabella', request.user.id,
'Falabella')
if porta == 'Exite':
messages = 1
form = UploadArchivosAsignacion()
resultados = 0
return render(request,
'cargueArchivos/archivosProcesarCreateFalabella.html', {
'idAsiganacion': resultados, 'messages': messages, 'form':
form, 'portafolio': strs_nombre, 'datoUsu': datoUsu})
else:
form = UploadArchivosAsignacion(request.POST, request.FILES)
if form.is_valid():
idForm = form.save()
valores1, valores2, valores, resultados = (
getFalabellaClientePreview(porta, 'Falabella', idForm.
id, request.user.id, strs_nombre))
vista = 1
lista1 = []
lista2 = []
lista3 = []
listaFin = []
for x in valores:
lista1.append(x)
pass
for x in valores1:
lista2.append(x)
pass
for x in valores2:
lista3.append(x)
pass
listaFin.append(dict(lista=lista1, lista1=lista2, lista2=
lista3))
return render(request, 'cargueArchivos/falabella.html', {
'idFile': idForm.id, 'lista': listaFin, 'vista': vista,
'datoUsu': datoUsu, 'idAsiganacion': resultados})
else:
print('No se esta validando el formulario')
pass
pass
else:
form = UploadArchivosAsignacion()
resultados = 0
return render(request,
'cargueArchivos/archivosProcesarCreateFalabella.html', {
'idAsiganacion': resultados, 'form': form, 'datoUsu': datoUsu})
<mask token>
def crezcamosCampanacrear(request):
datoUsu = datosUsu(request.user.id)
if request.method == 'POST':
strs_nombre = request.POST['archivos_nombre']
porta = getValidate(strs_nombre, 'Crezcamos', request.user.id,
'Crezcamos')
if porta == 'Exite':
messages = 1
form = UploadArchivosAsignacion()
return render(request,
'cargueArchivos/Crezcamos/archivosProcesarCreateCrezcamos.html'
, {'datoUsu': datoUsu, 'messages': messages, 'form': form,
'portafolio': strs_nombre})
else:
form = UploadArchivosAsignacion(request.POST, request.FILES)
if form.is_valid():
idForm = form.save()
valores1, valores2, valores, resultados = (
getCrezcamosClientePreview(porta, 'Crezcamos', idForm.
id, request.user.id))
vista = 1
lista1 = []
lista2 = []
lista3 = []
listaFin = []
for x in valores:
lista1.append(x)
pass
for x in valores1:
lista2.append(x)
pass
for x in valores2:
lista3.append(x)
pass
listaFin.append(dict(lista=lista1, lista1=lista2, lista2=
lista3))
return render(request,
'cargueArchivos/Crezcamos/crezcamos.html', {'datoUsu':
datoUsu, 'idFile': idForm.id, 'lista': listaFin,
'vista': vista, 'idAsiganacion': resultados})
else:
print('No se esta validando el formulario')
pass
pass
else:
form = UploadArchivosAsignacion()
return render(request,
'cargueArchivos/Crezcamos/archivosProcesarCreateCrezcamos.html', {
'datoUsu': datoUsu, 'form': form})
<mask token>
def getUpdateCrezcamos(request):
datoUsu = datosUsu(request.user.id)
clie = DataClientesStraus.objects.filter(cliente_nombre='Crezcamos').last()
inner_qs = DataAsignacion.objects.filter(portafolio_usuario=request.
user.id, portafolio_cliente=clie.id)
if request.is_ajax():
idPortafolio = str(request.GET.get('id', None))
idPortafolioValor = str(request.GET.get('valor', None))
idPortafolioDato = str(request.GET.get('dato', None))
if (idPortafolioDato == '1') | (idPortafolioDato == 1):
DataAsignacion.objects.filter(id=idPortafolio).update(
portafolio_contrapropuesta=idPortafolioValor)
else:
DataAsignacion.objects.filter(id=idPortafolio).update(
portafolio_descuentos=idPortafolioValor)
pass
response = {'tipo': 'ok'}
return HttpResponse(json.dumps(response), content_type=
'application/json')
else:
if request.method == 'POST':
form = UploadArchivos(request.POST, request.FILES)
if form.is_valid():
idForm = form.save()
valores1, valores2, valores = getCrezcamosClientePreviewUdate(
idForm.id)
lista1 = []
lista2 = []
lista3 = []
listaFin = []
for x in valores:
lista1.append(x)
pass
for x in valores1:
lista2.append(x)
pass
for x in valores2:
lista3.append(x)
pass
listaFin.append(dict(lista=lista1, lista1=lista2, lista2=
lista3))
vista = 1
return render(request,
'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html'
, {'idFile': idForm.id, 'lista': listaFin, 'vista':
vista, 'form': form, 'campCrezcamos': inner_qs,
'datoUsu': datoUsu})
else:
print('No se esta validando el formulario')
pass
else:
form = UploadArchivos()
vista = 0
idForm = 0
return render(request,
'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html',
{'idFile': idForm, 'vista': vista, 'form': form,
'campCrezcamos': inner_qs, 'datoUsu': datoUsu})
def trazabilidad(request):
datoUsu = datosUsu(request.user.id)
clie = 'Crezcamos'
inner_qs = DataAsignacion.objects.filter(portafolio_usuario=request.
user.id, portafolio_cliente=clie.id)
if request.method == 'POST':
form = UploadArchivos(request.POST, request.FILES)
print(form)
if form.is_valid():
idForm = form.save()
getCrezcamosClientePreviewUdate(idForm.id)
return render(request,
'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html',
{'form': form, 'campCrezcamos': inner_qs, 'datoUsu': datoUsu})
else:
print('No se esta validando el formulario')
pass
else:
print(2)
form = UploadArchivos()
return render(request,
'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html', {
'form': form, 'campCrezcamos': inner_qs, 'datoUsu': datoUsu})
def limpiar(request):
datoUsu = datosUsu(request.user.id)
deleteDatosOrigen = DataUbicacionInfoOrigen.objects.all()
deleteDatosOrigen.delete()
deleteUbicaEmp = DataUbicacionEmpresa.objects.all()
deleteUbicaEmp.delete()
deleteUbica = DataUbicacion.objects.all()
deleteUbica.delete()
deleteCorreos = DataCorreoelectronico.objects.all()
deleteCorreos.delete()
deleteTele = DataTelefonos.objects.all()
deleteTele.delete()
deleteObliga = DataObligacion.objects.all()
deleteObliga.delete()
deletePersonas = DataPersonas.objects.all()
deletePersonas.delete()
deletePortaArchivoStra = DataAsignacionarchivosStraus.objects.all()
deletePortaArchivoStra.delete()
deleteArchiStra = DataarchivosStraus.objects.all()
deleteArchiStra.delete()
deletePorta = DataAsignacion.objects.all()
deletePorta.delete()
return render(request, 'cargueArchivos/limpiar.html', {'datoUsu': datoUsu})
|
<mask token>
def preProcesar(request):
id_archivo = request.GET.get('id_archivo')
archivo = DataArchivoCargueProcesar.objects.filter(id=id_archivo).last()
valores, columnas = iniPreviw(id_archivo, archivo.
archivocargueprocesararchivo, archivo.
archivocargueprocesararchivotipocargue.id, archivo.
archivocargueprocesararchivoobservacion)
iniPreviws = dict(valores=valores, columnas=columnas.tolist(), id=
id_archivo)
return HttpResponse(json.dumps(iniPreviws), content_type='application/json'
)
def ProcesarArchivo(request, idAsiganacion):
datoUsu = datosUsu(request.user.id)
print('Aca llego', idAsiganacion)
return render(request, 'cargueArchivos/procesandoArchivo.html', {
'datoUsu': datoUsu, 'id_archivo': idAsiganacion})
def ProcesarArchivoUpdate(request, id_archivo):
datoUsu = datosUsu(request.user.id)
return render(request, 'cargueArchivos/procesandoArchivoUpdate.html', {
'datoUsu': datoUsu, 'id_archivo': id_archivo})
def ProcesarArchivoFinal(request, idAsiganacion):
datoUsu = datosUsu(request.user.id)
print('Star')
ejecucionInicial(idAsiganacion)
contexto = {}
return render(request, 'cargueArchivos/procesandoArchivoOk.html', contexto)
<mask token>
def getFalabella(request):
datoUsu = datosUsu(request.user.id)
inner_qs = DataAsignacion.objects.filter(asignacion_cliente='Falabella')
lista = []
for x in inner_qs:
lista.append(x.id)
pass
campFalabella = DataAsignacionarchivosStraus.objects.filter(
archivos_asignacion__in=lista)
vista = 0
return render(request, 'cargueArchivos/falabella.html', {
'idAsiganacion': 0, 'idFile': 0, 'campFalabella': campFalabella,
'vista': vista, 'datoUsu': datoUsu})
def falabellaCampanacrear(request):
    """Create a Falabella campaign from an uploaded file and show its preview.

    GET renders the empty upload form. POST validates the campaign name via
    ``getValidate`` (the literal 'Exite' means the name already exists), saves
    the upload and renders the preview tables built by
    ``getFalabellaClientePreview``.
    NOTE(review): an invalid POST form only prints a message and returns
    ``None`` — presumably it should re-render the form; confirm.
    """
    print(request.POST)  # leftover debug print
    datoUsu = datosUsu(request.user.id)
    if request.method == 'POST':
        strs_nombre = request.POST['archivos_nombre']
        porta = getValidate(strs_nombre, 'Falabella', request.user.id,
            'Falabella')
        if porta == 'Exite':
            # duplicate campaign name: re-render the form with a warning flag
            messages = 1
            form = UploadArchivosAsignacion()
            resultados = 0
            return render(request,
                'cargueArchivos/archivosProcesarCreateFalabella.html', {
                'idAsiganacion': resultados, 'messages': messages, 'form':
                form, 'portafolio': strs_nombre, 'datoUsu': datoUsu})
        else:
            form = UploadArchivosAsignacion(request.POST, request.FILES)
            if form.is_valid():
                idForm = form.save()
                valores1, valores2, valores, resultados = (
                    getFalabellaClientePreview(porta, 'Falabella', idForm.
                    id, request.user.id, strs_nombre))
                vista = 1
                # materialize the three preview iterables for the template
                lista1 = []
                lista2 = []
                lista3 = []
                listaFin = []
                for x in valores:
                    lista1.append(x)
                    pass
                for x in valores1:
                    lista2.append(x)
                    pass
                for x in valores2:
                    lista3.append(x)
                    pass
                listaFin.append(dict(lista=lista1, lista1=lista2, lista2=
                    lista3))
                return render(request, 'cargueArchivos/falabella.html', {
                    'idFile': idForm.id, 'lista': listaFin, 'vista': vista,
                    'datoUsu': datoUsu, 'idAsiganacion': resultados})
            else:
                print('No se esta validando el formulario')
                pass
            pass
    else:
        form = UploadArchivosAsignacion()
        resultados = 0
        return render(request,
            'cargueArchivos/archivosProcesarCreateFalabella.html', {
            'idAsiganacion': resultados, 'form': form, 'datoUsu': datoUsu})
<mask token>
def crezcamosCampanacrear(request):
    """Create a Crezcamos campaign from an uploaded file and show its preview.

    GET renders the empty upload form. POST validates the campaign name via
    ``getValidate`` (the literal 'Exite' means the name already exists), saves
    the upload and renders the preview tables built by
    ``getCrezcamosClientePreview``.
    NOTE(review): an invalid POST form only prints a message and returns
    ``None`` — presumably it should re-render the form; confirm.
    """
    datoUsu = datosUsu(request.user.id)
    if request.method == 'POST':
        strs_nombre = request.POST['archivos_nombre']
        porta = getValidate(strs_nombre, 'Crezcamos', request.user.id,
            'Crezcamos')
        if porta == 'Exite':
            # duplicate campaign name: re-render the form with a warning flag
            messages = 1
            form = UploadArchivosAsignacion()
            return render(request,
                'cargueArchivos/Crezcamos/archivosProcesarCreateCrezcamos.html'
                , {'datoUsu': datoUsu, 'messages': messages, 'form': form,
                'portafolio': strs_nombre})
        else:
            form = UploadArchivosAsignacion(request.POST, request.FILES)
            if form.is_valid():
                idForm = form.save()
                valores1, valores2, valores, resultados = (
                    getCrezcamosClientePreview(porta, 'Crezcamos', idForm.
                    id, request.user.id))
                vista = 1
                # materialize the three preview iterables for the template
                lista1 = []
                lista2 = []
                lista3 = []
                listaFin = []
                for x in valores:
                    lista1.append(x)
                    pass
                for x in valores1:
                    lista2.append(x)
                    pass
                for x in valores2:
                    lista3.append(x)
                    pass
                listaFin.append(dict(lista=lista1, lista1=lista2, lista2=
                    lista3))
                return render(request,
                    'cargueArchivos/Crezcamos/crezcamos.html', {'datoUsu':
                    datoUsu, 'idFile': idForm.id, 'lista': listaFin,
                    'vista': vista, 'idAsiganacion': resultados})
            else:
                print('No se esta validando el formulario')
                pass
            pass
    else:
        form = UploadArchivosAsignacion()
        return render(request,
            'cargueArchivos/Crezcamos/archivosProcesarCreateCrezcamos.html', {
            'datoUsu': datoUsu, 'form': form})
def descargaArchivoCampanaSinProcesar(request, id_archivoFinal):
    """Redirect the browser to the raw uploaded file for this record.

    NOTE(review): the host is hard-coded — presumably this should be built
    from settings/MEDIA_URL; confirm before deploying elsewhere.
    """
    infoArchivo = DataAsignacionarchivosStraus.objects.filter(id=
        id_archivoFinal).last()
    return redirect('http://poseidon.intelibpo.com:8000/static/upload/%s' %
        infoArchivo.archivos_archivo)
def getUpdateCrezcamos(request):
    """Show/update the Crezcamos campaign list for the logged-in user.

    Three entry paths:
      * AJAX GET — updates one asignacion field ('dato' == '1' updates
        contrapropuesta, anything else updates descuentos) and returns a tiny
        JSON ack.
      * POST — saves the uploaded file and renders preview tables built by
        ``getCrezcamosClientePreviewUdate``.
      * plain GET — renders the empty form.
    NOTE(review): on an invalid POST the final return references ``vista`` /
    ``idForm`` which are only assigned on the GET branch — looks like a
    latent NameError path; confirm.
    """
    datoUsu = datosUsu(request.user.id)
    clie = DataClientesStraus.objects.filter(cliente_nombre='Crezcamos').last()
    inner_qs = DataAsignacion.objects.filter(portafolio_usuario=request.
        user.id, portafolio_cliente=clie.id)
    if request.is_ajax():
        idPortafolio = str(request.GET.get('id', None))
        idPortafolioValor = str(request.GET.get('valor', None))
        idPortafolioDato = str(request.GET.get('dato', None))
        # bitwise | works here because both sides are plain bools, though
        # 'or' was presumably intended; the == 1 arm can never match a str
        if (idPortafolioDato == '1') | (idPortafolioDato == 1):
            DataAsignacion.objects.filter(id=idPortafolio).update(
                portafolio_contrapropuesta=idPortafolioValor)
        else:
            DataAsignacion.objects.filter(id=idPortafolio).update(
                portafolio_descuentos=idPortafolioValor)
            pass
        response = {'tipo': 'ok'}
        return HttpResponse(json.dumps(response), content_type=
            'application/json')
    else:
        if request.method == 'POST':
            form = UploadArchivos(request.POST, request.FILES)
            if form.is_valid():
                idForm = form.save()
                valores1, valores2, valores = getCrezcamosClientePreviewUdate(
                    idForm.id)
                # materialize the three preview iterables for the template
                lista1 = []
                lista2 = []
                lista3 = []
                listaFin = []
                for x in valores:
                    lista1.append(x)
                    pass
                for x in valores1:
                    lista2.append(x)
                    pass
                for x in valores2:
                    lista3.append(x)
                    pass
                listaFin.append(dict(lista=lista1, lista1=lista2, lista2=
                    lista3))
                vista = 1
                return render(request,
                    'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html'
                    , {'idFile': idForm.id, 'lista': listaFin, 'vista':
                    vista, 'form': form, 'campCrezcamos': inner_qs,
                    'datoUsu': datoUsu})
            else:
                print('No se esta validando el formulario')
                pass
        else:
            form = UploadArchivos()
            vista = 0
            idForm = 0
        return render(request,
            'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html',
            {'idFile': idForm, 'vista': vista, 'form': form,
            'campCrezcamos': inner_qs, 'datoUsu': datoUsu})
def trazabilidad(request):
    """Upload a trazabilidad file for the Crezcamos campaigns and re-render
    the update page.

    Fixes: ``clie`` was the literal string 'Crezcamos', so ``clie.id`` below
    raised AttributeError on every request. The client record is now resolved
    the same way sibling view getUpdateCrezcamos does. An invalid POST also
    re-renders the page (it previously returned None, a Django error).
    """
    datoUsu = datosUsu(request.user.id)
    # resolve the client row so clie.id is valid (was the bare string before)
    clie = DataClientesStraus.objects.filter(cliente_nombre='Crezcamos').last()
    inner_qs = DataAsignacion.objects.filter(portafolio_usuario=request.
        user.id, portafolio_cliente=clie.id)
    if request.method == 'POST':
        form = UploadArchivos(request.POST, request.FILES)
        if form.is_valid():
            idForm = form.save()
            # build the preview/update data for the uploaded file
            getCrezcamosClientePreviewUdate(idForm.id)
            return render(request,
                'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html',
                {'form': form, 'campCrezcamos': inner_qs, 'datoUsu': datoUsu})
        print('No se esta validando el formulario')
    else:
        form = UploadArchivos()
    # GET, or POST whose form failed validation: show the (bound) form again
    return render(request,
        'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html', {
        'form': form, 'campCrezcamos': inner_qs, 'datoUsu': datoUsu})
def limpiar(request):
    """DANGER: wipe *every* row of the campaign data tables, then render a page.

    Child tables (ubicaciones, correos, telefonos, obligaciones) are deleted
    before their parents (personas, archivos, asignaciones) — presumably to
    satisfy FK constraints; confirm the order against the model definitions.
    NOTE(review): there is no confirmation step or permission check.
    """
    datoUsu = datosUsu(request.user.id)
    # location / origin detail rows
    deleteDatosOrigen = DataUbicacionInfoOrigen.objects.all()
    deleteDatosOrigen.delete()
    deleteUbicaEmp = DataUbicacionEmpresa.objects.all()
    deleteUbicaEmp.delete()
    deleteUbica = DataUbicacion.objects.all()
    deleteUbica.delete()
    # contact rows
    deleteCorreos = DataCorreoelectronico.objects.all()
    deleteCorreos.delete()
    deleteTele = DataTelefonos.objects.all()
    deleteTele.delete()
    # debt and person rows
    deleteObliga = DataObligacion.objects.all()
    deleteObliga.delete()
    deletePersonas = DataPersonas.objects.all()
    deletePersonas.delete()
    # file / assignment bookkeeping rows
    deletePortaArchivoStra = DataAsignacionarchivosStraus.objects.all()
    deletePortaArchivoStra.delete()
    deleteArchiStra = DataarchivosStraus.objects.all()
    deleteArchiStra.delete()
    deletePorta = DataAsignacion.objects.all()
    deletePorta.delete()
    return render(request, 'cargueArchivos/limpiar.html', {'datoUsu': datoUsu})
|
from django.conf import settings
from django.urls import resolve
from django.urls import reverse
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, JsonResponse, HttpResponseNotFound
from django.template.loader import get_template, render_to_string
from django.views.generic import View
from .models import *
from decimal import Decimal
from datetime import datetime, date, timedelta, time
import re
import simplejson as json
from django.db.models import Sum, Avg, Max
import os
from .forms import *
from django.views.generic import ListView, CreateView
from .clientes import *
from .falabella import *
from .crezcamos import *
from .ejecucion import *
from django.contrib.auth import authenticate, login
from django.core import serializers
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import render_to_response
from django.contrib import messages
def preProcesar(request):
    """Return a JSON preview (rows + column names) of an uploaded file.

    Reads ``id_archivo`` from the query string, loads the upload record and
    delegates the actual parsing to ``iniPreviw``.
    """
    id_archivo = request.GET.get('id_archivo')
    archivo = DataArchivoCargueProcesar.objects.filter(id=id_archivo).last()
    # iniPreviw parses the stored file according to its cargue type and
    # returns the preview rows plus the detected columns
    valores, columnas = iniPreviw(id_archivo, archivo.
        archivocargueprocesararchivo, archivo.
        archivocargueprocesararchivotipocargue.id, archivo.
        archivocargueprocesararchivoobservacion)
    iniPreviws = dict(valores=valores, columnas=columnas.tolist(), id=
        id_archivo)
    return HttpResponse(json.dumps(iniPreviws), content_type='application/json'
        )
def ProcesarArchivo(request, idAsiganacion):
    """Render the 'processing file' page for the given asignacion id."""
    datoUsu = datosUsu(request.user.id)
    print('Aca llego', idAsiganacion)  # NOTE(review): leftover debug print
    return render(request, 'cargueArchivos/procesandoArchivo.html', {
        'datoUsu': datoUsu, 'id_archivo': idAsiganacion})
def ProcesarArchivoUpdate(request, id_archivo):
    """Render the 'processing update' page for the given file id."""
    context = {'datoUsu': datosUsu(request.user.id), 'id_archivo': id_archivo}
    return render(request, 'cargueArchivos/procesandoArchivoUpdate.html',
        context)
def ProcesarArchivoFinal(request, idAsiganacion):
datoUsu = datosUsu(request.user.id)
print('Star')
ejecucionInicial(idAsiganacion)
contexto = {}
return render(request, 'cargueArchivos/procesandoArchivoOk.html', contexto)
def ProcesarArchivoFinalUpdate(request, id_archivoFinal):
datoUsu = datosUsu(request.user.id)
print('Star Update')
archivo = DataarchivosStraus.objects.filter(id=id_archivoFinal).last()
cliente = archivo.archivos_portafolio.portafolio_cliente.cliente_nombre
empresa = archivo.archivos_portafolio.portafolio_cliente.cliente_empresa
if cliente == 'Crezcamos':
print(1)
procesado = procesadoFinalCrezcamosUpdate(id_archivoFinal, archivo.
archivos_archivo, archivo.archivos_portafolio, request.user.id,
cliente, empresa)
else:
print(2)
procesado = procesadoFinalFalabellaUpdate(id_archivoFinal, archivo.
archivos_archivo, archivo.archivos_portafolio, request.user.id,
cliente, empresa)
pass
contexto = {'personas': procesado['personas'], 'obligaciones':
procesado['obligaciones'], 'telefonos': procesado['telefonos'],
'correos': procesado['correos'], 'tokens': procesado['tokens'],
'datoUsu': datoUsu}
if procesado['obligaciones'] > 0:
DataAsignacionarchivosStraus.objects.filter(id=id_archivoFinal).update(
archivos_estado=True)
pass
return render(request, 'cargueArchivos/procesandoArchivoUpdateOk.html',
contexto)
def getFalabella(request):
datoUsu = datosUsu(request.user.id)
inner_qs = DataAsignacion.objects.filter(asignacion_cliente='Falabella')
lista = []
for x in inner_qs:
lista.append(x.id)
pass
campFalabella = DataAsignacionarchivosStraus.objects.filter(
archivos_asignacion__in=lista)
vista = 0
return render(request, 'cargueArchivos/falabella.html', {
'idAsiganacion': 0, 'idFile': 0, 'campFalabella': campFalabella,
'vista': vista, 'datoUsu': datoUsu})
def falabellaCampanacrear(request):
print(request.POST)
datoUsu = datosUsu(request.user.id)
if request.method == 'POST':
strs_nombre = request.POST['archivos_nombre']
porta = getValidate(strs_nombre, 'Falabella', request.user.id,
'Falabella')
if porta == 'Exite':
messages = 1
form = UploadArchivosAsignacion()
resultados = 0
return render(request,
'cargueArchivos/archivosProcesarCreateFalabella.html', {
'idAsiganacion': resultados, 'messages': messages, 'form':
form, 'portafolio': strs_nombre, 'datoUsu': datoUsu})
else:
form = UploadArchivosAsignacion(request.POST, request.FILES)
if form.is_valid():
idForm = form.save()
valores1, valores2, valores, resultados = (
getFalabellaClientePreview(porta, 'Falabella', idForm.
id, request.user.id, strs_nombre))
vista = 1
lista1 = []
lista2 = []
lista3 = []
listaFin = []
for x in valores:
lista1.append(x)
pass
for x in valores1:
lista2.append(x)
pass
for x in valores2:
lista3.append(x)
pass
listaFin.append(dict(lista=lista1, lista1=lista2, lista2=
lista3))
return render(request, 'cargueArchivos/falabella.html', {
'idFile': idForm.id, 'lista': listaFin, 'vista': vista,
'datoUsu': datoUsu, 'idAsiganacion': resultados})
else:
print('No se esta validando el formulario')
pass
pass
else:
form = UploadArchivosAsignacion()
resultados = 0
return render(request,
'cargueArchivos/archivosProcesarCreateFalabella.html', {
'idAsiganacion': resultados, 'form': form, 'datoUsu': datoUsu})
def getCrezcamos(request):
datoUsu = datosUsu(request.user.id)
inner_qs = DataAsignacion.objects.filter(asignacion_cliente='Crezcamos')
lista = []
for x in inner_qs:
lista.append(x.id)
pass
campCrezcamos = DataAsignacionarchivosStraus.objects.filter(
archivos_asignacion__in=lista)
vista = 0
return render(request, 'cargueArchivos/Crezcamos/crezcamos.html', {
'idFile': 0, 'campCrezcamos': campCrezcamos, 'vista': vista,
'datoUsu': datoUsu})
def crezcamosCampanacrear(request):
datoUsu = datosUsu(request.user.id)
if request.method == 'POST':
strs_nombre = request.POST['archivos_nombre']
porta = getValidate(strs_nombre, 'Crezcamos', request.user.id,
'Crezcamos')
if porta == 'Exite':
messages = 1
form = UploadArchivosAsignacion()
return render(request,
'cargueArchivos/Crezcamos/archivosProcesarCreateCrezcamos.html'
, {'datoUsu': datoUsu, 'messages': messages, 'form': form,
'portafolio': strs_nombre})
else:
form = UploadArchivosAsignacion(request.POST, request.FILES)
if form.is_valid():
idForm = form.save()
valores1, valores2, valores, resultados = (
getCrezcamosClientePreview(porta, 'Crezcamos', idForm.
id, request.user.id))
vista = 1
lista1 = []
lista2 = []
lista3 = []
listaFin = []
for x in valores:
lista1.append(x)
pass
for x in valores1:
lista2.append(x)
pass
for x in valores2:
lista3.append(x)
pass
listaFin.append(dict(lista=lista1, lista1=lista2, lista2=
lista3))
return render(request,
'cargueArchivos/Crezcamos/crezcamos.html', {'datoUsu':
datoUsu, 'idFile': idForm.id, 'lista': listaFin,
'vista': vista, 'idAsiganacion': resultados})
else:
print('No se esta validando el formulario')
pass
pass
else:
form = UploadArchivosAsignacion()
return render(request,
'cargueArchivos/Crezcamos/archivosProcesarCreateCrezcamos.html', {
'datoUsu': datoUsu, 'form': form})
def descargaArchivoCampanaSinProcesar(request, id_archivoFinal):
infoArchivo = DataAsignacionarchivosStraus.objects.filter(id=
id_archivoFinal).last()
return redirect('http://poseidon.intelibpo.com:8000/static/upload/%s' %
infoArchivo.archivos_archivo)
def getUpdateCrezcamos(request):
datoUsu = datosUsu(request.user.id)
clie = DataClientesStraus.objects.filter(cliente_nombre='Crezcamos').last()
inner_qs = DataAsignacion.objects.filter(portafolio_usuario=request.
user.id, portafolio_cliente=clie.id)
if request.is_ajax():
idPortafolio = str(request.GET.get('id', None))
idPortafolioValor = str(request.GET.get('valor', None))
idPortafolioDato = str(request.GET.get('dato', None))
if (idPortafolioDato == '1') | (idPortafolioDato == 1):
DataAsignacion.objects.filter(id=idPortafolio).update(
portafolio_contrapropuesta=idPortafolioValor)
else:
DataAsignacion.objects.filter(id=idPortafolio).update(
portafolio_descuentos=idPortafolioValor)
pass
response = {'tipo': 'ok'}
return HttpResponse(json.dumps(response), content_type=
'application/json')
else:
if request.method == 'POST':
form = UploadArchivos(request.POST, request.FILES)
if form.is_valid():
idForm = form.save()
valores1, valores2, valores = getCrezcamosClientePreviewUdate(
idForm.id)
lista1 = []
lista2 = []
lista3 = []
listaFin = []
for x in valores:
lista1.append(x)
pass
for x in valores1:
lista2.append(x)
pass
for x in valores2:
lista3.append(x)
pass
listaFin.append(dict(lista=lista1, lista1=lista2, lista2=
lista3))
vista = 1
return render(request,
'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html'
, {'idFile': idForm.id, 'lista': listaFin, 'vista':
vista, 'form': form, 'campCrezcamos': inner_qs,
'datoUsu': datoUsu})
else:
print('No se esta validando el formulario')
pass
else:
form = UploadArchivos()
vista = 0
idForm = 0
return render(request,
'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html',
{'idFile': idForm, 'vista': vista, 'form': form,
'campCrezcamos': inner_qs, 'datoUsu': datoUsu})
def trazabilidad(request):
datoUsu = datosUsu(request.user.id)
clie = 'Crezcamos'
inner_qs = DataAsignacion.objects.filter(portafolio_usuario=request.
user.id, portafolio_cliente=clie.id)
if request.method == 'POST':
form = UploadArchivos(request.POST, request.FILES)
print(form)
if form.is_valid():
idForm = form.save()
getCrezcamosClientePreviewUdate(idForm.id)
return render(request,
'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html',
{'form': form, 'campCrezcamos': inner_qs, 'datoUsu': datoUsu})
else:
print('No se esta validando el formulario')
pass
else:
print(2)
form = UploadArchivos()
return render(request,
'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html', {
'form': form, 'campCrezcamos': inner_qs, 'datoUsu': datoUsu})
def limpiar(request):
datoUsu = datosUsu(request.user.id)
deleteDatosOrigen = DataUbicacionInfoOrigen.objects.all()
deleteDatosOrigen.delete()
deleteUbicaEmp = DataUbicacionEmpresa.objects.all()
deleteUbicaEmp.delete()
deleteUbica = DataUbicacion.objects.all()
deleteUbica.delete()
deleteCorreos = DataCorreoelectronico.objects.all()
deleteCorreos.delete()
deleteTele = DataTelefonos.objects.all()
deleteTele.delete()
deleteObliga = DataObligacion.objects.all()
deleteObliga.delete()
deletePersonas = DataPersonas.objects.all()
deletePersonas.delete()
deletePortaArchivoStra = DataAsignacionarchivosStraus.objects.all()
deletePortaArchivoStra.delete()
deleteArchiStra = DataarchivosStraus.objects.all()
deleteArchiStra.delete()
deletePorta = DataAsignacion.objects.all()
deletePorta.delete()
return render(request, 'cargueArchivos/limpiar.html', {'datoUsu': datoUsu})
|
from django.conf import settings
from django.urls import resolve
from django.urls import reverse
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, JsonResponse, HttpResponseNotFound
from django.template.loader import get_template, render_to_string
from django.views.generic import View
from .models import *
from decimal import Decimal
from datetime import datetime, date, timedelta,time
import re
import simplejson as json
from django.db.models import Sum, Avg, Max
import os
from .forms import *
from django.views.generic import ListView,CreateView
from .clientes import *
from .falabella import *
from .crezcamos import *
from .ejecucion import *
from django.contrib.auth import authenticate, login
from django.core import serializers
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import render_to_response
from django.contrib import messages
def preProcesar(request):
id_archivo = request.GET.get('id_archivo')
archivo = DataArchivoCargueProcesar.objects.filter(id=id_archivo).last()
valores, columnas = iniPreviw(id_archivo,archivo.archivocargueprocesararchivo,archivo.archivocargueprocesararchivotipocargue.id,archivo.archivocargueprocesararchivoobservacion)
iniPreviws = dict(valores=valores,columnas=columnas.tolist(),id=id_archivo)
return HttpResponse(json.dumps(iniPreviws), content_type='application/json')
def ProcesarArchivo(request,idAsiganacion):
datoUsu = datosUsu(request.user.id)
print('Aca llego',idAsiganacion)
return render(request, 'cargueArchivos/procesandoArchivo.html',{'datoUsu':datoUsu,'id_archivo':idAsiganacion})
def ProcesarArchivoUpdate(request,id_archivo):
datoUsu = datosUsu(request.user.id)
return render(request, 'cargueArchivos/procesandoArchivoUpdate.html',{'datoUsu':datoUsu,'id_archivo':id_archivo})
def ProcesarArchivoFinal(request,idAsiganacion):
datoUsu = datosUsu(request.user.id)
print('Star')
# archivo = DataAsignacionarchivosStraus.objects.filter(id=idAsiganacion).last()
# cliente = archivo.archivos_portafolio.portafolio_cliente.cliente_nombre
# empresa = archivo.archivos_portafolio.portafolio_cliente.cliente_empresa
# if cliente == 'Crezcamos':
# procesado = procesadoFinalCrezcamos(idAsiganacion,archivo.archivos_archivo,archivo.archivos_portafolio,request.user.id,cliente,empresa)
# else:
# print('Ejecucion del ETL/DASH')
# procesado = procesadoFinalFalabella(idAsiganacion,archivo.archivos_archivo,archivo.archivos_portafolio,request.user.id,cliente,empresa)
# pass
# contexto ={
# 'personas':procesado['personas'],
# 'obligaciones':procesado['obligaciones'],
# 'telefonos':procesado['telefonos'],
# 'correos':procesado['correos'],
# 'tokens':procesado['tokens'],
# 'datoUsu':datoUsu,
# }
# if procesado['obligaciones']>0:
# DataAsignacionarchivosStraus.objects.filter(id=id_archivoFinal).update(archivos_estado=True)
# pass
ejecucionInicial(idAsiganacion)
contexto = {}
return render(request, 'cargueArchivos/procesandoArchivoOk.html',contexto)
def ProcesarArchivoFinalUpdate(request,id_archivoFinal):
datoUsu = datosUsu(request.user.id)
print('Star Update')
archivo = DataarchivosStraus.objects.filter(id=id_archivoFinal).last()
cliente = archivo.archivos_portafolio.portafolio_cliente.cliente_nombre
empresa = archivo.archivos_portafolio.portafolio_cliente.cliente_empresa
if cliente == 'Crezcamos':
print(1)
procesado = procesadoFinalCrezcamosUpdate(id_archivoFinal,archivo.archivos_archivo,archivo.archivos_portafolio,request.user.id,cliente,empresa)
else:
print(2)
procesado = procesadoFinalFalabellaUpdate(id_archivoFinal,archivo.archivos_archivo,archivo.archivos_portafolio,request.user.id,cliente,empresa)
pass
contexto ={
'personas':procesado['personas'],
'obligaciones':procesado['obligaciones'],
'telefonos':procesado['telefonos'],
'correos':procesado['correos'],
'tokens':procesado['tokens'],
'datoUsu':datoUsu,
}
if procesado['obligaciones']>0:
DataAsignacionarchivosStraus.objects.filter(id=id_archivoFinal).update(archivos_estado=True)
pass
return render(request, 'cargueArchivos/procesandoArchivoUpdateOk.html',contexto)
#@login_required
def getFalabella(request):
datoUsu = datosUsu(request.user.id)
inner_qs = DataAsignacion.objects.filter(asignacion_cliente='Falabella')
lista = []
for x in inner_qs:
lista.append(x.id)
pass
campFalabella = DataAsignacionarchivosStraus.objects.filter(archivos_asignacion__in=lista)
vista = 0
return render(request, 'cargueArchivos/falabella.html',{'idAsiganacion':0,'idFile':0,'campFalabella':campFalabella,'vista':vista,'datoUsu':datoUsu})
def falabellaCampanacrear(request):
print(request.POST)
datoUsu = datosUsu(request.user.id)
if request.method == 'POST':
strs_nombre = request.POST['archivos_nombre']
porta = getValidate(strs_nombre,'Falabella',request.user.id,'Falabella')
if porta=='Exite':
messages = 1
form = UploadArchivosAsignacion()
resultados = 0
return render(request,'cargueArchivos/archivosProcesarCreateFalabella.html',{'idAsiganacion':resultados,'messages':messages,'form':form,'portafolio':strs_nombre,'datoUsu':datoUsu})
else:
form = UploadArchivosAsignacion(request.POST, request.FILES)
if form.is_valid():
idForm = form.save()
valores1,valores2,valores,resultados = getFalabellaClientePreview(porta,'Falabella',idForm.id,request.user.id,strs_nombre)
vista = 1
lista1 = []
lista2 = []
lista3 = []
listaFin = []
for x in valores:
lista1.append(x)
pass
for x in valores1:
lista2.append(x)
pass
for x in valores2:
lista3.append(x)
pass
listaFin.append(dict(lista=lista1,lista1=lista2,lista2=lista3))
return render(request, 'cargueArchivos/falabella.html',{'idFile':idForm.id,'lista':listaFin,'vista':vista,'datoUsu':datoUsu,'idAsiganacion':resultados})
else:
print('No se esta validando el formulario')
pass
pass
else:
form = UploadArchivosAsignacion()
resultados = 0
return render(request, 'cargueArchivos/archivosProcesarCreateFalabella.html', {'idAsiganacion':resultados,'form':form,'datoUsu':datoUsu})
# Crezcamos
def getCrezcamos(request):
datoUsu = datosUsu(request.user.id)
inner_qs = DataAsignacion.objects.filter(asignacion_cliente='Crezcamos')
lista = []
for x in inner_qs:
lista.append(x.id)
pass
campCrezcamos = DataAsignacionarchivosStraus.objects.filter(archivos_asignacion__in=lista)
vista = 0
return render(request, 'cargueArchivos/Crezcamos/crezcamos.html',{'idFile':0,'campCrezcamos':campCrezcamos,'vista':vista,'datoUsu':datoUsu})
def crezcamosCampanacrear(request):
datoUsu = datosUsu(request.user.id)
if request.method == 'POST':
strs_nombre = request.POST['archivos_nombre']
porta = getValidate(strs_nombre,'Crezcamos',request.user.id,'Crezcamos')
if porta=='Exite':
messages = 1
form = UploadArchivosAsignacion()
return render(request,'cargueArchivos/Crezcamos/archivosProcesarCreateCrezcamos.html',{'datoUsu':datoUsu,'messages':messages,'form':form,'portafolio':strs_nombre})
else:
form = UploadArchivosAsignacion(request.POST, request.FILES)
if form.is_valid():
idForm = form.save()
valores1,valores2,valores,resultados = getCrezcamosClientePreview(porta,'Crezcamos',idForm.id,request.user.id)
vista = 1
lista1 = []
lista2 = []
lista3 = []
listaFin = []
for x in valores:
lista1.append(x)
pass
for x in valores1:
lista2.append(x)
pass
for x in valores2:
lista3.append(x)
pass
listaFin.append(dict(lista=lista1,lista1=lista2,lista2=lista3))
return render(request, 'cargueArchivos/Crezcamos/crezcamos.html',{'datoUsu':datoUsu,'idFile':idForm.id,'lista':listaFin,'vista':vista,'idAsiganacion':resultados})
else:
print('No se esta validando el formulario')
pass
pass
else:
form = UploadArchivosAsignacion()
return render(request, 'cargueArchivos/Crezcamos/archivosProcesarCreateCrezcamos.html', {'datoUsu':datoUsu,'form':form})
def descargaArchivoCampanaSinProcesar(request,id_archivoFinal):
infoArchivo = DataAsignacionarchivosStraus.objects.filter(id=id_archivoFinal).last()
return redirect('http://poseidon.intelibpo.com:8000/static/upload/%s'%(infoArchivo.archivos_archivo))
def getUpdateCrezcamos(request):
datoUsu = datosUsu(request.user.id)
clie = DataClientesStraus.objects.filter(cliente_nombre='Crezcamos').last()
inner_qs = DataAsignacion.objects.filter(portafolio_usuario=request.user.id,portafolio_cliente=clie.id)
if request.is_ajax():
idPortafolio = str(request.GET.get('id', None))
idPortafolioValor = str(request.GET.get('valor', None))
idPortafolioDato = str(request.GET.get('dato', None))
if ( (idPortafolioDato=='1') | (idPortafolioDato==1)):
DataAsignacion.objects.filter(id=idPortafolio).update(portafolio_contrapropuesta=idPortafolioValor)
else:
DataAsignacion.objects.filter(id=idPortafolio).update(portafolio_descuentos=idPortafolioValor)
pass
response = {'tipo': "ok"}
return HttpResponse(json.dumps(response), content_type='application/json')
else:
if request.method == 'POST':
form = UploadArchivos(request.POST, request.FILES)
if form.is_valid():
idForm = form.save()
valores1,valores2,valores = getCrezcamosClientePreviewUdate(idForm.id)
lista1 = []
lista2 = []
lista3 = []
listaFin = []
for x in valores:
lista1.append(x)
pass
for x in valores1:
lista2.append(x)
pass
for x in valores2:
lista3.append(x)
pass
listaFin.append(dict(lista=lista1,lista1=lista2,lista2=lista3))
vista = 1
return render(request, 'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html',{'idFile':idForm.id,'lista':listaFin,'vista':vista,'form':form,'campCrezcamos':inner_qs,'datoUsu':datoUsu})
else:
print('No se esta validando el formulario')
pass
else:
form = UploadArchivos()
vista = 0
idForm = 0
return render(request, 'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html',{'idFile':idForm,'vista':vista,'form':form,'campCrezcamos':inner_qs,'datoUsu':datoUsu})
def trazabilidad(request):
datoUsu = datosUsu(request.user.id)
clie = 'Crezcamos'
inner_qs = DataAsignacion.objects.filter(portafolio_usuario=request.user.id,portafolio_cliente=clie.id)
if request.method == 'POST':
form = UploadArchivos(request.POST, request.FILES)
print(form)
if form.is_valid():
idForm = form.save()
getCrezcamosClientePreviewUdate(idForm.id)
return render(request, 'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html',{'form':form,'campCrezcamos':inner_qs,'datoUsu':datoUsu})
else:
print('No se esta validando el formulario')
pass
else:
print(2)
form = UploadArchivos()
return render(request, 'cargueArchivos/UpdateCampañas/Crezcamos/updateCrezcamos.html',{'form':form,'campCrezcamos':inner_qs,'datoUsu':datoUsu})
def limpiar(request):
datoUsu = datosUsu(request.user.id)
deleteDatosOrigen = DataUbicacionInfoOrigen.objects.all()
deleteDatosOrigen.delete()
deleteUbicaEmp = DataUbicacionEmpresa.objects.all()
deleteUbicaEmp.delete()
deleteUbica = DataUbicacion.objects.all()
deleteUbica.delete()
deleteCorreos = DataCorreoelectronico.objects.all()
deleteCorreos.delete()
deleteTele = DataTelefonos.objects.all()
deleteTele.delete()
deleteObliga = DataObligacion.objects.all()
deleteObliga.delete()
deletePersonas = DataPersonas.objects.all()
deletePersonas.delete()
deletePortaArchivoStra = DataAsignacionarchivosStraus.objects.all()
deletePortaArchivoStra.delete()
deleteArchiStra = DataarchivosStraus.objects.all()
deleteArchiStra.delete()
deletePorta = DataAsignacion.objects.all()
deletePorta.delete()
return render(request, 'cargueArchivos/limpiar.html',{'datoUsu':datoUsu})
|
[
7,
10,
11,
14,
15
] |
2,454 |
c6821cb8dd6f8d74ca20c03f87dae321eb869c32
|
<mask token>
@attr.s
class GPTools:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def find_clipboard(self):
for process in psutil.process_iter():
if process.name().lower() != 'gp5.exe':
continue
break
else:
raise click.ClickException(
'cannot get Guitar Pro 5 clipboard, is the process running?')
exe_path = process.cmdline()[0]
clipboard_path = os.path.join(os.path.dirname(exe_path), 'tmp',
'clipboard.tmp')
return clipboard_path
def write(self):
format = None if self.song.clipboard is None else 'tmp'
guitarpro.write(self.song, self.output_file, format=format)
def selected(self):
for track in self.selected_tracks():
for measure in self.selected_measures(track):
for voice in measure.voices:
for beat in self.selected_beats(voice):
yield track, measure, voice, beat
def selected_tracks(self):
if self.selected_track_numbers is ALL:
yield from self.song.tracks
return
for track in self.song.tracks:
if track.number in self.selected_track_numbers:
yield track
<mask token>
<mask token>
|
<mask token>
@attr.s
class GPTools:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def parse(self):
if self.input_file is None:
self.input_file = self.find_clipboard()
if self.output_file is None:
self.output_file = self.input_file
self.song = guitarpro.parse(self.input_file)
if self.selected_track_numbers is None:
if self.song.clipboard is not None:
self.selected_track_numbers = list(range(self.song.
clipboard.startTrack, self.song.clipboard.stopTrack + 1))
else:
self.selected_track_numbers = ALL
if self.selected_measure_numbers is None:
if self.song.clipboard is not None:
self.selected_measure_numbers = list(range(self.song.
clipboard.startMeasure, self.song.clipboard.stopMeasure +
1))
else:
self.selected_measure_numbers = ALL
if self.selected_beat_numbers is None:
if (self.song.clipboard is not None and self.song.clipboard.
subBarCopy):
self.selected_beat_numbers = list(range(self.song.clipboard
.startBeat, self.song.clipboard.stopBeat + 1))
else:
self.selected_beat_numbers = ALL
def find_clipboard(self):
for process in psutil.process_iter():
if process.name().lower() != 'gp5.exe':
continue
break
else:
raise click.ClickException(
'cannot get Guitar Pro 5 clipboard, is the process running?')
exe_path = process.cmdline()[0]
clipboard_path = os.path.join(os.path.dirname(exe_path), 'tmp',
'clipboard.tmp')
return clipboard_path
def write(self):
format = None if self.song.clipboard is None else 'tmp'
guitarpro.write(self.song, self.output_file, format=format)
def selected(self):
for track in self.selected_tracks():
for measure in self.selected_measures(track):
for voice in measure.voices:
for beat in self.selected_beats(voice):
yield track, measure, voice, beat
def selected_tracks(self):
if self.selected_track_numbers is ALL:
yield from self.song.tracks
return
for track in self.song.tracks:
if track.number in self.selected_track_numbers:
yield track
def selected_measures(self, track):
if self.selected_measure_numbers is ALL:
yield from track.measures
return
for measure in track.measures:
if measure.number in self.selected_measure_numbers:
yield measure
<mask token>
|
<mask token>
@attr.s
class GPTools:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def parse(self):
if self.input_file is None:
self.input_file = self.find_clipboard()
if self.output_file is None:
self.output_file = self.input_file
self.song = guitarpro.parse(self.input_file)
if self.selected_track_numbers is None:
if self.song.clipboard is not None:
self.selected_track_numbers = list(range(self.song.
clipboard.startTrack, self.song.clipboard.stopTrack + 1))
else:
self.selected_track_numbers = ALL
if self.selected_measure_numbers is None:
if self.song.clipboard is not None:
self.selected_measure_numbers = list(range(self.song.
clipboard.startMeasure, self.song.clipboard.stopMeasure +
1))
else:
self.selected_measure_numbers = ALL
if self.selected_beat_numbers is None:
if (self.song.clipboard is not None and self.song.clipboard.
subBarCopy):
self.selected_beat_numbers = list(range(self.song.clipboard
.startBeat, self.song.clipboard.stopBeat + 1))
else:
self.selected_beat_numbers = ALL
def find_clipboard(self):
for process in psutil.process_iter():
if process.name().lower() != 'gp5.exe':
continue
break
else:
raise click.ClickException(
'cannot get Guitar Pro 5 clipboard, is the process running?')
exe_path = process.cmdline()[0]
clipboard_path = os.path.join(os.path.dirname(exe_path), 'tmp',
'clipboard.tmp')
return clipboard_path
def write(self):
format = None if self.song.clipboard is None else 'tmp'
guitarpro.write(self.song, self.output_file, format=format)
def selected(self):
for track in self.selected_tracks():
for measure in self.selected_measures(track):
for voice in measure.voices:
for beat in self.selected_beats(voice):
yield track, measure, voice, beat
def selected_tracks(self):
if self.selected_track_numbers is ALL:
yield from self.song.tracks
return
for track in self.song.tracks:
if track.number in self.selected_track_numbers:
yield track
def selected_measures(self, track):
if self.selected_measure_numbers is ALL:
yield from track.measures
return
for measure in track.measures:
if measure.number in self.selected_measure_numbers:
yield measure
def selected_beats(self, voice):
if self.selected_beat_numbers is ALL:
yield from voice.beats
return
for number, beat in enumerate(voice.beats, start=1):
if number in self.selected_beat_numbers:
yield beat
|
import os
import attr
import click
import guitarpro
import psutil
ALL = object()
@attr.s
class GPTools:
input_file = attr.ib()
output_file = attr.ib()
selected_track_numbers = attr.ib(default=None)
selected_measure_numbers = attr.ib(default=None)
selected_beat_numbers = attr.ib(default=None)
song = None
def parse(self):
if self.input_file is None:
self.input_file = self.find_clipboard()
if self.output_file is None:
self.output_file = self.input_file
self.song = guitarpro.parse(self.input_file)
if self.selected_track_numbers is None:
if self.song.clipboard is not None:
self.selected_track_numbers = list(range(self.song.
clipboard.startTrack, self.song.clipboard.stopTrack + 1))
else:
self.selected_track_numbers = ALL
if self.selected_measure_numbers is None:
if self.song.clipboard is not None:
self.selected_measure_numbers = list(range(self.song.
clipboard.startMeasure, self.song.clipboard.stopMeasure +
1))
else:
self.selected_measure_numbers = ALL
if self.selected_beat_numbers is None:
if (self.song.clipboard is not None and self.song.clipboard.
subBarCopy):
self.selected_beat_numbers = list(range(self.song.clipboard
.startBeat, self.song.clipboard.stopBeat + 1))
else:
self.selected_beat_numbers = ALL
def find_clipboard(self):
for process in psutil.process_iter():
if process.name().lower() != 'gp5.exe':
continue
break
else:
raise click.ClickException(
'cannot get Guitar Pro 5 clipboard, is the process running?')
exe_path = process.cmdline()[0]
clipboard_path = os.path.join(os.path.dirname(exe_path), 'tmp',
'clipboard.tmp')
return clipboard_path
def write(self):
format = None if self.song.clipboard is None else 'tmp'
guitarpro.write(self.song, self.output_file, format=format)
def selected(self):
for track in self.selected_tracks():
for measure in self.selected_measures(track):
for voice in measure.voices:
for beat in self.selected_beats(voice):
yield track, measure, voice, beat
def selected_tracks(self):
if self.selected_track_numbers is ALL:
yield from self.song.tracks
return
for track in self.song.tracks:
if track.number in self.selected_track_numbers:
yield track
def selected_measures(self, track):
if self.selected_measure_numbers is ALL:
yield from track.measures
return
for measure in track.measures:
if measure.number in self.selected_measure_numbers:
yield measure
def selected_beats(self, voice):
if self.selected_beat_numbers is ALL:
yield from voice.beats
return
for number, beat in enumerate(voice.beats, start=1):
if number in self.selected_beat_numbers:
yield beat
|
import os
import attr
import click
import guitarpro
import psutil
ALL = object()
@attr.s
class GPTools:
input_file = attr.ib()
output_file = attr.ib()
selected_track_numbers = attr.ib(default=None)
selected_measure_numbers = attr.ib(default=None)
selected_beat_numbers = attr.ib(default=None)
song = None
def parse(self):
if self.input_file is None:
self.input_file = self.find_clipboard()
if self.output_file is None:
self.output_file = self.input_file
self.song = guitarpro.parse(self.input_file)
if self.selected_track_numbers is None:
if self.song.clipboard is not None:
self.selected_track_numbers = list(range(self.song.clipboard.startTrack, self.song.clipboard.stopTrack+1))
else:
self.selected_track_numbers = ALL
if self.selected_measure_numbers is None:
if self.song.clipboard is not None:
self.selected_measure_numbers = list(range(self.song.clipboard.startMeasure, self.song.clipboard.stopMeasure+1))
else:
self.selected_measure_numbers = ALL
if self.selected_beat_numbers is None:
if self.song.clipboard is not None and self.song.clipboard.subBarCopy:
self.selected_beat_numbers = list(range(self.song.clipboard.startBeat, self.song.clipboard.stopBeat+1))
else:
self.selected_beat_numbers = ALL
def find_clipboard(self):
for process in psutil.process_iter():
if process.name().lower() != 'gp5.exe':
continue
break
else:
raise click.ClickException('cannot get Guitar Pro 5 clipboard, is the process running?')
exe_path = process.cmdline()[0]
clipboard_path = os.path.join(os.path.dirname(exe_path), 'tmp', 'clipboard.tmp')
return clipboard_path
def write(self):
format = None if self.song.clipboard is None else 'tmp'
guitarpro.write(self.song, self.output_file, format=format)
def selected(self):
for track in self.selected_tracks():
for measure in self.selected_measures(track):
for voice in measure.voices:
for beat in self.selected_beats(voice):
yield track, measure, voice, beat
def selected_tracks(self):
if self.selected_track_numbers is ALL:
yield from self.song.tracks
return
for track in self.song.tracks:
if track.number in self.selected_track_numbers:
yield track
def selected_measures(self, track):
if self.selected_measure_numbers is ALL:
yield from track.measures
return
for measure in track.measures:
if measure.number in self.selected_measure_numbers:
yield measure
def selected_beats(self, voice):
if self.selected_beat_numbers is ALL:
yield from voice.beats
return
for number, beat in enumerate(voice.beats, start=1):
if number in self.selected_beat_numbers:
yield beat
|
[
5,
7,
8,
11,
12
] |
2,455 |
90218168841dc76febab67d1e992dfc993730ea4
|
<mask token>
def run_smac(max_fun=30):
from smac.facade.func_facade import fmin_smac
x, cost, smac = fmin_smac(func=test_func, x0=[-0], bounds=[(-5, 5)],
maxfun=max_fun, rng=1234)
runhistory = smac.get_runhistory()
x_smac = []
y_smac = []
for entry in runhistory.data:
config_id = entry.config_id
config = runhistory.ids_config[config_id]
y_ = runhistory.get_cost(config)
x_ = config['x1']
x_smac.append(x_)
y_smac.append(y_)
x_smac = np.array(x_smac)
y_smac = np.array(y_smac)
return smac, x_smac, y_smac
<mask token>
def clean_smac_shit():
import os
import shutil
for f in os.listdir('.'):
if f.startswith('smac3-output_'):
shutil.rmtree(f)
<mask token>
|
<mask token>
def test_func(x):
x = x[0]
return math.cos(x) * x ** 2 + x
def run_smac(max_fun=30):
from smac.facade.func_facade import fmin_smac
x, cost, smac = fmin_smac(func=test_func, x0=[-0], bounds=[(-5, 5)],
maxfun=max_fun, rng=1234)
runhistory = smac.get_runhistory()
x_smac = []
y_smac = []
for entry in runhistory.data:
config_id = entry.config_id
config = runhistory.ids_config[config_id]
y_ = runhistory.get_cost(config)
x_ = config['x1']
x_smac.append(x_)
y_smac.append(y_)
x_smac = np.array(x_smac)
y_smac = np.array(y_smac)
return smac, x_smac, y_smac
def plot_state(smac, model, x_points, y_points, x_smac, y_smac, step=None):
"""
plot function with all evaluated points,
EI acquisition function
Predictions with uncertainties
"""
from smac.optimizer.acquisition import EI
step = step or len(x_smac)
x_smac_ = np.array([[x] for x in x_smac[:step]])
y_smac_ = np.array([[y] for y in y_smac[:step]])
model.train(x_smac_, y_smac_)
acq_func = EI(model=model)
acq_func.update(model=model, eta=np.min(y_smac))
x_points_ = np.array([[x] for x in x_points])
acq_values = acq_func._compute(X=x_points_)[:, 0]
y_mean, y_var = model.predict(x_points_)
y_mean = y_mean[:, 0]
y_std = np.sqrt(y_var)[:, 0]
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.plot(x_points, acq_values)
plt.title('Aquisition Function')
plt.savefig('fig%da.pdf' % step)
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.plot(x_points, y_mean)
ax1.fill_between(x_points, y_mean - y_std, y_mean + y_std, alpha=0.5)
ax1.plot(x_smac[:step], y_smac[:step], 'bo')
ax1.plot(x_smac[:step], y_smac[:step], 'ro')
ax1.plot(x_points, y_points, '--')
plt.title('Uncertainty Predictions')
plt.savefig('fig%db.pdf' % step)
def clean_smac_shit():
import os
import shutil
for f in os.listdir('.'):
if f.startswith('smac3-output_'):
shutil.rmtree(f)
<mask token>
|
<mask token>
def test_func(x):
x = x[0]
return math.cos(x) * x ** 2 + x
def run_smac(max_fun=30):
from smac.facade.func_facade import fmin_smac
x, cost, smac = fmin_smac(func=test_func, x0=[-0], bounds=[(-5, 5)],
maxfun=max_fun, rng=1234)
runhistory = smac.get_runhistory()
x_smac = []
y_smac = []
for entry in runhistory.data:
config_id = entry.config_id
config = runhistory.ids_config[config_id]
y_ = runhistory.get_cost(config)
x_ = config['x1']
x_smac.append(x_)
y_smac.append(y_)
x_smac = np.array(x_smac)
y_smac = np.array(y_smac)
return smac, x_smac, y_smac
def plot_state(smac, model, x_points, y_points, x_smac, y_smac, step=None):
"""
plot function with all evaluated points,
EI acquisition function
Predictions with uncertainties
"""
from smac.optimizer.acquisition import EI
step = step or len(x_smac)
x_smac_ = np.array([[x] for x in x_smac[:step]])
y_smac_ = np.array([[y] for y in y_smac[:step]])
model.train(x_smac_, y_smac_)
acq_func = EI(model=model)
acq_func.update(model=model, eta=np.min(y_smac))
x_points_ = np.array([[x] for x in x_points])
acq_values = acq_func._compute(X=x_points_)[:, 0]
y_mean, y_var = model.predict(x_points_)
y_mean = y_mean[:, 0]
y_std = np.sqrt(y_var)[:, 0]
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.plot(x_points, acq_values)
plt.title('Aquisition Function')
plt.savefig('fig%da.pdf' % step)
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.plot(x_points, y_mean)
ax1.fill_between(x_points, y_mean - y_std, y_mean + y_std, alpha=0.5)
ax1.plot(x_smac[:step], y_smac[:step], 'bo')
ax1.plot(x_smac[:step], y_smac[:step], 'ro')
ax1.plot(x_points, y_points, '--')
plt.title('Uncertainty Predictions')
plt.savefig('fig%db.pdf' % step)
def clean_smac_shit():
import os
import shutil
for f in os.listdir('.'):
if f.startswith('smac3-output_'):
shutil.rmtree(f)
if __name__ == '__main__':
from smac.epm.rf_with_instances import RandomForestWithInstances
x_points = np.linspace(start=-5, stop=5, num=100)
y_points = list(map(test_func, map(lambda x: [x], x_points)))
smac, x_smac, y_smac = run_smac()
types, bounds = np.array([0]), np.array([[0.0, 1.0]])
model = RandomForestWithInstances(types=types, bounds=bounds,
instance_features=None, seed=12345, pca_components=12345,
ratio_features=1, num_trees=1000, min_samples_split=1,
min_samples_leaf=1, max_depth=100000, do_bootstrapping=False,
n_points_per_tree=-1, eps_purity=0)
for i in range(10):
plot_state(smac, model, x_points, y_points, x_smac, y_smac, i + 1)
clean_smac_shit()
|
import math
import numpy as np
import matplotlib.pyplot as plt
def test_func(x):
x = x[0]
return math.cos(x) * x ** 2 + x
def run_smac(max_fun=30):
from smac.facade.func_facade import fmin_smac
x, cost, smac = fmin_smac(func=test_func, x0=[-0], bounds=[(-5, 5)],
maxfun=max_fun, rng=1234)
runhistory = smac.get_runhistory()
x_smac = []
y_smac = []
for entry in runhistory.data:
config_id = entry.config_id
config = runhistory.ids_config[config_id]
y_ = runhistory.get_cost(config)
x_ = config['x1']
x_smac.append(x_)
y_smac.append(y_)
x_smac = np.array(x_smac)
y_smac = np.array(y_smac)
return smac, x_smac, y_smac
def plot_state(smac, model, x_points, y_points, x_smac, y_smac, step=None):
"""
plot function with all evaluated points,
EI acquisition function
Predictions with uncertainties
"""
from smac.optimizer.acquisition import EI
step = step or len(x_smac)
x_smac_ = np.array([[x] for x in x_smac[:step]])
y_smac_ = np.array([[y] for y in y_smac[:step]])
model.train(x_smac_, y_smac_)
acq_func = EI(model=model)
acq_func.update(model=model, eta=np.min(y_smac))
x_points_ = np.array([[x] for x in x_points])
acq_values = acq_func._compute(X=x_points_)[:, 0]
y_mean, y_var = model.predict(x_points_)
y_mean = y_mean[:, 0]
y_std = np.sqrt(y_var)[:, 0]
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.plot(x_points, acq_values)
plt.title('Aquisition Function')
plt.savefig('fig%da.pdf' % step)
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.plot(x_points, y_mean)
ax1.fill_between(x_points, y_mean - y_std, y_mean + y_std, alpha=0.5)
ax1.plot(x_smac[:step], y_smac[:step], 'bo')
ax1.plot(x_smac[:step], y_smac[:step], 'ro')
ax1.plot(x_points, y_points, '--')
plt.title('Uncertainty Predictions')
plt.savefig('fig%db.pdf' % step)
def clean_smac_shit():
import os
import shutil
for f in os.listdir('.'):
if f.startswith('smac3-output_'):
shutil.rmtree(f)
if __name__ == '__main__':
from smac.epm.rf_with_instances import RandomForestWithInstances
x_points = np.linspace(start=-5, stop=5, num=100)
y_points = list(map(test_func, map(lambda x: [x], x_points)))
smac, x_smac, y_smac = run_smac()
types, bounds = np.array([0]), np.array([[0.0, 1.0]])
model = RandomForestWithInstances(types=types, bounds=bounds,
instance_features=None, seed=12345, pca_components=12345,
ratio_features=1, num_trees=1000, min_samples_split=1,
min_samples_leaf=1, max_depth=100000, do_bootstrapping=False,
n_points_per_tree=-1, eps_purity=0)
for i in range(10):
plot_state(smac, model, x_points, y_points, x_smac, y_smac, i + 1)
clean_smac_shit()
|
import math
import numpy as np
import matplotlib.pyplot as plt
def test_func(x):
# x is vector; here of length 1
x = x[0]
return math.cos(x) * x**2 + x
def run_smac(max_fun=30):
from smac.facade.func_facade import fmin_smac
x, cost, smac = fmin_smac(func=test_func,
x0=[-0], # default values
bounds=[(-5, 5)], # bounds of each x
maxfun=max_fun, # maximal number of function evaluations
rng=1234 # random seed
)
runhistory = smac.get_runhistory()
# extract x value and corresponding y value
x_smac = []
y_smac = []
for entry in runhistory.data: # iterate over data because it is an OrderedDict
config_id = entry.config_id # look up config id
config = runhistory.ids_config[config_id] # look up config
y_ = runhistory.get_cost(config) # get cost
x_ = config["x1"] # there is only one entry in our example
x_smac.append(x_)
y_smac.append(y_)
x_smac = np.array(x_smac)
y_smac = np.array(y_smac)
return smac, x_smac, y_smac
def plot_state(smac, model, x_points, y_points, x_smac, y_smac, step=None):
"""
plot function with all evaluated points,
EI acquisition function
Predictions with uncertainties
"""
from smac.optimizer.acquisition import EI
# cost all points for x
step = step or len(x_smac)
x_smac_ = np.array([[x] for x in x_smac[:step]])
y_smac_ = np.array([[y] for y in y_smac[:step]])
# as an alternative, we could extract the points from the runhistory again
# but these points will be scaled to a unit-hypercube
# X, Y = smac.solver.rh2EPM.transform(runhistory)
model.train(x_smac_, y_smac_)
acq_func = EI(model=model)
acq_func.update(model=model, eta=np.min(y_smac))
x_points_ = np.array([[x] for x in x_points])
acq_values = acq_func._compute(X=x_points_)[:, 0]
# plot acquisition function
y_mean, y_var = model.predict(x_points_)
y_mean = y_mean[:, 0]
y_std = np.sqrt(y_var)[:, 0]
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.plot(x_points, acq_values)
plt.title("Aquisition Function")
plt.savefig('fig%da.pdf' % step)
# plot uncertainties
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.plot(x_points, y_mean)
ax1.fill_between(x_points, y_mean - y_std,
y_mean + y_std, alpha=0.5)
ax1.plot(x_smac[:step], y_smac[:step], 'bo')
ax1.plot(x_smac[:step], y_smac[:step], 'ro')
ax1.plot(x_points, y_points, '--')
plt.title("Uncertainty Predictions")
plt.savefig('fig%db.pdf' % step)
def clean_smac_shit():
import os
import shutil
for f in os.listdir('.'):
if f.startswith('smac3-output_'):
shutil.rmtree(f)
if __name__ == '__main__':
from smac.epm.rf_with_instances import RandomForestWithInstances
x_points = np.linspace(start=-5, stop=5, num=100)
y_points = list(map(test_func, map(lambda x: [x], x_points)))
smac, x_smac, y_smac = run_smac()
types, bounds = np.array([0]), np.array([[0.0, 1.0]])
model = RandomForestWithInstances(types=types,
bounds=bounds,
instance_features=None,
seed=12345,
pca_components=12345,
ratio_features=1,
num_trees=1000,
min_samples_split=1,
min_samples_leaf=1,
max_depth=100000,
do_bootstrapping=False,
n_points_per_tree=-1,
eps_purity=0
)
for i in range(10):
plot_state(smac, model, x_points, y_points, x_smac, y_smac, i+1)
clean_smac_shit()
|
[
2,
4,
5,
6,
7
] |
2,456 |
2b1ec422a42af59a048c708f86b686eb0564b51f
|
<mask token>
|
<mask token>
urlpatterns = [url('^$', SprintListView.as_view(), name='sprint_list'),
path('create/', view=CreateSprintView.as_view(), name='create_sprint'),
path('modificar/<int:sprint_pk>/', view=UpdateSprintView.as_view(),
name='update_sprint'), path('<int:sprint_pk>/asignarus/', view=
AsignarUSUpdateView.as_view(), name='asignar_us'), path(
'<int:sprint_pk>/tableros/<int:flujo_pk>/', view=TableroTemplateView.
as_view(), name='tablero'), path(route='ver/<int:pk>/', view=
VerSprintDetailView.as_view(), name='ver_sprint'), path(route=
'<int:sprint_pk>/sprintbacklogpdf/', view=SprintBacklogPDF.as_view(),
name='reporte_sb'), path(route='<int:sprint_pk>/prioridades/', view=
PrioridadesPDF.as_view(), name='prioridades')]
|
from django.conf.urls import url
from django.urls import path
from .views import *
from flujo.views import *
<mask token>
urlpatterns = [url('^$', SprintListView.as_view(), name='sprint_list'),
path('create/', view=CreateSprintView.as_view(), name='create_sprint'),
path('modificar/<int:sprint_pk>/', view=UpdateSprintView.as_view(),
name='update_sprint'), path('<int:sprint_pk>/asignarus/', view=
AsignarUSUpdateView.as_view(), name='asignar_us'), path(
'<int:sprint_pk>/tableros/<int:flujo_pk>/', view=TableroTemplateView.
as_view(), name='tablero'), path(route='ver/<int:pk>/', view=
VerSprintDetailView.as_view(), name='ver_sprint'), path(route=
'<int:sprint_pk>/sprintbacklogpdf/', view=SprintBacklogPDF.as_view(),
name='reporte_sb'), path(route='<int:sprint_pk>/prioridades/', view=
PrioridadesPDF.as_view(), name='prioridades')]
|
from django.conf.urls import url
from django.urls import path
from .views import *
from flujo.views import *
"""
URL para el Sprint crear, listar y modificar
"""
urlpatterns = [
url(r'^$', SprintListView.as_view(), name='sprint_list'),
path('create/', view=CreateSprintView.as_view(), name='create_sprint'),
path('modificar/<int:sprint_pk>/', view=UpdateSprintView.as_view(), name='update_sprint'),
path('<int:sprint_pk>/asignarus/', view=AsignarUSUpdateView.as_view(), name='asignar_us'),
path('<int:sprint_pk>/tableros/<int:flujo_pk>/', view=TableroTemplateView.as_view(), name='tablero'),
path(route='ver/<int:pk>/', view=VerSprintDetailView.as_view(), name='ver_sprint'),
path(route='<int:sprint_pk>/sprintbacklogpdf/', view=SprintBacklogPDF.as_view(), name="reporte_sb"),
path(route='<int:sprint_pk>/prioridades/', view=PrioridadesPDF.as_view(), name="prioridades")
]
| null |
[
0,
1,
2,
3
] |
2,457 |
a555226b14223dca688d10b811eb36fb229360ce
|
<mask token>
class UIMainWindow(object):
def __init__(self):
font = QtGui.QFont()
font.setFamily('Myriad Pro')
font.setPointSize(14)
self.main_window = QtWidgets.QWidget()
self.main_window.setFont(font)
self.main_window.setObjectName('main_window')
self.main_window.setWindowModality(QtCore.Qt.WindowModal)
self.main_window.resize(450, 460)
size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,
QtWidgets.QSizePolicy.Fixed)
size_policy.setHorizontalStretch(0)
size_policy.setVerticalStretch(0)
size_policy.setHeightForWidth(self.main_window.sizePolicy().
hasHeightForWidth())
self.main_window.setSizePolicy(size_policy)
self.main_window.setMinimumSize(QtCore.QSize(450, 460))
self.main_window.setMaximumSize(QtCore.QSize(450, 460))
self.main_window.setBaseSize(QtCore.QSize(450, 460))
self.branding_icon = QtWidgets.QLabel(self.main_window)
self.branding_icon.setGeometry(QtCore.QRect(20, 5, 90, 90))
self.branding_icon.setText('')
self.branding_icon.setPixmap(QtGui.QPixmap(
'../images/senticompare_logo.png'))
self.branding_icon.setAlignment(QtCore.Qt.AlignJustify | QtCore.Qt.
AlignVCenter)
self.branding_icon.setObjectName('branding_icon')
self.branding_label = QtWidgets.QLabel(self.main_window)
self.branding_label.setGeometry(QtCore.QRect(110, 5, 330, 90))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,
brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,
brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,
brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
self.branding_label.setPalette(palette)
font = QtGui.QFont()
font.setFamily('Optima')
font.setPointSize(50)
self.branding_label.setFont(font)
self.branding_label.setObjectName('branding_label')
self.horizontal_layout_widget_1 = QtWidgets.QWidget(self.main_window)
self.horizontal_layout_widget_1.setGeometry(QtCore.QRect(10, 410,
430, 50))
self.horizontal_layout_widget_1.setObjectName(
'horizontal_layout_widget_1')
self.horizontal_layout_1 = QtWidgets.QHBoxLayout(self.
horizontal_layout_widget_1)
self.horizontal_layout_1.setContentsMargins(0, 0, 0, 0)
self.horizontal_layout_1.setObjectName('horizontal_layout_1')
self.run_button = QtWidgets.QPushButton(self.horizontal_layout_widget_1
)
self.run_button.setObjectName('run_button')
self.run_button.clicked.connect(self.run)
self.horizontal_layout_1.addWidget(self.run_button)
self.quit_button = QtWidgets.QPushButton(self.
horizontal_layout_widget_1)
self.quit_button.setObjectName('quit_button')
self.quit_button.clicked.connect(self.main_window.close)
self.horizontal_layout_1.addWidget(self.quit_button)
self.select_files_tab = QtWidgets.QWidget()
self.select_files_tab.setObjectName('select_files_tab')
self.horizontal_layout_widget_2 = QtWidgets.QWidget(self.
select_files_tab)
self.horizontal_layout_widget_2.setGeometry(QtCore.QRect(10, 230,
230, 50))
self.horizontal_layout_widget_2.setObjectName(
'horizontal_layout_widget_2')
self.horizontal_layout_2 = QtWidgets.QHBoxLayout(self.
horizontal_layout_widget_2)
self.horizontal_layout_2.setContentsMargins(0, 0, 0, 0)
self.horizontal_layout_2.setObjectName('horizontal_layout_2')
font.setFamily('Myriad Pro')
font.setPointSize(12)
self.input_output_box = QtWidgets.QTabWidget(self.main_window)
self.input_output_box.setGeometry(QtCore.QRect(10, 100, 260, 300))
self.input_output_box.setFont(font)
self.input_output_box.setCursor(QtGui.QCursor(QtCore.Qt.
PointingHandCursor))
self.input_output_box.setTabPosition(QtWidgets.QTabWidget.North)
self.input_output_box.setTabShape(QtWidgets.QTabWidget.Rounded)
self.input_output_box.setTabsClosable(False)
self.input_output_box.setObjectName('input_output_box')
self.file_view = QtWidgets.QListView(self.select_files_tab)
self.file_view.setGeometry(QtCore.QRect(10, 10, 235, 210))
self.file_view.setObjectName('file_view')
self.file_view_model = QStandardItemModel(self.file_view)
self.file_view.setModel(self.file_view_model)
self.file_view.show()
self.input_output_box.addTab(self.select_files_tab, '')
self.add_button = QtWidgets.QPushButton(self.horizontal_layout_widget_2
)
self.add_button.setFont(font)
self.add_button.setObjectName('add_button')
self.add_button.clicked.connect(self.selectFiles)
self.horizontal_layout_2.addWidget(self.add_button)
self.delete_button = QtWidgets.QPushButton(self.
horizontal_layout_widget_2)
self.delete_button.setFont(font)
self.delete_button.setObjectName('delete_button')
self.delete_button.clicked.connect(self.removeFiles)
self.horizontal_layout_2.addWidget(self.delete_button)
self.manual_input_tab = QtWidgets.QWidget()
self.manual_input_tab.setObjectName('manual_input_tab')
self.text_input = QtWidgets.QTextEdit(self.manual_input_tab)
self.text_input.setGeometry(QtCore.QRect(10, 10, 235, 250))
self.text_input.setObjectName('text_input')
self.input_output_box.addTab(self.manual_input_tab, '')
self.results_tab = QtWidgets.QWidget()
self.results_tab.setObjectName('results_tab')
self.results_scroll_box = QtWidgets.QScrollArea(self.results_tab)
self.results_scroll_box.setGeometry(QtCore.QRect(10, 10, 235, 250))
self.results_scroll_box.setWidgetResizable(True)
self.results_scroll_box.setObjectName('results_scroll_box')
self.results_content = QtWidgets.QWidget()
self.results_content.setGeometry(QtCore.QRect(0, 0, 230, 250))
self.results_content.setObjectName('results_content')
self.results_scroll_box.setWidget(self.results_content)
self.results_content_text = QtWidgets.QTextEdit(self.results_content)
self.results_content_text.setGeometry(QtCore.QRect(-1, -1, 235, 250))
self.results_content_text.setReadOnly(True)
self.results_content_text.setObjectName('results_content_text')
self.input_output_box.addTab(self.results_tab, '')
self.input_output_box.setTabEnabled(2, False)
font.setPointSize(14)
self.group_box_1 = QtWidgets.QGroupBox(self.main_window)
self.group_box_1.setGeometry(QtCore.QRect(280, 110, 160, 140))
self.group_box_1.setFont(font)
self.group_box_1.setTitle('')
self.group_box_1.setAlignment(QtCore.Qt.AlignCenter)
self.group_box_1.setFlat(False)
self.group_box_1.setCheckable(False)
self.group_box_1.setObjectName('group_box_1')
self.vertical_layout_widget_1 = QtWidgets.QWidget(self.group_box_1)
self.vertical_layout_widget_1.setGeometry(QtCore.QRect(9, 0, 141, 141))
self.vertical_layout_widget_1.setObjectName('vertical_layout_widget_1')
self.vertical_layout_1 = QtWidgets.QVBoxLayout(self.
vertical_layout_widget_1)
self.vertical_layout_1.setContentsMargins(0, 0, 0, 0)
self.vertical_layout_1.setObjectName('vertical_layout_1')
self.pronoun_checkbox = QtWidgets.QCheckBox(self.
vertical_layout_widget_1)
self.pronoun_checkbox.setFont(font)
self.pronoun_checkbox.setObjectName('pronoun_checkbox')
self.vertical_layout_1.addWidget(self.pronoun_checkbox)
self.lexical_checkbox = QtWidgets.QCheckBox(self.
vertical_layout_widget_1)
self.lexical_checkbox.setFont(font)
self.lexical_checkbox.setObjectName('lexical_checkbox')
self.vertical_layout_1.addWidget(self.lexical_checkbox)
self.rule_based_checkbox = QtWidgets.QCheckBox(self.
vertical_layout_widget_1)
self.rule_based_checkbox.setFont(font)
self.rule_based_checkbox.setObjectName('rule_based_checkbox')
self.vertical_layout_1.addWidget(self.rule_based_checkbox)
self.machine_learning_checkbox = QtWidgets.QCheckBox(self.
vertical_layout_widget_1)
self.machine_learning_checkbox.setFont(font)
self.machine_learning_checkbox.setObjectName(
'machine_learning_checkbox')
self.vertical_layout_1.addWidget(self.machine_learning_checkbox)
self.help_scroll_box = QtWidgets.QScrollArea(self.main_window)
self.help_scroll_box.setGeometry(QtCore.QRect(280, 260, 160, 140))
self.help_scroll_box.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.help_scroll_box.setFrameShadow(QtWidgets.QFrame.Sunken)
self.help_scroll_box.setWidgetResizable(True)
self.help_scroll_box.setObjectName('help_scroll_box')
self.help_content = QtWidgets.QWidget()
self.help_content.setGeometry(QtCore.QRect(0, 0, 158, 138))
self.help_content.setObjectName('help_content')
self.help_scroll_box.setWidget(self.help_content)
self.selected_files = {}
self.input_output_box.setCurrentIndex(0)
self.retranslateUI()
QtCore.QMetaObject.connectSlotsByName(self.main_window)
def retranslateUI(self):
_translate = QtCore.QCoreApplication.translate
self.main_window.setWindowTitle(_translate('main_window',
'SentiCompare'))
self.add_button.setText(_translate('main_window', 'Add'))
self.delete_button.setText(_translate('main_window', 'Delete'))
self.input_output_box.setTabText(self.input_output_box.indexOf(self
.select_files_tab), _translate('main_window', 'Select Files'))
self.input_output_box.setTabText(self.input_output_box.indexOf(self
.manual_input_tab), _translate('main_window', 'Manual Input'))
self.input_output_box.setTabText(self.input_output_box.indexOf(self
.results_tab), _translate('main_window', 'Results'))
self.run_button.setText(_translate('main_window', 'Run'))
self.quit_button.setText(_translate('main_window', 'Quit'))
self.pronoun_checkbox.setText(_translate('main_window',
'Pronoun Usage'))
self.lexical_checkbox.setText(_translate('main_window', 'Lexical'))
self.rule_based_checkbox.setText(_translate('main_window',
'Rule Based'))
self.machine_learning_checkbox.setText(_translate('main_window',
'Machine Learning'))
self.branding_label.setText(_translate('main_window', 'SentiCompare'))
def showWindow(self):
self.main_window.show()
def selectFiles(self):
file_dialog = FileDialog(self.main_window)
file_dialog.setFilters(['Text files (*.txt)'])
file_dialog.setDefaultFilterIndex = 0
file_dialog.setDefaultDirectory(os.path.expanduser('~'))
file_dialog.exec()
if file_dialog.getPath() == '':
return
elif file_dialog.getFilename()[2] == '':
for file in os.listdir(file_dialog.getPath()):
if file.endswith('.txt') and not file.startswith('.'):
file_path = os.path.join(file_dialog.getPath(), file)
if file_path not in self.selected_files:
self.selected_files[file] = file_path
item = QStandardItem(file)
item.setCheckable(True)
self.file_view_model.appendRow(item)
elif file_dialog.getPath() not in self.selected_files:
self.selected_files[file_dialog.getFilename()[1]
] = file_dialog.getPath()
item = QStandardItem(file_dialog.getFilename()[1])
item.setCheckable(True)
self.file_view_model.appendRow(item)
<mask token>
<mask token>
|
<mask token>
class UIMainWindow(object):
def __init__(self):
font = QtGui.QFont()
font.setFamily('Myriad Pro')
font.setPointSize(14)
self.main_window = QtWidgets.QWidget()
self.main_window.setFont(font)
self.main_window.setObjectName('main_window')
self.main_window.setWindowModality(QtCore.Qt.WindowModal)
self.main_window.resize(450, 460)
size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,
QtWidgets.QSizePolicy.Fixed)
size_policy.setHorizontalStretch(0)
size_policy.setVerticalStretch(0)
size_policy.setHeightForWidth(self.main_window.sizePolicy().
hasHeightForWidth())
self.main_window.setSizePolicy(size_policy)
self.main_window.setMinimumSize(QtCore.QSize(450, 460))
self.main_window.setMaximumSize(QtCore.QSize(450, 460))
self.main_window.setBaseSize(QtCore.QSize(450, 460))
self.branding_icon = QtWidgets.QLabel(self.main_window)
self.branding_icon.setGeometry(QtCore.QRect(20, 5, 90, 90))
self.branding_icon.setText('')
self.branding_icon.setPixmap(QtGui.QPixmap(
'../images/senticompare_logo.png'))
self.branding_icon.setAlignment(QtCore.Qt.AlignJustify | QtCore.Qt.
AlignVCenter)
self.branding_icon.setObjectName('branding_icon')
self.branding_label = QtWidgets.QLabel(self.main_window)
self.branding_label.setGeometry(QtCore.QRect(110, 5, 330, 90))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,
brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,
brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,
brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
self.branding_label.setPalette(palette)
font = QtGui.QFont()
font.setFamily('Optima')
font.setPointSize(50)
self.branding_label.setFont(font)
self.branding_label.setObjectName('branding_label')
self.horizontal_layout_widget_1 = QtWidgets.QWidget(self.main_window)
self.horizontal_layout_widget_1.setGeometry(QtCore.QRect(10, 410,
430, 50))
self.horizontal_layout_widget_1.setObjectName(
'horizontal_layout_widget_1')
self.horizontal_layout_1 = QtWidgets.QHBoxLayout(self.
horizontal_layout_widget_1)
self.horizontal_layout_1.setContentsMargins(0, 0, 0, 0)
self.horizontal_layout_1.setObjectName('horizontal_layout_1')
self.run_button = QtWidgets.QPushButton(self.horizontal_layout_widget_1
)
self.run_button.setObjectName('run_button')
self.run_button.clicked.connect(self.run)
self.horizontal_layout_1.addWidget(self.run_button)
self.quit_button = QtWidgets.QPushButton(self.
horizontal_layout_widget_1)
self.quit_button.setObjectName('quit_button')
self.quit_button.clicked.connect(self.main_window.close)
self.horizontal_layout_1.addWidget(self.quit_button)
self.select_files_tab = QtWidgets.QWidget()
self.select_files_tab.setObjectName('select_files_tab')
self.horizontal_layout_widget_2 = QtWidgets.QWidget(self.
select_files_tab)
self.horizontal_layout_widget_2.setGeometry(QtCore.QRect(10, 230,
230, 50))
self.horizontal_layout_widget_2.setObjectName(
'horizontal_layout_widget_2')
self.horizontal_layout_2 = QtWidgets.QHBoxLayout(self.
horizontal_layout_widget_2)
self.horizontal_layout_2.setContentsMargins(0, 0, 0, 0)
self.horizontal_layout_2.setObjectName('horizontal_layout_2')
font.setFamily('Myriad Pro')
font.setPointSize(12)
self.input_output_box = QtWidgets.QTabWidget(self.main_window)
self.input_output_box.setGeometry(QtCore.QRect(10, 100, 260, 300))
self.input_output_box.setFont(font)
self.input_output_box.setCursor(QtGui.QCursor(QtCore.Qt.
PointingHandCursor))
self.input_output_box.setTabPosition(QtWidgets.QTabWidget.North)
self.input_output_box.setTabShape(QtWidgets.QTabWidget.Rounded)
self.input_output_box.setTabsClosable(False)
self.input_output_box.setObjectName('input_output_box')
self.file_view = QtWidgets.QListView(self.select_files_tab)
self.file_view.setGeometry(QtCore.QRect(10, 10, 235, 210))
self.file_view.setObjectName('file_view')
self.file_view_model = QStandardItemModel(self.file_view)
self.file_view.setModel(self.file_view_model)
self.file_view.show()
self.input_output_box.addTab(self.select_files_tab, '')
self.add_button = QtWidgets.QPushButton(self.horizontal_layout_widget_2
)
self.add_button.setFont(font)
self.add_button.setObjectName('add_button')
self.add_button.clicked.connect(self.selectFiles)
self.horizontal_layout_2.addWidget(self.add_button)
self.delete_button = QtWidgets.QPushButton(self.
horizontal_layout_widget_2)
self.delete_button.setFont(font)
self.delete_button.setObjectName('delete_button')
self.delete_button.clicked.connect(self.removeFiles)
self.horizontal_layout_2.addWidget(self.delete_button)
self.manual_input_tab = QtWidgets.QWidget()
self.manual_input_tab.setObjectName('manual_input_tab')
self.text_input = QtWidgets.QTextEdit(self.manual_input_tab)
self.text_input.setGeometry(QtCore.QRect(10, 10, 235, 250))
self.text_input.setObjectName('text_input')
self.input_output_box.addTab(self.manual_input_tab, '')
self.results_tab = QtWidgets.QWidget()
self.results_tab.setObjectName('results_tab')
self.results_scroll_box = QtWidgets.QScrollArea(self.results_tab)
self.results_scroll_box.setGeometry(QtCore.QRect(10, 10, 235, 250))
self.results_scroll_box.setWidgetResizable(True)
self.results_scroll_box.setObjectName('results_scroll_box')
self.results_content = QtWidgets.QWidget()
self.results_content.setGeometry(QtCore.QRect(0, 0, 230, 250))
self.results_content.setObjectName('results_content')
self.results_scroll_box.setWidget(self.results_content)
self.results_content_text = QtWidgets.QTextEdit(self.results_content)
self.results_content_text.setGeometry(QtCore.QRect(-1, -1, 235, 250))
self.results_content_text.setReadOnly(True)
self.results_content_text.setObjectName('results_content_text')
self.input_output_box.addTab(self.results_tab, '')
self.input_output_box.setTabEnabled(2, False)
font.setPointSize(14)
self.group_box_1 = QtWidgets.QGroupBox(self.main_window)
self.group_box_1.setGeometry(QtCore.QRect(280, 110, 160, 140))
self.group_box_1.setFont(font)
self.group_box_1.setTitle('')
self.group_box_1.setAlignment(QtCore.Qt.AlignCenter)
self.group_box_1.setFlat(False)
self.group_box_1.setCheckable(False)
self.group_box_1.setObjectName('group_box_1')
self.vertical_layout_widget_1 = QtWidgets.QWidget(self.group_box_1)
self.vertical_layout_widget_1.setGeometry(QtCore.QRect(9, 0, 141, 141))
self.vertical_layout_widget_1.setObjectName('vertical_layout_widget_1')
self.vertical_layout_1 = QtWidgets.QVBoxLayout(self.
vertical_layout_widget_1)
self.vertical_layout_1.setContentsMargins(0, 0, 0, 0)
self.vertical_layout_1.setObjectName('vertical_layout_1')
self.pronoun_checkbox = QtWidgets.QCheckBox(self.
vertical_layout_widget_1)
self.pronoun_checkbox.setFont(font)
self.pronoun_checkbox.setObjectName('pronoun_checkbox')
self.vertical_layout_1.addWidget(self.pronoun_checkbox)
self.lexical_checkbox = QtWidgets.QCheckBox(self.
vertical_layout_widget_1)
self.lexical_checkbox.setFont(font)
self.lexical_checkbox.setObjectName('lexical_checkbox')
self.vertical_layout_1.addWidget(self.lexical_checkbox)
self.rule_based_checkbox = QtWidgets.QCheckBox(self.
vertical_layout_widget_1)
self.rule_based_checkbox.setFont(font)
self.rule_based_checkbox.setObjectName('rule_based_checkbox')
self.vertical_layout_1.addWidget(self.rule_based_checkbox)
self.machine_learning_checkbox = QtWidgets.QCheckBox(self.
vertical_layout_widget_1)
self.machine_learning_checkbox.setFont(font)
self.machine_learning_checkbox.setObjectName(
'machine_learning_checkbox')
self.vertical_layout_1.addWidget(self.machine_learning_checkbox)
self.help_scroll_box = QtWidgets.QScrollArea(self.main_window)
self.help_scroll_box.setGeometry(QtCore.QRect(280, 260, 160, 140))
self.help_scroll_box.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.help_scroll_box.setFrameShadow(QtWidgets.QFrame.Sunken)
self.help_scroll_box.setWidgetResizable(True)
self.help_scroll_box.setObjectName('help_scroll_box')
self.help_content = QtWidgets.QWidget()
self.help_content.setGeometry(QtCore.QRect(0, 0, 158, 138))
self.help_content.setObjectName('help_content')
self.help_scroll_box.setWidget(self.help_content)
self.selected_files = {}
self.input_output_box.setCurrentIndex(0)
self.retranslateUI()
QtCore.QMetaObject.connectSlotsByName(self.main_window)
def retranslateUI(self):
_translate = QtCore.QCoreApplication.translate
self.main_window.setWindowTitle(_translate('main_window',
'SentiCompare'))
self.add_button.setText(_translate('main_window', 'Add'))
self.delete_button.setText(_translate('main_window', 'Delete'))
self.input_output_box.setTabText(self.input_output_box.indexOf(self
.select_files_tab), _translate('main_window', 'Select Files'))
self.input_output_box.setTabText(self.input_output_box.indexOf(self
.manual_input_tab), _translate('main_window', 'Manual Input'))
self.input_output_box.setTabText(self.input_output_box.indexOf(self
.results_tab), _translate('main_window', 'Results'))
self.run_button.setText(_translate('main_window', 'Run'))
self.quit_button.setText(_translate('main_window', 'Quit'))
self.pronoun_checkbox.setText(_translate('main_window',
'Pronoun Usage'))
self.lexical_checkbox.setText(_translate('main_window', 'Lexical'))
self.rule_based_checkbox.setText(_translate('main_window',
'Rule Based'))
self.machine_learning_checkbox.setText(_translate('main_window',
'Machine Learning'))
self.branding_label.setText(_translate('main_window', 'SentiCompare'))
    def showWindow(self):
        """Make the main application window visible on screen."""
        self.main_window.show()
def selectFiles(self):
file_dialog = FileDialog(self.main_window)
file_dialog.setFilters(['Text files (*.txt)'])
file_dialog.setDefaultFilterIndex = 0
file_dialog.setDefaultDirectory(os.path.expanduser('~'))
file_dialog.exec()
if file_dialog.getPath() == '':
return
elif file_dialog.getFilename()[2] == '':
for file in os.listdir(file_dialog.getPath()):
if file.endswith('.txt') and not file.startswith('.'):
file_path = os.path.join(file_dialog.getPath(), file)
if file_path not in self.selected_files:
self.selected_files[file] = file_path
item = QStandardItem(file)
item.setCheckable(True)
self.file_view_model.appendRow(item)
elif file_dialog.getPath() not in self.selected_files:
self.selected_files[file_dialog.getFilename()[1]
] = file_dialog.getPath()
item = QStandardItem(file_dialog.getFilename()[1])
item.setCheckable(True)
self.file_view_model.appendRow(item)
<mask token>
def run(self):
if not (self.pronoun_checkbox.isChecked() or self.lexical_checkbox.
isChecked() or self.rule_based_checkbox.isChecked() or self.
machine_learning_checkbox.isChecked()):
message_box = QMessageBox()
message_box.setIcon(QMessageBox.Warning)
message_box.setWindowTitle('Missing Parameters')
message_box.setText(
"You haven't selected any methods of sentiment analysis. Please select at least one "
+ 'method from the list of options.')
message_box.exec_()
return
if self.input_output_box.currentIndex() == 2:
message_box = QMessageBox()
message_box.setIcon(QMessageBox.Warning)
message_box.setWindowTitle('Select Input')
message_box.setText(
'You must be on the "Select Files" page or the "Manual Input" page to run '
+
'an analysis. Please select one of those pages and try again.')
message_box.exec_()
return
else:
progress_bar = QtWidgets.QProgressDialog(
'Running Sentiment Analysis...', 'Cancel', 0, 100, self.
main_window)
progress_bar.setValue(0)
progress_bar.setCancelButton(None)
progress_bar.setWindowModality(QtCore.Qt.WindowModal)
progress_bar.resize(400, 50)
progress_bar.show()
if self.input_output_box.currentIndex() == 0:
sentiment_analyzer = SentimentAnalyzer(self.selected_files,
progress_bar, pronoun=self.pronoun_checkbox.isChecked(),
lexical=self.lexical_checkbox.isChecked(), rule_based=
self.rule_based_checkbox.isChecked(), machine_learning=
self.machine_learning_checkbox.isChecked())
else:
sentiment_analyzer = SentimentAnalyzer(self.text_input.
toPlainText(), progress_bar, pronoun=self.
pronoun_checkbox.isChecked(), lexical=self.
lexical_checkbox.isChecked(), rule_based=self.
rule_based_checkbox.isChecked(), machine_learning=self.
machine_learning_checkbox.isChecked())
results = sentiment_analyzer.runAnalyses()
progress_bar.close()
if results:
self.results_content_text.setText(results)
self.input_output_box.setTabEnabled(2, True)
self.input_output_box.setCurrentIndex(2)
else:
message_box = QMessageBox()
message_box.setIcon(QMessageBox.Warning)
message_box.setWindowTitle('Missing Input')
message_box.setText(
"You haven't added any input to analyze. Please select one or more files or "
+ 'input some data manually.')
message_box.exec_()
return
|
<mask token>
class UIMainWindow(object):
    """Main SentiCompare window: file/manual input tabs, analysis-method
    checkboxes, and a results tab wired to SentimentAnalyzer.

    Fix applied: selectFiles' duplicate detection compared full paths against
    the filename keys of ``self.selected_files`` and therefore never caught a
    re-added file; it now compares filenames (the actual dict keys, as used
    by removeFiles).
    """

    def __init__(self):
        """Build the fixed-size 450x460 main window and all child widgets."""
        # Base window: fixed size, default 14pt Myriad Pro.
        font = QtGui.QFont()
        font.setFamily('Myriad Pro')
        font.setPointSize(14)
        self.main_window = QtWidgets.QWidget()
        self.main_window.setFont(font)
        self.main_window.setObjectName('main_window')
        self.main_window.setWindowModality(QtCore.Qt.WindowModal)
        self.main_window.resize(450, 460)
        size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,
                                            QtWidgets.QSizePolicy.Fixed)
        size_policy.setHorizontalStretch(0)
        size_policy.setVerticalStretch(0)
        size_policy.setHeightForWidth(
            self.main_window.sizePolicy().hasHeightForWidth())
        self.main_window.setSizePolicy(size_policy)
        self.main_window.setMinimumSize(QtCore.QSize(450, 460))
        self.main_window.setMaximumSize(QtCore.QSize(450, 460))
        self.main_window.setBaseSize(QtCore.QSize(450, 460))
        # Branding: logo image plus large "SentiCompare" label.
        self.branding_icon = QtWidgets.QLabel(self.main_window)
        self.branding_icon.setGeometry(QtCore.QRect(20, 5, 90, 90))
        self.branding_icon.setText('')
        self.branding_icon.setPixmap(
            QtGui.QPixmap('../images/senticompare_logo.png'))
        self.branding_icon.setAlignment(
            QtCore.Qt.AlignJustify | QtCore.Qt.AlignVCenter)
        self.branding_icon.setObjectName('branding_icon')
        self.branding_label = QtWidgets.QLabel(self.main_window)
        self.branding_label.setGeometry(QtCore.QRect(110, 5, 330, 90))
        # Blue-grey WindowText colour for the branding label in every state.
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,
            brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
        brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,
            brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
        brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,
            brush)
        brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
        self.branding_label.setPalette(palette)
        font = QtGui.QFont()
        font.setFamily('Optima')
        font.setPointSize(50)
        self.branding_label.setFont(font)
        self.branding_label.setObjectName('branding_label')
        # Bottom row: Run / Quit buttons.
        self.horizontal_layout_widget_1 = QtWidgets.QWidget(self.main_window)
        self.horizontal_layout_widget_1.setGeometry(QtCore.QRect(10, 410,
            430, 50))
        self.horizontal_layout_widget_1.setObjectName(
            'horizontal_layout_widget_1')
        self.horizontal_layout_1 = QtWidgets.QHBoxLayout(
            self.horizontal_layout_widget_1)
        self.horizontal_layout_1.setContentsMargins(0, 0, 0, 0)
        self.horizontal_layout_1.setObjectName('horizontal_layout_1')
        self.run_button = QtWidgets.QPushButton(
            self.horizontal_layout_widget_1)
        self.run_button.setObjectName('run_button')
        self.run_button.clicked.connect(self.run)
        self.horizontal_layout_1.addWidget(self.run_button)
        self.quit_button = QtWidgets.QPushButton(
            self.horizontal_layout_widget_1)
        self.quit_button.setObjectName('quit_button')
        self.quit_button.clicked.connect(self.main_window.close)
        self.horizontal_layout_1.addWidget(self.quit_button)
        # "Select Files" tab: list of chosen files with Add/Delete buttons.
        self.select_files_tab = QtWidgets.QWidget()
        self.select_files_tab.setObjectName('select_files_tab')
        self.horizontal_layout_widget_2 = QtWidgets.QWidget(
            self.select_files_tab)
        self.horizontal_layout_widget_2.setGeometry(QtCore.QRect(10, 230,
            230, 50))
        self.horizontal_layout_widget_2.setObjectName(
            'horizontal_layout_widget_2')
        self.horizontal_layout_2 = QtWidgets.QHBoxLayout(
            self.horizontal_layout_widget_2)
        self.horizontal_layout_2.setContentsMargins(0, 0, 0, 0)
        self.horizontal_layout_2.setObjectName('horizontal_layout_2')
        # The same QFont object is deliberately mutated and re-used below.
        font.setFamily('Myriad Pro')
        font.setPointSize(12)
        self.input_output_box = QtWidgets.QTabWidget(self.main_window)
        self.input_output_box.setGeometry(QtCore.QRect(10, 100, 260, 300))
        self.input_output_box.setFont(font)
        self.input_output_box.setCursor(
            QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.input_output_box.setTabPosition(QtWidgets.QTabWidget.North)
        self.input_output_box.setTabShape(QtWidgets.QTabWidget.Rounded)
        self.input_output_box.setTabsClosable(False)
        self.input_output_box.setObjectName('input_output_box')
        self.file_view = QtWidgets.QListView(self.select_files_tab)
        self.file_view.setGeometry(QtCore.QRect(10, 10, 235, 210))
        self.file_view.setObjectName('file_view')
        self.file_view_model = QStandardItemModel(self.file_view)
        self.file_view.setModel(self.file_view_model)
        self.file_view.show()
        self.input_output_box.addTab(self.select_files_tab, '')
        self.add_button = QtWidgets.QPushButton(
            self.horizontal_layout_widget_2)
        self.add_button.setFont(font)
        self.add_button.setObjectName('add_button')
        self.add_button.clicked.connect(self.selectFiles)
        self.horizontal_layout_2.addWidget(self.add_button)
        self.delete_button = QtWidgets.QPushButton(
            self.horizontal_layout_widget_2)
        self.delete_button.setFont(font)
        self.delete_button.setObjectName('delete_button')
        self.delete_button.clicked.connect(self.removeFiles)
        self.horizontal_layout_2.addWidget(self.delete_button)
        # "Manual Input" tab: free-form text entry.
        self.manual_input_tab = QtWidgets.QWidget()
        self.manual_input_tab.setObjectName('manual_input_tab')
        self.text_input = QtWidgets.QTextEdit(self.manual_input_tab)
        self.text_input.setGeometry(QtCore.QRect(10, 10, 235, 250))
        self.text_input.setObjectName('text_input')
        self.input_output_box.addTab(self.manual_input_tab, '')
        # "Results" tab: read-only text area, disabled until a run succeeds.
        self.results_tab = QtWidgets.QWidget()
        self.results_tab.setObjectName('results_tab')
        self.results_scroll_box = QtWidgets.QScrollArea(self.results_tab)
        self.results_scroll_box.setGeometry(QtCore.QRect(10, 10, 235, 250))
        self.results_scroll_box.setWidgetResizable(True)
        self.results_scroll_box.setObjectName('results_scroll_box')
        self.results_content = QtWidgets.QWidget()
        self.results_content.setGeometry(QtCore.QRect(0, 0, 230, 250))
        self.results_content.setObjectName('results_content')
        self.results_scroll_box.setWidget(self.results_content)
        self.results_content_text = QtWidgets.QTextEdit(self.results_content)
        self.results_content_text.setGeometry(QtCore.QRect(-1, -1, 235, 250))
        self.results_content_text.setReadOnly(True)
        self.results_content_text.setObjectName('results_content_text')
        self.input_output_box.addTab(self.results_tab, '')
        self.input_output_box.setTabEnabled(2, False)
        # Analysis-method checkboxes inside a borderless group box.
        font.setPointSize(14)
        self.group_box_1 = QtWidgets.QGroupBox(self.main_window)
        self.group_box_1.setGeometry(QtCore.QRect(280, 110, 160, 140))
        self.group_box_1.setFont(font)
        self.group_box_1.setTitle('')
        self.group_box_1.setAlignment(QtCore.Qt.AlignCenter)
        self.group_box_1.setFlat(False)
        self.group_box_1.setCheckable(False)
        self.group_box_1.setObjectName('group_box_1')
        self.vertical_layout_widget_1 = QtWidgets.QWidget(self.group_box_1)
        self.vertical_layout_widget_1.setGeometry(QtCore.QRect(9, 0, 141, 141))
        self.vertical_layout_widget_1.setObjectName('vertical_layout_widget_1')
        self.vertical_layout_1 = QtWidgets.QVBoxLayout(
            self.vertical_layout_widget_1)
        self.vertical_layout_1.setContentsMargins(0, 0, 0, 0)
        self.vertical_layout_1.setObjectName('vertical_layout_1')
        self.pronoun_checkbox = QtWidgets.QCheckBox(
            self.vertical_layout_widget_1)
        self.pronoun_checkbox.setFont(font)
        self.pronoun_checkbox.setObjectName('pronoun_checkbox')
        self.vertical_layout_1.addWidget(self.pronoun_checkbox)
        self.lexical_checkbox = QtWidgets.QCheckBox(
            self.vertical_layout_widget_1)
        self.lexical_checkbox.setFont(font)
        self.lexical_checkbox.setObjectName('lexical_checkbox')
        self.vertical_layout_1.addWidget(self.lexical_checkbox)
        self.rule_based_checkbox = QtWidgets.QCheckBox(
            self.vertical_layout_widget_1)
        self.rule_based_checkbox.setFont(font)
        self.rule_based_checkbox.setObjectName('rule_based_checkbox')
        self.vertical_layout_1.addWidget(self.rule_based_checkbox)
        self.machine_learning_checkbox = QtWidgets.QCheckBox(
            self.vertical_layout_widget_1)
        self.machine_learning_checkbox.setFont(font)
        self.machine_learning_checkbox.setObjectName(
            'machine_learning_checkbox')
        self.vertical_layout_1.addWidget(self.machine_learning_checkbox)
        # Help panel (currently an empty scroll area).
        self.help_scroll_box = QtWidgets.QScrollArea(self.main_window)
        self.help_scroll_box.setGeometry(QtCore.QRect(280, 260, 160, 140))
        self.help_scroll_box.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.help_scroll_box.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.help_scroll_box.setWidgetResizable(True)
        self.help_scroll_box.setObjectName('help_scroll_box')
        self.help_content = QtWidgets.QWidget()
        self.help_content.setGeometry(QtCore.QRect(0, 0, 158, 138))
        self.help_content.setObjectName('help_content')
        self.help_scroll_box.setWidget(self.help_content)
        # filename -> path of files queued for analysis.
        self.selected_files = {}
        self.input_output_box.setCurrentIndex(0)
        self.retranslateUI()
        QtCore.QMetaObject.connectSlotsByName(self.main_window)

    def retranslateUI(self):
        """Apply the translated display strings to all visible widgets."""
        _translate = QtCore.QCoreApplication.translate
        self.main_window.setWindowTitle(_translate('main_window',
            'SentiCompare'))
        self.add_button.setText(_translate('main_window', 'Add'))
        self.delete_button.setText(_translate('main_window', 'Delete'))
        self.input_output_box.setTabText(self.input_output_box.indexOf(self
            .select_files_tab), _translate('main_window', 'Select Files'))
        self.input_output_box.setTabText(self.input_output_box.indexOf(self
            .manual_input_tab), _translate('main_window', 'Manual Input'))
        self.input_output_box.setTabText(self.input_output_box.indexOf(self
            .results_tab), _translate('main_window', 'Results'))
        self.run_button.setText(_translate('main_window', 'Run'))
        self.quit_button.setText(_translate('main_window', 'Quit'))
        self.pronoun_checkbox.setText(_translate('main_window',
            'Pronoun Usage'))
        self.lexical_checkbox.setText(_translate('main_window', 'Lexical'))
        self.rule_based_checkbox.setText(_translate('main_window',
            'Rule Based'))
        self.machine_learning_checkbox.setText(_translate('main_window',
            'Machine Learning'))
        self.branding_label.setText(_translate('main_window', 'SentiCompare'))

    def showWindow(self):
        """Make the main application window visible on screen."""
        self.main_window.show()

    def selectFiles(self):
        """Open a file dialog and add chosen .txt files to the file list.

        Choosing a directory adds every non-hidden ``.txt`` file inside it.
        Duplicate detection is keyed on filename — the dict key — matching
        how removeFiles deletes entries.
        """
        file_dialog = FileDialog(self.main_window)
        file_dialog.setFilters(['Text files (*.txt)'])
        # NOTE(review): attribute assignment, not a setter call -- confirm
        # the FileDialog API reads `setDefaultFilterIndex` as plain data.
        file_dialog.setDefaultFilterIndex = 0
        file_dialog.setDefaultDirectory(os.path.expanduser('~'))
        file_dialog.exec()
        if file_dialog.getPath() == '':
            # Dialog cancelled: nothing selected.
            return
        elif file_dialog.getFilename()[2] == '':
            # A directory was chosen: add each visible .txt file in it.
            for file in os.listdir(file_dialog.getPath()):
                if file.endswith('.txt') and not file.startswith('.'):
                    file_path = os.path.join(file_dialog.getPath(), file)
                    # Fixed: compare the filename (the dict key), not the path.
                    if file not in self.selected_files:
                        self.selected_files[file] = file_path
                        item = QStandardItem(file)
                        item.setCheckable(True)
                        self.file_view_model.appendRow(item)
        # Single file chosen: fixed to key the duplicate check on filename.
        elif file_dialog.getFilename()[1] not in self.selected_files:
            self.selected_files[file_dialog.getFilename()[1]
                ] = file_dialog.getPath()
            item = QStandardItem(file_dialog.getFilename()[1])
            item.setCheckable(True)
            self.file_view_model.appendRow(item)

    def removeFiles(self):
        """Remove every checked entry from the list view and the
        selected_files mapping. Iterates backwards so row indices remain
        valid while rows are removed."""
        for i in range(self.file_view_model.rowCount() - 1, -1, -1):
            if self.file_view_model.item(i).checkState():
                filename = self.file_view_model.item(i).text()
                del self.selected_files[filename]
                self.file_view_model.removeRow(i)

    def run(self):
        """Validate options and input, run SentimentAnalyzer, and show the
        output in the (otherwise disabled) "Results" tab."""
        # At least one analysis method must be ticked.
        if not (self.pronoun_checkbox.isChecked() or self.lexical_checkbox.
                isChecked() or self.rule_based_checkbox.isChecked() or self.
                machine_learning_checkbox.isChecked()):
            message_box = QMessageBox()
            message_box.setIcon(QMessageBox.Warning)
            message_box.setWindowTitle('Missing Parameters')
            message_box.setText(
                "You haven't selected any methods of sentiment analysis. Please select at least one "
                + 'method from the list of options.')
            message_box.exec_()
            return
        # Tab index 2 is the read-only results tab -- nothing to run there.
        if self.input_output_box.currentIndex() == 2:
            message_box = QMessageBox()
            message_box.setIcon(QMessageBox.Warning)
            message_box.setWindowTitle('Select Input')
            message_box.setText(
                'You must be on the "Select Files" page or the "Manual Input" page to run '
                + 'an analysis. Please select one of those pages and try again.')
            message_box.exec_()
            return
        else:
            progress_bar = QtWidgets.QProgressDialog(
                'Running Sentiment Analysis...', 'Cancel', 0, 100,
                self.main_window)
            progress_bar.setValue(0)
            progress_bar.setCancelButton(None)
            progress_bar.setWindowModality(QtCore.Qt.WindowModal)
            progress_bar.resize(400, 50)
            progress_bar.show()
            # Tab 0 = selected-files dict, tab 1 = manually entered text.
            if self.input_output_box.currentIndex() == 0:
                sentiment_analyzer = SentimentAnalyzer(self.selected_files,
                    progress_bar, pronoun=self.pronoun_checkbox.isChecked(),
                    lexical=self.lexical_checkbox.isChecked(), rule_based=
                    self.rule_based_checkbox.isChecked(), machine_learning=
                    self.machine_learning_checkbox.isChecked())
            else:
                sentiment_analyzer = SentimentAnalyzer(self.text_input.
                    toPlainText(), progress_bar, pronoun=self.
                    pronoun_checkbox.isChecked(), lexical=self.
                    lexical_checkbox.isChecked(), rule_based=self.
                    rule_based_checkbox.isChecked(), machine_learning=self.
                    machine_learning_checkbox.isChecked())
            results = sentiment_analyzer.runAnalyses()
            progress_bar.close()
            if results:
                self.results_content_text.setText(results)
                self.input_output_box.setTabEnabled(2, True)
                self.input_output_box.setCurrentIndex(2)
            else:
                # Falsy result means there was no input to analyze.
                message_box = QMessageBox()
                message_box.setIcon(QMessageBox.Warning)
                message_box.setWindowTitle('Missing Input')
                message_box.setText(
                    "You haven't added any input to analyze. Please select one or more files or "
                    + 'input some data manually.')
                message_box.exec_()
                return
|
import os
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QStandardItem, QStandardItemModel
from PyQt5.QtWidgets import QMessageBox
from src import FileDialog, SentimentAnalyzer
class UIMainWindow(object):
    """Main SentiCompare window: file/manual input tabs, analysis-method
    checkboxes, and a results tab wired to SentimentAnalyzer.

    Fix applied: selectFiles' duplicate detection compared full paths against
    the filename keys of ``self.selected_files`` and therefore never caught a
    re-added file; it now compares filenames (the actual dict keys, as used
    by removeFiles).
    """

    def __init__(self):
        """Build the fixed-size 450x460 main window and all child widgets."""
        # Base window: fixed size, default 14pt Myriad Pro.
        font = QtGui.QFont()
        font.setFamily('Myriad Pro')
        font.setPointSize(14)
        self.main_window = QtWidgets.QWidget()
        self.main_window.setFont(font)
        self.main_window.setObjectName('main_window')
        self.main_window.setWindowModality(QtCore.Qt.WindowModal)
        self.main_window.resize(450, 460)
        size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,
                                            QtWidgets.QSizePolicy.Fixed)
        size_policy.setHorizontalStretch(0)
        size_policy.setVerticalStretch(0)
        size_policy.setHeightForWidth(
            self.main_window.sizePolicy().hasHeightForWidth())
        self.main_window.setSizePolicy(size_policy)
        self.main_window.setMinimumSize(QtCore.QSize(450, 460))
        self.main_window.setMaximumSize(QtCore.QSize(450, 460))
        self.main_window.setBaseSize(QtCore.QSize(450, 460))
        # Branding: logo image plus large "SentiCompare" label.
        self.branding_icon = QtWidgets.QLabel(self.main_window)
        self.branding_icon.setGeometry(QtCore.QRect(20, 5, 90, 90))
        self.branding_icon.setText('')
        self.branding_icon.setPixmap(
            QtGui.QPixmap('../images/senticompare_logo.png'))
        self.branding_icon.setAlignment(
            QtCore.Qt.AlignJustify | QtCore.Qt.AlignVCenter)
        self.branding_icon.setObjectName('branding_icon')
        self.branding_label = QtWidgets.QLabel(self.main_window)
        self.branding_label.setGeometry(QtCore.QRect(110, 5, 330, 90))
        # Blue-grey WindowText colour for the branding label in every state.
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText,
            brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
        brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText,
            brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
        brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText,
            brush)
        brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
        self.branding_label.setPalette(palette)
        font = QtGui.QFont()
        font.setFamily('Optima')
        font.setPointSize(50)
        self.branding_label.setFont(font)
        self.branding_label.setObjectName('branding_label')
        # Bottom row: Run / Quit buttons.
        self.horizontal_layout_widget_1 = QtWidgets.QWidget(self.main_window)
        self.horizontal_layout_widget_1.setGeometry(QtCore.QRect(10, 410,
            430, 50))
        self.horizontal_layout_widget_1.setObjectName(
            'horizontal_layout_widget_1')
        self.horizontal_layout_1 = QtWidgets.QHBoxLayout(
            self.horizontal_layout_widget_1)
        self.horizontal_layout_1.setContentsMargins(0, 0, 0, 0)
        self.horizontal_layout_1.setObjectName('horizontal_layout_1')
        self.run_button = QtWidgets.QPushButton(
            self.horizontal_layout_widget_1)
        self.run_button.setObjectName('run_button')
        self.run_button.clicked.connect(self.run)
        self.horizontal_layout_1.addWidget(self.run_button)
        self.quit_button = QtWidgets.QPushButton(
            self.horizontal_layout_widget_1)
        self.quit_button.setObjectName('quit_button')
        self.quit_button.clicked.connect(self.main_window.close)
        self.horizontal_layout_1.addWidget(self.quit_button)
        # "Select Files" tab: list of chosen files with Add/Delete buttons.
        self.select_files_tab = QtWidgets.QWidget()
        self.select_files_tab.setObjectName('select_files_tab')
        self.horizontal_layout_widget_2 = QtWidgets.QWidget(
            self.select_files_tab)
        self.horizontal_layout_widget_2.setGeometry(QtCore.QRect(10, 230,
            230, 50))
        self.horizontal_layout_widget_2.setObjectName(
            'horizontal_layout_widget_2')
        self.horizontal_layout_2 = QtWidgets.QHBoxLayout(
            self.horizontal_layout_widget_2)
        self.horizontal_layout_2.setContentsMargins(0, 0, 0, 0)
        self.horizontal_layout_2.setObjectName('horizontal_layout_2')
        # The same QFont object is deliberately mutated and re-used below.
        font.setFamily('Myriad Pro')
        font.setPointSize(12)
        self.input_output_box = QtWidgets.QTabWidget(self.main_window)
        self.input_output_box.setGeometry(QtCore.QRect(10, 100, 260, 300))
        self.input_output_box.setFont(font)
        self.input_output_box.setCursor(
            QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.input_output_box.setTabPosition(QtWidgets.QTabWidget.North)
        self.input_output_box.setTabShape(QtWidgets.QTabWidget.Rounded)
        self.input_output_box.setTabsClosable(False)
        self.input_output_box.setObjectName('input_output_box')
        self.file_view = QtWidgets.QListView(self.select_files_tab)
        self.file_view.setGeometry(QtCore.QRect(10, 10, 235, 210))
        self.file_view.setObjectName('file_view')
        self.file_view_model = QStandardItemModel(self.file_view)
        self.file_view.setModel(self.file_view_model)
        self.file_view.show()
        self.input_output_box.addTab(self.select_files_tab, '')
        self.add_button = QtWidgets.QPushButton(
            self.horizontal_layout_widget_2)
        self.add_button.setFont(font)
        self.add_button.setObjectName('add_button')
        self.add_button.clicked.connect(self.selectFiles)
        self.horizontal_layout_2.addWidget(self.add_button)
        self.delete_button = QtWidgets.QPushButton(
            self.horizontal_layout_widget_2)
        self.delete_button.setFont(font)
        self.delete_button.setObjectName('delete_button')
        self.delete_button.clicked.connect(self.removeFiles)
        self.horizontal_layout_2.addWidget(self.delete_button)
        # "Manual Input" tab: free-form text entry.
        self.manual_input_tab = QtWidgets.QWidget()
        self.manual_input_tab.setObjectName('manual_input_tab')
        self.text_input = QtWidgets.QTextEdit(self.manual_input_tab)
        self.text_input.setGeometry(QtCore.QRect(10, 10, 235, 250))
        self.text_input.setObjectName('text_input')
        self.input_output_box.addTab(self.manual_input_tab, '')
        # "Results" tab: read-only text area, disabled until a run succeeds.
        self.results_tab = QtWidgets.QWidget()
        self.results_tab.setObjectName('results_tab')
        self.results_scroll_box = QtWidgets.QScrollArea(self.results_tab)
        self.results_scroll_box.setGeometry(QtCore.QRect(10, 10, 235, 250))
        self.results_scroll_box.setWidgetResizable(True)
        self.results_scroll_box.setObjectName('results_scroll_box')
        self.results_content = QtWidgets.QWidget()
        self.results_content.setGeometry(QtCore.QRect(0, 0, 230, 250))
        self.results_content.setObjectName('results_content')
        self.results_scroll_box.setWidget(self.results_content)
        self.results_content_text = QtWidgets.QTextEdit(self.results_content)
        self.results_content_text.setGeometry(QtCore.QRect(-1, -1, 235, 250))
        self.results_content_text.setReadOnly(True)
        self.results_content_text.setObjectName('results_content_text')
        self.input_output_box.addTab(self.results_tab, '')
        self.input_output_box.setTabEnabled(2, False)
        # Analysis-method checkboxes inside a borderless group box.
        font.setPointSize(14)
        self.group_box_1 = QtWidgets.QGroupBox(self.main_window)
        self.group_box_1.setGeometry(QtCore.QRect(280, 110, 160, 140))
        self.group_box_1.setFont(font)
        self.group_box_1.setTitle('')
        self.group_box_1.setAlignment(QtCore.Qt.AlignCenter)
        self.group_box_1.setFlat(False)
        self.group_box_1.setCheckable(False)
        self.group_box_1.setObjectName('group_box_1')
        self.vertical_layout_widget_1 = QtWidgets.QWidget(self.group_box_1)
        self.vertical_layout_widget_1.setGeometry(QtCore.QRect(9, 0, 141, 141))
        self.vertical_layout_widget_1.setObjectName('vertical_layout_widget_1')
        self.vertical_layout_1 = QtWidgets.QVBoxLayout(
            self.vertical_layout_widget_1)
        self.vertical_layout_1.setContentsMargins(0, 0, 0, 0)
        self.vertical_layout_1.setObjectName('vertical_layout_1')
        self.pronoun_checkbox = QtWidgets.QCheckBox(
            self.vertical_layout_widget_1)
        self.pronoun_checkbox.setFont(font)
        self.pronoun_checkbox.setObjectName('pronoun_checkbox')
        self.vertical_layout_1.addWidget(self.pronoun_checkbox)
        self.lexical_checkbox = QtWidgets.QCheckBox(
            self.vertical_layout_widget_1)
        self.lexical_checkbox.setFont(font)
        self.lexical_checkbox.setObjectName('lexical_checkbox')
        self.vertical_layout_1.addWidget(self.lexical_checkbox)
        self.rule_based_checkbox = QtWidgets.QCheckBox(
            self.vertical_layout_widget_1)
        self.rule_based_checkbox.setFont(font)
        self.rule_based_checkbox.setObjectName('rule_based_checkbox')
        self.vertical_layout_1.addWidget(self.rule_based_checkbox)
        self.machine_learning_checkbox = QtWidgets.QCheckBox(
            self.vertical_layout_widget_1)
        self.machine_learning_checkbox.setFont(font)
        self.machine_learning_checkbox.setObjectName(
            'machine_learning_checkbox')
        self.vertical_layout_1.addWidget(self.machine_learning_checkbox)
        # Help panel (currently an empty scroll area).
        self.help_scroll_box = QtWidgets.QScrollArea(self.main_window)
        self.help_scroll_box.setGeometry(QtCore.QRect(280, 260, 160, 140))
        self.help_scroll_box.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.help_scroll_box.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.help_scroll_box.setWidgetResizable(True)
        self.help_scroll_box.setObjectName('help_scroll_box')
        self.help_content = QtWidgets.QWidget()
        self.help_content.setGeometry(QtCore.QRect(0, 0, 158, 138))
        self.help_content.setObjectName('help_content')
        self.help_scroll_box.setWidget(self.help_content)
        # filename -> path of files queued for analysis.
        self.selected_files = {}
        self.input_output_box.setCurrentIndex(0)
        self.retranslateUI()
        QtCore.QMetaObject.connectSlotsByName(self.main_window)

    def retranslateUI(self):
        """Apply the translated display strings to all visible widgets."""
        _translate = QtCore.QCoreApplication.translate
        self.main_window.setWindowTitle(_translate('main_window',
            'SentiCompare'))
        self.add_button.setText(_translate('main_window', 'Add'))
        self.delete_button.setText(_translate('main_window', 'Delete'))
        self.input_output_box.setTabText(self.input_output_box.indexOf(self
            .select_files_tab), _translate('main_window', 'Select Files'))
        self.input_output_box.setTabText(self.input_output_box.indexOf(self
            .manual_input_tab), _translate('main_window', 'Manual Input'))
        self.input_output_box.setTabText(self.input_output_box.indexOf(self
            .results_tab), _translate('main_window', 'Results'))
        self.run_button.setText(_translate('main_window', 'Run'))
        self.quit_button.setText(_translate('main_window', 'Quit'))
        self.pronoun_checkbox.setText(_translate('main_window',
            'Pronoun Usage'))
        self.lexical_checkbox.setText(_translate('main_window', 'Lexical'))
        self.rule_based_checkbox.setText(_translate('main_window',
            'Rule Based'))
        self.machine_learning_checkbox.setText(_translate('main_window',
            'Machine Learning'))
        self.branding_label.setText(_translate('main_window', 'SentiCompare'))

    def showWindow(self):
        """Make the main application window visible on screen."""
        self.main_window.show()

    def selectFiles(self):
        """Open a file dialog and add chosen .txt files to the file list.

        Choosing a directory adds every non-hidden ``.txt`` file inside it.
        Duplicate detection is keyed on filename — the dict key — matching
        how removeFiles deletes entries.
        """
        file_dialog = FileDialog(self.main_window)
        file_dialog.setFilters(['Text files (*.txt)'])
        # NOTE(review): attribute assignment, not a setter call -- confirm
        # the FileDialog API reads `setDefaultFilterIndex` as plain data.
        file_dialog.setDefaultFilterIndex = 0
        file_dialog.setDefaultDirectory(os.path.expanduser('~'))
        file_dialog.exec()
        if file_dialog.getPath() == '':
            # Dialog cancelled: nothing selected.
            return
        elif file_dialog.getFilename()[2] == '':
            # A directory was chosen: add each visible .txt file in it.
            for file in os.listdir(file_dialog.getPath()):
                if file.endswith('.txt') and not file.startswith('.'):
                    file_path = os.path.join(file_dialog.getPath(), file)
                    # Fixed: compare the filename (the dict key), not the path.
                    if file not in self.selected_files:
                        self.selected_files[file] = file_path
                        item = QStandardItem(file)
                        item.setCheckable(True)
                        self.file_view_model.appendRow(item)
        # Single file chosen: fixed to key the duplicate check on filename.
        elif file_dialog.getFilename()[1] not in self.selected_files:
            self.selected_files[file_dialog.getFilename()[1]
                ] = file_dialog.getPath()
            item = QStandardItem(file_dialog.getFilename()[1])
            item.setCheckable(True)
            self.file_view_model.appendRow(item)

    def removeFiles(self):
        """Remove every checked entry from the list view and the
        selected_files mapping. Iterates backwards so row indices remain
        valid while rows are removed."""
        for i in range(self.file_view_model.rowCount() - 1, -1, -1):
            if self.file_view_model.item(i).checkState():
                filename = self.file_view_model.item(i).text()
                del self.selected_files[filename]
                self.file_view_model.removeRow(i)

    def run(self):
        """Validate options and input, run SentimentAnalyzer, and show the
        output in the (otherwise disabled) "Results" tab."""
        # At least one analysis method must be ticked.
        if not (self.pronoun_checkbox.isChecked() or self.lexical_checkbox.
                isChecked() or self.rule_based_checkbox.isChecked() or self.
                machine_learning_checkbox.isChecked()):
            message_box = QMessageBox()
            message_box.setIcon(QMessageBox.Warning)
            message_box.setWindowTitle('Missing Parameters')
            message_box.setText(
                "You haven't selected any methods of sentiment analysis. Please select at least one "
                + 'method from the list of options.')
            message_box.exec_()
            return
        # Tab index 2 is the read-only results tab -- nothing to run there.
        if self.input_output_box.currentIndex() == 2:
            message_box = QMessageBox()
            message_box.setIcon(QMessageBox.Warning)
            message_box.setWindowTitle('Select Input')
            message_box.setText(
                'You must be on the "Select Files" page or the "Manual Input" page to run '
                + 'an analysis. Please select one of those pages and try again.')
            message_box.exec_()
            return
        else:
            progress_bar = QtWidgets.QProgressDialog(
                'Running Sentiment Analysis...', 'Cancel', 0, 100,
                self.main_window)
            progress_bar.setValue(0)
            progress_bar.setCancelButton(None)
            progress_bar.setWindowModality(QtCore.Qt.WindowModal)
            progress_bar.resize(400, 50)
            progress_bar.show()
            # Tab 0 = selected-files dict, tab 1 = manually entered text.
            if self.input_output_box.currentIndex() == 0:
                sentiment_analyzer = SentimentAnalyzer(self.selected_files,
                    progress_bar, pronoun=self.pronoun_checkbox.isChecked(),
                    lexical=self.lexical_checkbox.isChecked(), rule_based=
                    self.rule_based_checkbox.isChecked(), machine_learning=
                    self.machine_learning_checkbox.isChecked())
            else:
                sentiment_analyzer = SentimentAnalyzer(self.text_input.
                    toPlainText(), progress_bar, pronoun=self.
                    pronoun_checkbox.isChecked(), lexical=self.
                    lexical_checkbox.isChecked(), rule_based=self.
                    rule_based_checkbox.isChecked(), machine_learning=self.
                    machine_learning_checkbox.isChecked())
            results = sentiment_analyzer.runAnalyses()
            progress_bar.close()
            if results:
                self.results_content_text.setText(results)
                self.input_output_box.setTabEnabled(2, True)
                self.input_output_box.setCurrentIndex(2)
            else:
                # Falsy result means there was no input to analyze.
                message_box = QMessageBox()
                message_box.setIcon(QMessageBox.Warning)
                message_box.setWindowTitle('Missing Input')
                message_box.setText(
                    "You haven't added any input to analyze. Please select one or more files or "
                    + 'input some data manually.')
                message_box.exec_()
                return
|
# ================================================== #
# MAIN WINDOW #
# ================================================== #
# Author: Brady Hammond #
# Created: 11/21/2017 #
# Last Edited: N/A #
# Last Edited By: N/A #
# ================================================== #
# FILE SETUP #
# ================================================== #
# Import statements
import os
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QStandardItem, QStandardItemModel
from PyQt5.QtWidgets import QMessageBox
from src import FileDialog, SentimentAnalyzer
# ================================================== #
# CLASS DEFINITION #
# ================================================== #
# UIMainWindow class definition
class UIMainWindow(object):
# Define __init__ function
    def __init__(self):
        """Build the SentiCompare main window and all of its child widgets.

        Constructs the fixed-size (450x460) window, branding header, the
        input/output tab widget (file selection, manual input, results),
        the analysis-method checkboxes, help area, and the Run/Quit
        buttons, then wires signals and applies translated labels.
        """
        # Create main window
        font = QtGui.QFont()
        font.setFamily("Myriad Pro")
        font.setPointSize(14)
        self.main_window = QtWidgets.QWidget()
        self.main_window.setFont(font)
        self.main_window.setObjectName("main_window")
        self.main_window.setWindowModality(QtCore.Qt.WindowModal)
        self.main_window.resize(450, 460)
        # Fixed size policy plus identical min/max sizes make the window
        # non-resizable.
        size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        size_policy.setHorizontalStretch(0)
        size_policy.setVerticalStretch(0)
        size_policy.setHeightForWidth(self.main_window.sizePolicy().hasHeightForWidth())
        self.main_window.setSizePolicy(size_policy)
        self.main_window.setMinimumSize(QtCore.QSize(450, 460))
        self.main_window.setMaximumSize(QtCore.QSize(450, 460))
        self.main_window.setBaseSize(QtCore.QSize(450, 460))
        # Create branding icon
        # NOTE(review): the logo path is relative to the working directory,
        # not to this file -- verify it resolves when launched elsewhere.
        self.branding_icon = QtWidgets.QLabel(self.main_window)
        self.branding_icon.setGeometry(QtCore.QRect(20, 5, 90, 90))
        self.branding_icon.setText("")
        self.branding_icon.setPixmap(QtGui.QPixmap("../images/senticompare_logo.png"))
        self.branding_icon.setAlignment(QtCore.Qt.AlignJustify | QtCore.Qt.AlignVCenter)
        self.branding_icon.setObjectName("branding_icon")
        # Create branding label
        self.branding_label = QtWidgets.QLabel(self.main_window)
        self.branding_label.setGeometry(QtCore.QRect(110, 5, 330, 90))
        # Palette: blue-grey text when active/inactive, grey when disabled.
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
        brush = QtGui.QBrush(QtGui.QColor(81, 108, 146))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
        brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
        self.branding_label.setPalette(palette)
        font = QtGui.QFont()
        font.setFamily("Optima")
        font.setPointSize(50)
        self.branding_label.setFont(font)
        self.branding_label.setObjectName("branding_label")
        # Create first horizontal layout (bottom Run/Quit button row)
        self.horizontal_layout_widget_1 = QtWidgets.QWidget(self.main_window)
        self.horizontal_layout_widget_1.setGeometry(QtCore.QRect(10, 410, 430, 50))
        self.horizontal_layout_widget_1.setObjectName("horizontal_layout_widget_1")
        self.horizontal_layout_1 = QtWidgets.QHBoxLayout(self.horizontal_layout_widget_1)
        self.horizontal_layout_1.setContentsMargins(0, 0, 0, 0)
        self.horizontal_layout_1.setObjectName("horizontal_layout_1")
        # Create run button
        self.run_button = QtWidgets.QPushButton(self.horizontal_layout_widget_1)
        self.run_button.setObjectName("run_button")
        self.run_button.clicked.connect(self.run)
        # Add run button to first horizontal layout
        self.horizontal_layout_1.addWidget(self.run_button)
        # Create quit button
        self.quit_button = QtWidgets.QPushButton(self.horizontal_layout_widget_1)
        self.quit_button.setObjectName("quit_button")
        self.quit_button.clicked.connect(self.main_window.close)
        # Add quit button to first horizontal layout
        self.horizontal_layout_1.addWidget(self.quit_button)
        # Create file selection tab
        self.select_files_tab = QtWidgets.QWidget()
        self.select_files_tab.setObjectName("select_files_tab")
        # Create second horizontal layout (Add/Delete button row on the tab)
        self.horizontal_layout_widget_2 = QtWidgets.QWidget(self.select_files_tab)
        self.horizontal_layout_widget_2.setGeometry(QtCore.QRect(10, 230, 230, 50))
        self.horizontal_layout_widget_2.setObjectName("horizontal_layout_widget_2")
        self.horizontal_layout_2 = QtWidgets.QHBoxLayout(self.horizontal_layout_widget_2)
        self.horizontal_layout_2.setContentsMargins(0, 0, 0, 0)
        self.horizontal_layout_2.setObjectName("horizontal_layout_2")
        # Create input/output tab window
        font.setFamily("Myriad Pro")
        font.setPointSize(12)
        self.input_output_box = QtWidgets.QTabWidget(self.main_window)
        self.input_output_box.setGeometry(QtCore.QRect(10, 100, 260, 300))
        self.input_output_box.setFont(font)
        self.input_output_box.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.input_output_box.setTabPosition(QtWidgets.QTabWidget.North)
        self.input_output_box.setTabShape(QtWidgets.QTabWidget.Rounded)
        self.input_output_box.setTabsClosable(False)
        self.input_output_box.setObjectName("input_output_box")
        # Create file view (list of files queued for analysis)
        self.file_view = QtWidgets.QListView(self.select_files_tab)
        self.file_view.setGeometry(QtCore.QRect(10, 10, 235, 210))
        self.file_view.setObjectName("file_view")
        # Create file view model
        self.file_view_model = QStandardItemModel(self.file_view)
        # Add file view model to file view
        self.file_view.setModel(self.file_view_model)
        # Show file view
        self.file_view.show()
        # Add file selection tab to input/output tab window
        self.input_output_box.addTab(self.select_files_tab, "")
        # Create add button
        self.add_button = QtWidgets.QPushButton(self.horizontal_layout_widget_2)
        self.add_button.setFont(font)
        self.add_button.setObjectName("add_button")
        self.add_button.clicked.connect(self.selectFiles)
        # Add add button to second horizontal layout
        self.horizontal_layout_2.addWidget(self.add_button)
        # Create delete button
        self.delete_button = QtWidgets.QPushButton(self.horizontal_layout_widget_2)
        self.delete_button.setFont(font)
        self.delete_button.setObjectName("delete_button")
        self.delete_button.clicked.connect(self.removeFiles)
        # Add delete button to second horizontal layout
        self.horizontal_layout_2.addWidget(self.delete_button)
        # Create manual input tab
        self.manual_input_tab = QtWidgets.QWidget()
        self.manual_input_tab.setObjectName("manual_input_tab")
        # Create text input
        self.text_input = QtWidgets.QTextEdit(self.manual_input_tab)
        self.text_input.setGeometry(QtCore.QRect(10, 10, 235, 250))
        self.text_input.setObjectName("text_input")
        # Add text input to manual input tab
        self.input_output_box.addTab(self.manual_input_tab, "")
        # Create results tab
        self.results_tab = QtWidgets.QWidget()
        self.results_tab.setObjectName("results_tab")
        # Create results scroll box
        self.results_scroll_box = QtWidgets.QScrollArea(self.results_tab)
        self.results_scroll_box.setGeometry(QtCore.QRect(10, 10, 235, 250))
        self.results_scroll_box.setWidgetResizable(True)
        self.results_scroll_box.setObjectName("results_scroll_box")
        # Create results content
        self.results_content = QtWidgets.QWidget()
        self.results_content.setGeometry(QtCore.QRect(0, 0, 230, 250))
        self.results_content.setObjectName("results_content")
        self.results_scroll_box.setWidget(self.results_content)
        # Create results content text (read-only analysis output)
        self.results_content_text = QtWidgets.QTextEdit(self.results_content)
        self.results_content_text.setGeometry(QtCore.QRect(-1, -1, 235, 250))
        self.results_content_text.setReadOnly(True)
        self.results_content_text.setObjectName("results_content_text")
        # Add results tab to input/output tab window
        self.input_output_box.addTab(self.results_tab, "")
        # Disable results tab until an analysis has produced output
        self.input_output_box.setTabEnabled(2, False)
        # Create first group box (analysis method checkboxes)
        font.setPointSize(14)
        self.group_box_1 = QtWidgets.QGroupBox(self.main_window)
        self.group_box_1.setGeometry(QtCore.QRect(280, 110, 160, 140))
        self.group_box_1.setFont(font)
        self.group_box_1.setTitle("")
        self.group_box_1.setAlignment(QtCore.Qt.AlignCenter)
        self.group_box_1.setFlat(False)
        self.group_box_1.setCheckable(False)
        self.group_box_1.setObjectName("group_box_1")
        # Create first vertical layout
        self.vertical_layout_widget_1 = QtWidgets.QWidget(self.group_box_1)
        self.vertical_layout_widget_1.setGeometry(QtCore.QRect(9, 0, 141, 141))
        self.vertical_layout_widget_1.setObjectName("vertical_layout_widget_1")
        self.vertical_layout_1 = QtWidgets.QVBoxLayout(self.vertical_layout_widget_1)
        self.vertical_layout_1.setContentsMargins(0, 0, 0, 0)
        self.vertical_layout_1.setObjectName("vertical_layout_1")
        # Create pronoun checkbox
        self.pronoun_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)
        self.pronoun_checkbox.setFont(font)
        self.pronoun_checkbox.setObjectName("pronoun_checkbox")
        # Add pronoun checkbox to first vertical layout
        self.vertical_layout_1.addWidget(self.pronoun_checkbox)
        # Create lexical checkbox
        self.lexical_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)
        self.lexical_checkbox.setFont(font)
        self.lexical_checkbox.setObjectName("lexical_checkbox")
        # Add lexical checkbox to first vertical layout
        self.vertical_layout_1.addWidget(self.lexical_checkbox)
        # Create rule based checkbox
        self.rule_based_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)
        self.rule_based_checkbox.setFont(font)
        self.rule_based_checkbox.setObjectName("rule_based_checkbox")
        # Add rule_based checkbox to first vertical layout
        self.vertical_layout_1.addWidget(self.rule_based_checkbox)
        # Create machine learning checkbox
        self.machine_learning_checkbox = QtWidgets.QCheckBox(self.vertical_layout_widget_1)
        self.machine_learning_checkbox.setFont(font)
        self.machine_learning_checkbox.setObjectName("machine_learning_checkbox")
        # Add machine learning checkbox to first vertical layout
        self.vertical_layout_1.addWidget(self.machine_learning_checkbox)
        # Create help scroll box
        self.help_scroll_box = QtWidgets.QScrollArea(self.main_window)
        self.help_scroll_box.setGeometry(QtCore.QRect(280, 260, 160, 140))
        self.help_scroll_box.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.help_scroll_box.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.help_scroll_box.setWidgetResizable(True)
        self.help_scroll_box.setObjectName("help_scroll_box")
        # Create help content
        self.help_content = QtWidgets.QWidget()
        self.help_content.setGeometry(QtCore.QRect(0, 0, 158, 138))
        self.help_content.setObjectName("help_content")
        self.help_scroll_box.setWidget(self.help_content)
        # Create selected files variable
        # Maps filename -> full path for files queued for analysis.
        self.selected_files = {}
        # Set current tab
        self.input_output_box.setCurrentIndex(0)
        # Retranslate UI
        self.retranslateUI()
        # Connect UI slots
        QtCore.QMetaObject.connectSlotsByName(self.main_window)
# ============================================== #
# Define retranslateUI function
def retranslateUI(self):
# Add text to ui elements
_translate = QtCore.QCoreApplication.translate
self.main_window.setWindowTitle(_translate("main_window", "SentiCompare"))
self.add_button.setText(_translate("main_window", "Add"))
self.delete_button.setText(_translate("main_window", "Delete"))
self.input_output_box.setTabText(self.input_output_box.indexOf(self.select_files_tab),
_translate("main_window", "Select Files"))
self.input_output_box.setTabText(self.input_output_box.indexOf(self.manual_input_tab),
_translate("main_window", "Manual Input"))
self.input_output_box.setTabText(self.input_output_box.indexOf(self.results_tab),
_translate("main_window", "Results"))
self.run_button.setText(_translate("main_window", "Run"))
self.quit_button.setText(_translate("main_window", "Quit"))
self.pronoun_checkbox.setText(_translate("main_window", "Pronoun Usage"))
self.lexical_checkbox.setText(_translate("main_window", "Lexical"))
self.rule_based_checkbox.setText(_translate("main_window", "Rule Based"))
self.machine_learning_checkbox.setText(_translate("main_window", "Machine Learning"))
self.branding_label.setText(_translate("main_window", "SentiCompare"))
# ============================================== #
# Define showWindow function
    def showWindow(self):
        """Make the previously-constructed main window visible."""
        self.main_window.show()
# ============================================== #
# Define selectFiles function
def selectFiles(self):
# Create file dialog
file_dialog = FileDialog(self.main_window)
file_dialog.setFilters(["Text files (*.txt)"])
file_dialog.setDefaultFilterIndex = 0
file_dialog.setDefaultDirectory(os.path.expanduser('~'))
file_dialog.exec()
# Return if nothing was selected
if file_dialog.getPath() == '':
return
# Add files from selected directory to file list
elif file_dialog.getFilename()[2] == '':
for file in os.listdir(file_dialog.getPath()):
if file.endswith('.txt') and not file.startswith('.'):
file_path = os.path.join(file_dialog.getPath(), file)
if file_path not in self.selected_files:
self.selected_files[file] = file_path
item = QStandardItem(file)
item.setCheckable(True)
self.file_view_model.appendRow(item)
# Add selected file to list
else:
if file_dialog.getPath() not in self.selected_files:
self.selected_files[file_dialog.getFilename()[1]] = file_dialog.getPath()
item = QStandardItem(file_dialog.getFilename()[1])
item.setCheckable(True)
self.file_view_model.appendRow(item)
# ============================================== #
# Define removeFiles function
def removeFiles(self):
# Remove all checked files
for i in range(self.file_view_model.rowCount() - 1, -1, -1):
if self.file_view_model.item(i).checkState():
filename = self.file_view_model.item(i).text()
del self.selected_files[filename]
self.file_view_model.removeRow(i)
# ============================================== #
# Define run function
    def run(self):
        """Validate UI state, run the selected sentiment analyses, show results.

        Requires at least one analysis-method checkbox to be ticked and the
        current tab to be an input tab (file selection or manual input).
        On success the results tab is filled, enabled, and focused; every
        failure path shows a warning dialog instead.
        """
        # Check if an analysis method is selected
        if not (self.pronoun_checkbox.isChecked() or self.lexical_checkbox.isChecked() or
                self.rule_based_checkbox.isChecked() or self.machine_learning_checkbox.isChecked()):
            # Create and show an error message
            message_box = QMessageBox()
            message_box.setIcon(QMessageBox.Warning)
            message_box.setWindowTitle("Missing Parameters")
            message_box.setText("You haven't selected any methods of sentiment analysis. Please select at least one " +
                                "method from the list of options.")
            message_box.exec_()
            return
        # Check if the current tab is valid (index 2 is the results tab)
        if self.input_output_box.currentIndex() == 2:
            # Create and show error message
            message_box = QMessageBox()
            message_box.setIcon(QMessageBox.Warning)
            message_box.setWindowTitle("Select Input")
            message_box.setText("You must be on the \"Select Files\" page or the \"Manual Input\" page to run " +
                                "an analysis. Please select one of those pages and try again.")
            message_box.exec_()
            return
        else:
            # Modal progress dialog updated by SentimentAnalyzer during the run.
            progress_bar = QtWidgets.QProgressDialog("Running Sentiment Analysis...", "Cancel", 0, 100, self.main_window)
            progress_bar.setValue(0)
            progress_bar.setCancelButton(None)
            progress_bar.setWindowModality(QtCore.Qt.WindowModal)
            progress_bar.resize(400, 50)
            progress_bar.show()
            # Analyze selected files (tab 0 = file selection)
            if self.input_output_box.currentIndex() == 0:
                sentiment_analyzer = SentimentAnalyzer(self.selected_files, progress_bar, pronoun=self.pronoun_checkbox.isChecked(),
                                                       lexical=self.lexical_checkbox.isChecked(),
                                                       rule_based=self.rule_based_checkbox.isChecked(),
                                                       machine_learning=self.machine_learning_checkbox.isChecked())
            # Analyze manual input
            else:
                sentiment_analyzer = SentimentAnalyzer(self.text_input.toPlainText(), progress_bar, pronoun=self.pronoun_checkbox.isChecked(),
                                                       lexical=self.lexical_checkbox.isChecked(),
                                                       rule_based=self.rule_based_checkbox.isChecked(),
                                                       machine_learning=self.machine_learning_checkbox.isChecked())
            results = sentiment_analyzer.runAnalyses()
            progress_bar.close()
            # A falsy result means no input was provided.
            if results:
                self.results_content_text.setText(results)
                self.input_output_box.setTabEnabled(2, True)
                self.input_output_box.setCurrentIndex(2)
            else:
                message_box = QMessageBox()
                message_box.setIcon(QMessageBox.Warning)
                message_box.setWindowTitle("Missing Input")
                message_box.setText("You haven't added any input to analyze. Please select one or more files or " +
                                    "input some data manually.")
                message_box.exec_()
                return
# ================================================== #
# EOF #
# ================================================== #
|
[
5,
6,
7,
8,
9
] |
2,458 |
9a40861239268aa62075b77b3ed452f31bb14fac
|
<mask token>
def replay_train(mainDQN: dqn.DQN, targetDQN: dqn.DQN, train_batch: list
) ->float:
states = np.vstack([x[0] for x in train_batch])
actions = np.array([x[1] for x in train_batch])
rewards = np.array([x[2] for x in train_batch])
next_states = np.vstack([x[3] for x in train_batch])
done = np.array([x[4] for x in train_batch])
Q_target = rewards + DISCOUNT_RATE * np.max(targetDQN.predict(
next_states), axis=1) * ~done
X = states
y = mainDQN.predict(states)
y[np.arange(len(states)), actions] = Q_target
return mainDQN.update(X, y)
def bot_play(mainDQN: dqn.DQN, env: gym.Env) ->None:
state = env.reset()
reward_sum = 0
while True:
env.render()
action = np.argmax(mainDQN.predict(state))
state, reward, done, _ = env.step(action)
reward_sum += reward
if done:
print('\n Total Score : {}'.format(reward_sum))
break
def main():
replay_buffer = deque(maxlen=REPLAY_MEMORY)
last_100 = deque(maxlen=100)
step_list = []
loss_list = []
with tf.Session() as sess:
mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='main')
targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='target')
sess.run(tf.global_variables_initializer())
copy_ops = get_copy_var_ops('main', 'target')
sess.run(copy_ops)
for episode in range(MAX_EPISODE):
e = 1.0 / (episode / 10 + 1)
done = False
step_count = 0
state = env.reset()
loss = 0
while not done:
if np.random.rand() < e:
action = env.action_space.sample()
else:
action = np.argmax(mainDQN.predict(state))
next_states, reward, done, _ = env.step(action)
if done:
reward = -1
replay_buffer.append((state, action, reward, next_states, done)
)
if len(replay_buffer) > BATCH_SIZE:
minibatch = random.sample(replay_buffer, BATCH_SIZE)
loss, _ = replay_train(mainDQN, targetDQN, minibatch)
if step_count % TARGET_UPDATE_FREQUENCY == 0:
sess.run(copy_ops)
state = next_states
step_count += 1
print(' EP : {} | steps : {} | EP loss : {}'.format(episode + 1,
step_count, loss), end='\r')
step_list.append(step_count)
loss_list.append(loss)
last_100.append(step_count)
if len(last_100) == last_100.maxlen:
avg_reward = np.mean(last_100)
if avg_reward > 199:
print('\n game cleared, avg_reward : {}, episode : {}'.
format(avg_reward, episode + 1))
break
step_array = np.asarray(step_list)
loss_array = np.asarray(loss_list)
_, plot = plt.subplots(1, 2)
plot[0].plot(step_array)
plot[1].plot(loss_array)
plt.show()
<mask token>
|
<mask token>
def get_copy_var_ops(src_scope_name: str, dest_scope_name: str) ->list:
holder = []
src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=
src_scope_name)
dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=
dest_scope_name)
for src_var, dest_var in zip(src_vars, dest_vars):
holder.append(dest_var.assign(src_var.value()))
return holder
def replay_train(mainDQN: dqn.DQN, targetDQN: dqn.DQN, train_batch: list
) ->float:
states = np.vstack([x[0] for x in train_batch])
actions = np.array([x[1] for x in train_batch])
rewards = np.array([x[2] for x in train_batch])
next_states = np.vstack([x[3] for x in train_batch])
done = np.array([x[4] for x in train_batch])
Q_target = rewards + DISCOUNT_RATE * np.max(targetDQN.predict(
next_states), axis=1) * ~done
X = states
y = mainDQN.predict(states)
y[np.arange(len(states)), actions] = Q_target
return mainDQN.update(X, y)
def bot_play(mainDQN: dqn.DQN, env: gym.Env) ->None:
state = env.reset()
reward_sum = 0
while True:
env.render()
action = np.argmax(mainDQN.predict(state))
state, reward, done, _ = env.step(action)
reward_sum += reward
if done:
print('\n Total Score : {}'.format(reward_sum))
break
def main():
replay_buffer = deque(maxlen=REPLAY_MEMORY)
last_100 = deque(maxlen=100)
step_list = []
loss_list = []
with tf.Session() as sess:
mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='main')
targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='target')
sess.run(tf.global_variables_initializer())
copy_ops = get_copy_var_ops('main', 'target')
sess.run(copy_ops)
for episode in range(MAX_EPISODE):
e = 1.0 / (episode / 10 + 1)
done = False
step_count = 0
state = env.reset()
loss = 0
while not done:
if np.random.rand() < e:
action = env.action_space.sample()
else:
action = np.argmax(mainDQN.predict(state))
next_states, reward, done, _ = env.step(action)
if done:
reward = -1
replay_buffer.append((state, action, reward, next_states, done)
)
if len(replay_buffer) > BATCH_SIZE:
minibatch = random.sample(replay_buffer, BATCH_SIZE)
loss, _ = replay_train(mainDQN, targetDQN, minibatch)
if step_count % TARGET_UPDATE_FREQUENCY == 0:
sess.run(copy_ops)
state = next_states
step_count += 1
print(' EP : {} | steps : {} | EP loss : {}'.format(episode + 1,
step_count, loss), end='\r')
step_list.append(step_count)
loss_list.append(loss)
last_100.append(step_count)
if len(last_100) == last_100.maxlen:
avg_reward = np.mean(last_100)
if avg_reward > 199:
print('\n game cleared, avg_reward : {}, episode : {}'.
format(avg_reward, episode + 1))
break
step_array = np.asarray(step_list)
loss_array = np.asarray(loss_list)
_, plot = plt.subplots(1, 2)
plot[0].plot(step_array)
plot[1].plot(loss_array)
plt.show()
if __name__ == '__main__':
main()
|
<mask token>
env = gym.make('CartPole-v0')
INPUT_SIZE = env.observation_space.shape[0]
OUTPUT_SIZE = env.action_space.n
DISCOUNT_RATE = 0.9
REPLAY_MEMORY = 50000
BATCH_SIZE = 64
TARGET_UPDATE_FREQUENCY = 5
MAX_EPISODE = 1000
def get_copy_var_ops(src_scope_name: str, dest_scope_name: str) ->list:
holder = []
src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=
src_scope_name)
dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=
dest_scope_name)
for src_var, dest_var in zip(src_vars, dest_vars):
holder.append(dest_var.assign(src_var.value()))
return holder
def replay_train(mainDQN: dqn.DQN, targetDQN: dqn.DQN, train_batch: list
) ->float:
states = np.vstack([x[0] for x in train_batch])
actions = np.array([x[1] for x in train_batch])
rewards = np.array([x[2] for x in train_batch])
next_states = np.vstack([x[3] for x in train_batch])
done = np.array([x[4] for x in train_batch])
Q_target = rewards + DISCOUNT_RATE * np.max(targetDQN.predict(
next_states), axis=1) * ~done
X = states
y = mainDQN.predict(states)
y[np.arange(len(states)), actions] = Q_target
return mainDQN.update(X, y)
def bot_play(mainDQN: dqn.DQN, env: gym.Env) ->None:
state = env.reset()
reward_sum = 0
while True:
env.render()
action = np.argmax(mainDQN.predict(state))
state, reward, done, _ = env.step(action)
reward_sum += reward
if done:
print('\n Total Score : {}'.format(reward_sum))
break
def main():
replay_buffer = deque(maxlen=REPLAY_MEMORY)
last_100 = deque(maxlen=100)
step_list = []
loss_list = []
with tf.Session() as sess:
mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='main')
targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='target')
sess.run(tf.global_variables_initializer())
copy_ops = get_copy_var_ops('main', 'target')
sess.run(copy_ops)
for episode in range(MAX_EPISODE):
e = 1.0 / (episode / 10 + 1)
done = False
step_count = 0
state = env.reset()
loss = 0
while not done:
if np.random.rand() < e:
action = env.action_space.sample()
else:
action = np.argmax(mainDQN.predict(state))
next_states, reward, done, _ = env.step(action)
if done:
reward = -1
replay_buffer.append((state, action, reward, next_states, done)
)
if len(replay_buffer) > BATCH_SIZE:
minibatch = random.sample(replay_buffer, BATCH_SIZE)
loss, _ = replay_train(mainDQN, targetDQN, minibatch)
if step_count % TARGET_UPDATE_FREQUENCY == 0:
sess.run(copy_ops)
state = next_states
step_count += 1
print(' EP : {} | steps : {} | EP loss : {}'.format(episode + 1,
step_count, loss), end='\r')
step_list.append(step_count)
loss_list.append(loss)
last_100.append(step_count)
if len(last_100) == last_100.maxlen:
avg_reward = np.mean(last_100)
if avg_reward > 199:
print('\n game cleared, avg_reward : {}, episode : {}'.
format(avg_reward, episode + 1))
break
step_array = np.asarray(step_list)
loss_array = np.asarray(loss_list)
_, plot = plt.subplots(1, 2)
plot[0].plot(step_array)
plot[1].plot(loss_array)
plt.show()
if __name__ == '__main__':
main()
|
<mask token>
import numpy as np
import tensorflow as tf
from collections import deque
import random
import dqn
import gym
import matplotlib.pyplot as plt
env = gym.make('CartPole-v0')
INPUT_SIZE = env.observation_space.shape[0]
OUTPUT_SIZE = env.action_space.n
DISCOUNT_RATE = 0.9
REPLAY_MEMORY = 50000
BATCH_SIZE = 64
TARGET_UPDATE_FREQUENCY = 5
MAX_EPISODE = 1000
def get_copy_var_ops(src_scope_name: str, dest_scope_name: str) ->list:
holder = []
src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=
src_scope_name)
dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=
dest_scope_name)
for src_var, dest_var in zip(src_vars, dest_vars):
holder.append(dest_var.assign(src_var.value()))
return holder
def replay_train(mainDQN: dqn.DQN, targetDQN: dqn.DQN, train_batch: list
) ->float:
states = np.vstack([x[0] for x in train_batch])
actions = np.array([x[1] for x in train_batch])
rewards = np.array([x[2] for x in train_batch])
next_states = np.vstack([x[3] for x in train_batch])
done = np.array([x[4] for x in train_batch])
Q_target = rewards + DISCOUNT_RATE * np.max(targetDQN.predict(
next_states), axis=1) * ~done
X = states
y = mainDQN.predict(states)
y[np.arange(len(states)), actions] = Q_target
return mainDQN.update(X, y)
def bot_play(mainDQN: dqn.DQN, env: gym.Env) ->None:
state = env.reset()
reward_sum = 0
while True:
env.render()
action = np.argmax(mainDQN.predict(state))
state, reward, done, _ = env.step(action)
reward_sum += reward
if done:
print('\n Total Score : {}'.format(reward_sum))
break
def main():
replay_buffer = deque(maxlen=REPLAY_MEMORY)
last_100 = deque(maxlen=100)
step_list = []
loss_list = []
with tf.Session() as sess:
mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='main')
targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name='target')
sess.run(tf.global_variables_initializer())
copy_ops = get_copy_var_ops('main', 'target')
sess.run(copy_ops)
for episode in range(MAX_EPISODE):
e = 1.0 / (episode / 10 + 1)
done = False
step_count = 0
state = env.reset()
loss = 0
while not done:
if np.random.rand() < e:
action = env.action_space.sample()
else:
action = np.argmax(mainDQN.predict(state))
next_states, reward, done, _ = env.step(action)
if done:
reward = -1
replay_buffer.append((state, action, reward, next_states, done)
)
if len(replay_buffer) > BATCH_SIZE:
minibatch = random.sample(replay_buffer, BATCH_SIZE)
loss, _ = replay_train(mainDQN, targetDQN, minibatch)
if step_count % TARGET_UPDATE_FREQUENCY == 0:
sess.run(copy_ops)
state = next_states
step_count += 1
print(' EP : {} | steps : {} | EP loss : {}'.format(episode + 1,
step_count, loss), end='\r')
step_list.append(step_count)
loss_list.append(loss)
last_100.append(step_count)
if len(last_100) == last_100.maxlen:
avg_reward = np.mean(last_100)
if avg_reward > 199:
print('\n game cleared, avg_reward : {}, episode : {}'.
format(avg_reward, episode + 1))
break
step_array = np.asarray(step_list)
loss_array = np.asarray(loss_list)
_, plot = plt.subplots(1, 2)
plot[0].plot(step_array)
plot[1].plot(loss_array)
plt.show()
if __name__ == '__main__':
main()
|
"""
openAI gym 'cart pole-v0'
"""
import numpy as np
import tensorflow as tf
from collections import deque
import random
import dqn
import gym
import matplotlib.pyplot as plt
# define environment
env = gym.make('CartPole-v0')
# Network sizes come straight from the environment spec.
INPUT_SIZE = env.observation_space.shape[0]
OUTPUT_SIZE = env.action_space.n
# DISCOUNT_RATE: gamma in the Bellman target r + gamma * max_a' Q(s', a').
# REPLAY_MEMORY: capacity of the experience-replay deque.
# BATCH_SIZE: transitions sampled per training step.
# TARGET_UPDATE_FREQUENCY: copy main -> target weights every n steps.
# MAX_EPISODE: maximum number of training episodes.
DISCOUNT_RATE = 0.9
REPLAY_MEMORY = 50000
BATCH_SIZE = 64
TARGET_UPDATE_FREQUENCY = 5
MAX_EPISODE = 1000
# copy targetW from mainW values
def get_copy_var_ops(src_scope_name:str, dest_scope_name:str)->list:
    """Return TF assign ops copying trainable variables src scope -> dest scope."""
    src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                 scope=src_scope_name)
    dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                  scope=dest_scope_name)
    # Variables pair up positionally: both networks are built identically.
    return [dest.assign(src.value())
            for src, dest in zip(src_vars, dest_vars)]
def replay_train(mainDQN:dqn.DQN, targetDQN:dqn.DQN, train_batch:list)->float:
    """Run one gradient update on mainDQN from a minibatch of transitions.

    Each transition is (state, action, reward, next_state, done). Targets
    come from the frozen targetDQN; terminal transitions drop the bootstrap
    term via the elementwise ~done mask.
    """
    columns = list(zip(*train_batch))
    states = np.vstack(columns[0])
    actions = np.array(columns[1])
    rewards = np.array(columns[2])
    next_states = np.vstack(columns[3])
    done = np.array(columns[4])
    best_next_q = np.max(targetDQN.predict(next_states), axis=1)
    q_target = rewards + DISCOUNT_RATE * best_next_q * ~done
    # Only the taken action's Q-value is replaced; others keep the
    # network's own predictions so their gradient contribution is zero.
    y = mainDQN.predict(states)
    y[np.arange(len(states)), actions] = q_target
    return mainDQN.update(states, y)
def bot_play(mainDQN:dqn.DQN, env:gym.Env)->None:
    """Play one greedy episode with the trained network, rendering each step."""
    state = env.reset()
    reward_sum = 0
    done = False
    while not done:
        env.render()
        greedy_action = np.argmax(mainDQN.predict(state))
        state, reward, done, _ = env.step(greedy_action)
        reward_sum += reward
    print("\n Total Score : {}".format(reward_sum))
def main():
    """Train a DQN agent on CartPole-v0, then plot per-episode steps and loss.

    Uses experience replay plus a periodically-synced target network.
    Training stops early once the average episode length over the last 100
    episodes exceeds 199 (the CartPole-v0 "solved" threshold).
    """
    replay_buffer = deque(maxlen=REPLAY_MEMORY)
    # Rolling window of recent episode lengths for the stop criterion.
    last_100 = deque(maxlen=100)
    step_list = []
    loss_list = []
    with tf.Session() as sess:
        mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name="main")
        targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name="target")
        sess.run(tf.global_variables_initializer())
        copy_ops = get_copy_var_ops("main","target")
        sess.run(copy_ops)
        for episode in range(MAX_EPISODE):
            # Epsilon for e-greedy exploration, annealed as episodes pass.
            e = 1./ ((episode/10)+1)
            done = False
            step_count = 0
            state = env.reset()
            loss = 0
            while not done:
                if np.random.rand() < e:
                    action = env.action_space.sample()
                else:
                    action = np.argmax(mainDQN.predict(state))
                next_states, reward, done, _ = env.step(action)
                # Reward shaping: penalize the terminal (pole-dropped) step.
                if done:
                    reward = -1
                replay_buffer.append((state, action, reward, next_states, done))
                if len(replay_buffer) > BATCH_SIZE:
                    minibatch = random.sample(replay_buffer, BATCH_SIZE)
                    loss, _ = replay_train(mainDQN, targetDQN, minibatch)
                # Periodically sync the target network with the main one.
                if step_count % TARGET_UPDATE_FREQUENCY == 0:
                    sess.run(copy_ops)
                state = next_states
                step_count += 1
            print(" EP : {} | steps : {} | EP loss : {}".format(episode+1, step_count, loss), end="\r")
            step_list.append(step_count)
            loss_list.append(loss)
            last_100.append(step_count)
            if len(last_100) == last_100.maxlen:
                avg_reward = np.mean(last_100)
                if avg_reward>199:
                    print("\n game cleared, avg_reward : {}, episode : {}".format(avg_reward, episode+1))
                    break
    # Plot episode lengths (left) and final losses (right) side by side.
    step_array = np.asarray(step_list)
    loss_array = np.asarray(loss_list)
    _, plot = plt.subplots(1,2)
    plot[0].plot(step_array)
    plot[1].plot(loss_array)
    plt.show()
# Run training only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
[
3,
5,
6,
7,
8
] |
2,459 |
dcda8f26a06145579a9be6e5fbfdaed83d4908da
|
<mask token>
class TrieNode(object):
def __init__(self, char: str):
self.char = char
self.children = []
self.word_finished = False
self.counter = 1
self.OccurrenceList = {}
<mask token>
def insert(root, word: str, document):
node = root
for char in word:
found_in_child = False
for child in node.children:
if child.char == char:
child.counter += 1
node = child
found_in_child = True
break
if not found_in_child:
new_node = TrieNode(char)
node.children.append(new_node)
node = new_node
node.word_finished = True
if document not in node.OccurrenceList:
node.OccurrenceList[document] = 1
node.OccurrenceList[document] = node.OccurrenceList[document] + 1
def find_prefix(root, prefix: str) ->Tuple[bool, int]:
node = root
if not root.children:
return False, 0
for char in prefix:
char_not_found = True
for child in node.children:
if child.char == char:
char_not_found = False
node = child
break
if char_not_found:
print('Word Not Found: ' + prefix)
else:
print('Word Found: ' + prefix)
return True, node.OccurrenceList
<mask token>
|
<mask token>
class TrieNode(object):
def __init__(self, char: str):
self.char = char
self.children = []
self.word_finished = False
self.counter = 1
self.OccurrenceList = {}
<mask token>
def insert(root, word: str, document):
node = root
for char in word:
found_in_child = False
for child in node.children:
if child.char == char:
child.counter += 1
node = child
found_in_child = True
break
if not found_in_child:
new_node = TrieNode(char)
node.children.append(new_node)
node = new_node
node.word_finished = True
if document not in node.OccurrenceList:
node.OccurrenceList[document] = 1
node.OccurrenceList[document] = node.OccurrenceList[document] + 1
def find_prefix(root, prefix: str) ->Tuple[bool, int]:
node = root
if not root.children:
return False, 0
for char in prefix:
char_not_found = True
for child in node.children:
if child.char == char:
char_not_found = False
node = child
break
if char_not_found:
print('Word Not Found: ' + prefix)
else:
print('Word Found: ' + prefix)
return True, node.OccurrenceList
<mask token>
nltk.download('stopwords')
nltk.download('punkt')
<mask token>
stop_words.update(string.punctuation)
<mask token>
for file in files:
fname = file
file = open(fdata + str(file), encoding='utf8')
soup = BeautifulSoup(file.read(), 'html.parser')
[script.extract() for script in soup.findAll('script')]
[style.extract() for style in soup.findAll('style')]
words = word_tokenize(soup.get_text())
words = [i for i in words if all(j not in string.punctuation for j in i)]
for word in words:
if word.lower() not in stop_words and len(word) > 2 and word.isdigit(
) == False:
try:
word = word.lower().strip().encode('ascII')
except:
a = 1
else:
insert(root, word.decode('utf-8'), fname)
<mask token>
for word in inp:
boolw, dic = find_prefix(root, word.lower())
for key in dic:
if key not in rank:
rank[key] = dic[key]
else:
rank[key] = rank[key] + dic[key]
<mask token>
items.sort()
items.reverse()
if not items:
print('No results')
else:
print('Results : ')
for key in items:
print(key)
|
<mask token>
class TrieNode(object):
def __init__(self, char: str):
self.char = char
self.children = []
self.word_finished = False
self.counter = 1
self.OccurrenceList = {}
root = TrieNode('*')
def insert(root, word: str, document):
node = root
for char in word:
found_in_child = False
for child in node.children:
if child.char == char:
child.counter += 1
node = child
found_in_child = True
break
if not found_in_child:
new_node = TrieNode(char)
node.children.append(new_node)
node = new_node
node.word_finished = True
if document not in node.OccurrenceList:
node.OccurrenceList[document] = 1
node.OccurrenceList[document] = node.OccurrenceList[document] + 1
def find_prefix(root, prefix: str) ->Tuple[bool, int]:
node = root
if not root.children:
return False, 0
for char in prefix:
char_not_found = True
for child in node.children:
if child.char == char:
char_not_found = False
node = child
break
if char_not_found:
print('Word Not Found: ' + prefix)
else:
print('Word Found: ' + prefix)
return True, node.OccurrenceList
<mask token>
nltk.download('stopwords')
nltk.download('punkt')
<mask token>
stop_words = set(stopwords.words('english'))
stop_words.update(string.punctuation)
<mask token>
fdata = './input/'
files = os.listdir(fdata)
for file in files:
fname = file
file = open(fdata + str(file), encoding='utf8')
soup = BeautifulSoup(file.read(), 'html.parser')
[script.extract() for script in soup.findAll('script')]
[style.extract() for style in soup.findAll('style')]
words = word_tokenize(soup.get_text())
words = [i for i in words if all(j not in string.punctuation for j in i)]
for word in words:
if word.lower() not in stop_words and len(word) > 2 and word.isdigit(
) == False:
try:
word = word.lower().strip().encode('ascII')
except:
a = 1
else:
insert(root, word.decode('utf-8'), fname)
Enter = input('Please enter what you would like to search for: ')
inp = Enter.split(' ')
rank = {}
for word in inp:
boolw, dic = find_prefix(root, word.lower())
for key in dic:
if key not in rank:
rank[key] = dic[key]
else:
rank[key] = rank[key] + dic[key]
items = [(v, k) for k, v in rank.items()]
items.sort()
items.reverse()
if not items:
print('No results')
else:
print('Results : ')
for key in items:
print(key)
|
from typing import Tuple
class TrieNode(object):
def __init__(self, char: str):
self.char = char
self.children = []
self.word_finished = False
self.counter = 1
self.OccurrenceList = {}
root = TrieNode('*')
def insert(root, word: str, document):
node = root
for char in word:
found_in_child = False
for child in node.children:
if child.char == char:
child.counter += 1
node = child
found_in_child = True
break
if not found_in_child:
new_node = TrieNode(char)
node.children.append(new_node)
node = new_node
node.word_finished = True
if document not in node.OccurrenceList:
node.OccurrenceList[document] = 1
node.OccurrenceList[document] = node.OccurrenceList[document] + 1
def find_prefix(root, prefix: str) ->Tuple[bool, int]:
node = root
if not root.children:
return False, 0
for char in prefix:
char_not_found = True
for child in node.children:
if child.char == char:
char_not_found = False
node = child
break
if char_not_found:
print('Word Not Found: ' + prefix)
else:
print('Word Found: ' + prefix)
return True, node.OccurrenceList
from bs4 import BeautifulSoup
import nltk
nltk.download('stopwords')
nltk.download('punkt')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import re
import string
stop_words = set(stopwords.words('english'))
stop_words.update(string.punctuation)
import os
fdata = './input/'
files = os.listdir(fdata)
for file in files:
fname = file
file = open(fdata + str(file), encoding='utf8')
soup = BeautifulSoup(file.read(), 'html.parser')
[script.extract() for script in soup.findAll('script')]
[style.extract() for style in soup.findAll('style')]
words = word_tokenize(soup.get_text())
words = [i for i in words if all(j not in string.punctuation for j in i)]
for word in words:
if word.lower() not in stop_words and len(word) > 2 and word.isdigit(
) == False:
try:
word = word.lower().strip().encode('ascII')
except:
a = 1
else:
insert(root, word.decode('utf-8'), fname)
Enter = input('Please enter what you would like to search for: ')
inp = Enter.split(' ')
rank = {}
for word in inp:
boolw, dic = find_prefix(root, word.lower())
for key in dic:
if key not in rank:
rank[key] = dic[key]
else:
rank[key] = rank[key] + dic[key]
items = [(v, k) for k, v in rank.items()]
items.sort()
items.reverse()
if not items:
print('No results')
else:
print('Results : ')
for key in items:
print(key)
|
from typing import Tuple
# Trie node for the document index.
class TrieNode(object):
    """One character of the trie plus its bookkeeping state."""

    def __init__(self, char: str):
        self.char = char            # character stored at this node
        self.children = []          # list of child TrieNode objects
        self.word_finished = False  # True when an indexed word ends here
        self.counter = 1            # how many insertions traversed this node
        self.OccurrenceList = {}    # document name -> occurrence count


# Root of the trie; '*' is a sentinel that never matches a real character.
root = TrieNode('*')
#Adding a word in the trie structure
def insert(root, word: str, document):
    """Insert `word` into the trie rooted at `root`, recording one more
    occurrence of the word in `document`.

    root: TrieNode at the top of the trie
    word: the (already normalised) word to index
    document: name of the document the word occurred in
    """
    node = root
    for char in word:
        found_in_child = False
        # Search for the character among the children of the current node
        for child in node.children:
            if child.char == char:
                # char already present in the trie; bump its traversal counter
                child.counter += 1
                # descend to continue inserting the rest of the word
                node = child
                found_in_child = True
                break
        if not found_in_child:
            # char never inserted at this depth before: create a new node
            new_node = TrieNode(char)
            node.children.append(new_node)
            node = new_node
    # The whole word is inserted - mark the end of the word.
    node.word_finished = True
    # BUG FIX: the original initialised a new document's count to 1 and then
    # unconditionally incremented it, so the first occurrence counted as 2.
    # Start at 0 so every occurrence contributes exactly 1.
    if document not in node.OccurrenceList:
        node.OccurrenceList[document] = 0
    node.OccurrenceList[document] = node.OccurrenceList[document] + 1
#Performing the search in our files for the input word, using the trie structure we created above
#We will first check for the word's existence; if it exists, return the file names and occurrence counts
def find_prefix(root, prefix: str) -> Tuple[bool, dict]:
    """Look up `prefix` in the trie rooted at `root`.

    Returns (True, occurrence_dict) when every character of `prefix` is
    matched on a path from the root, where occurrence_dict maps document
    names to occurrence counts.  Returns (False, {}) otherwise.

    BUG FIXES vs. the original:
    - the walk now stops as soon as a character is missing instead of only
      checking the flag left over from the last character, and
    - the not-found paths return (False, {}) instead of falling off the end
      (implicit None) or returning (False, 0); callers unpack the result as
      `ok, dic` and iterate `dic`, which previously crashed.
    """
    node = root
    # Handle an empty trie, i.e. the root node has no children.
    if not root.children:
        return False, {}
    for char in prefix:
        char_not_found = True
        # Search all children of the current node for this character.
        for child in node.children:
            if child.char == char:
                # Character exists in the trie; descend and continue.
                char_not_found = False
                node = child
                break
        if char_not_found:
            # A character is missing: the prefix cannot be in the trie.
            print("Word Not Found: " + prefix)
            return False, {}
    # The whole prefix was matched; report where it occurs.
    print("Word Found: " + prefix)
    return True, node.OccurrenceList
#for scrapping words from website
from bs4 import BeautifulSoup
import nltk
nltk.download('stopwords')
nltk.download('punkt')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import re
import string
stop_words = set(stopwords.words('english'))
stop_words.update(string.punctuation)
import os
# Select the directory whose files will be scraped into fdata -> files.
# NOTE: change this directory to run on your device.
fdata = r"./input/"
files=os.listdir(fdata)
# Clean the text of every file - drop punctuation, stop words, digits,
# words shorter than 3 characters and other symbols - before indexing.
for file in files:
    fname=file #kept so each word can be associated with its source file on insertion
    file=open(fdata+str(file), encoding="utf8")
    # NOTE(review): the file handle is never closed - consider a `with` block.
    soup = BeautifulSoup(file.read(), 'html.parser')
    # Strip <script> and <style> elements so only visible text remains.
    [script.extract() for script in soup.findAll('script')]
    [style.extract() for style in soup.findAll('style')]
    # Tokenise the remaining visible text.
    words = word_tokenize(soup.get_text())
    # Remove tokens that contain punctuation characters.
    words = [i for i in words if all(j not in string.punctuation for j in i)]
    # Filter the tokens and insert the survivors into the trie.
    for word in words:
        if word.lower() not in stop_words and len(word) > 2 and word.isdigit() == False:
            try:
                # Skip words that cannot be encoded as ASCII
                # (codec names are case-insensitive, so 'ascII' == 'ascii').
                word = word.lower().strip().encode('ascII')
            except:
                # NOTE(review): bare except silently drops the word; catching
                # UnicodeEncodeError explicitly would be safer.
                a = 1
            else:
                # Word is clean: index it under its source file name.
                insert(root, word.decode("utf-8"), fname)
# Ask the user for the word(s) to search for.
Enter = input("Please enter what you would like to search for: ")
# Support multi-word queries by splitting on spaces.
inp = Enter.split(' ')
rank = {}
# Search the trie for each query word.
for word in inp:
    # NOTE(review): relies on find_prefix returning a (bool, dict) pair even
    # for missing words; verify it never returns None or (False, 0) here.
    boolw,dic = find_prefix(root, word.lower())
    # Accumulate per-file occurrence counts across all query words.
    for key in dic:
        if key not in rank:
            rank[key] = dic[key]
        else:
            rank[key] = rank[key] + dic[key]
# Rank files by total occurrences: sort ascending, then reverse so the
# most relevant files come first.
items=[(v,k) for k,v in rank.items()]
items.sort()
items.reverse()
# Display the search results.
if not items:
    print("No results")
else:
    print("Results : ")
    # Print every matching file, ordered by occurrence count (highest first).
    for key in items:
        print(key)
|
[
4,
5,
6,
7,
8
] |
2,460 |
8afaa69d3a20c5e39e6321869f25dbd9020a5b3a
|
<mask token>
|
<mask token>
c.execute(q)
<mask token>
c.execute(q)
<mask token>
c.execute(q)
conn.commit()
|
<mask token>
conn = sqlite3.connect('blog.db')
c = conn.cursor()
q = 'CREATE TABLE users(Username text, Password text, UserID integer)'
c.execute(q)
q = (
'CREATE TABLE blogs(Title text, Content text, BlogID integer, UserID integer)'
)
c.execute(q)
q = (
'CREATE TABLE comments(Content text, CommentID integer, BlogID integer, UserID integer)'
)
c.execute(q)
conn.commit()
|
import sqlite3
conn = sqlite3.connect('blog.db')
c = conn.cursor()
q = 'CREATE TABLE users(Username text, Password text, UserID integer)'
c.execute(q)
q = (
'CREATE TABLE blogs(Title text, Content text, BlogID integer, UserID integer)'
)
c.execute(q)
q = (
'CREATE TABLE comments(Content text, CommentID integer, BlogID integer, UserID integer)'
)
c.execute(q)
conn.commit()
|
import sqlite3

# Build the blog database schema.  IF NOT EXISTS makes the script idempotent
# (the original crashed with "table ... already exists" on a second run), and
# try/finally guarantees the connection is closed even if a statement fails.
conn = sqlite3.connect("blog.db")
try:
    conn.execute(
        "CREATE TABLE IF NOT EXISTS users(Username text, Password text, UserID integer)")
    conn.execute(
        "CREATE TABLE IF NOT EXISTS blogs(Title text, Content text, BlogID integer, UserID integer)")
    conn.execute(
        "CREATE TABLE IF NOT EXISTS comments(Content text, CommentID integer, BlogID integer, UserID integer)")
    conn.commit()
finally:
    conn.close()
|
[
0,
1,
2,
3,
4
] |
2,461 |
e5a698979bc84fe733a9bf5cd51e2f078956d468
|
<mask token>
class LoginRegistrationAction(LoginRegistration):
def check_welcome_xunyou(self):
return self.welcome_xunyou().text
def click_welcome_xunyou(self):
self.welcome_xunyou().click()
return self
def logged_in_random(self):
self.phone_id().send_keys('1831111{}'.format(random.randint(1000,
9999)))
return self
def logged_in_appoint(self):
self.phone_id().send_keys(str(random.sample(public_number_vip, 1)))
return self
def logged_in_not_vip_appoint(self):
self.phone_id().send_keys(str(random.sample(public_number_not_vip, 1)))
return self
def logged_in_appoint_183(self):
self.phone_id().send_keys('18333334444')
return self
def click_verification_code(self):
self.verification_code().click()
return VerificationCodeAction(self._driver)
def check_verification_code_enabled(self):
return self.verification_code().is_enabled()
def write_in_error_quantity(self):
self.phone_id().send_keys('1399999219392s我!3')
return self
def number_quantity(self):
return len(self.phone_id().text)
def click_privacy_agreement(self):
self.privacy_agreement().click()
return self
def click_service_agreement(self):
self.service_agreement().click()
return self
def click_exit_privacy_agreement(self):
self.exit_privacy_agreement().click()
return self
def click_exit_service_agreement(self):
self.exit_service_agreement().click()
return self
def check_keyboard_Delete(self):
return self.keyboard_Delete().text
<mask token>
def click_exit_logged_in(self):
self.exit_logged_in().click()
from page.test_accelerate_page import AccelerateHomeAction
return AccelerateHomeAction(self._driver)
def click_default_area_code(self):
self.default_area_code().click()
return self
<mask token>
def click_switch_area_code(self):
self.switch_area_code().click()
return self
def check_switch_area_code(self):
return self.switch_area_code().text
def check_memory_logged_in_number(self):
return self.memory_logged_in_number().text
|
<mask token>
class LoginRegistrationAction(LoginRegistration):
def check_welcome_xunyou(self):
return self.welcome_xunyou().text
def click_welcome_xunyou(self):
self.welcome_xunyou().click()
return self
def logged_in_random(self):
self.phone_id().send_keys('1831111{}'.format(random.randint(1000,
9999)))
return self
def logged_in_appoint(self):
self.phone_id().send_keys(str(random.sample(public_number_vip, 1)))
return self
def logged_in_not_vip_appoint(self):
self.phone_id().send_keys(str(random.sample(public_number_not_vip, 1)))
return self
def logged_in_appoint_183(self):
self.phone_id().send_keys('18333334444')
return self
def click_verification_code(self):
self.verification_code().click()
return VerificationCodeAction(self._driver)
def check_verification_code_enabled(self):
return self.verification_code().is_enabled()
def write_in_error_quantity(self):
self.phone_id().send_keys('1399999219392s我!3')
return self
def number_quantity(self):
return len(self.phone_id().text)
def click_privacy_agreement(self):
self.privacy_agreement().click()
return self
def click_service_agreement(self):
self.service_agreement().click()
return self
def click_exit_privacy_agreement(self):
self.exit_privacy_agreement().click()
return self
def click_exit_service_agreement(self):
self.exit_service_agreement().click()
return self
def check_keyboard_Delete(self):
return self.keyboard_Delete().text
<mask token>
def click_exit_logged_in(self):
self.exit_logged_in().click()
from page.test_accelerate_page import AccelerateHomeAction
return AccelerateHomeAction(self._driver)
def click_default_area_code(self):
self.default_area_code().click()
return self
def click_exit_area_code(self):
self.exit_area_code().click()
return self
def click_switch_area_code(self):
self.switch_area_code().click()
return self
def check_switch_area_code(self):
return self.switch_area_code().text
def check_memory_logged_in_number(self):
return self.memory_logged_in_number().text
|
<mask token>
class LoginRegistrationAction(LoginRegistration):
def check_welcome_xunyou(self):
return self.welcome_xunyou().text
def click_welcome_xunyou(self):
self.welcome_xunyou().click()
return self
def logged_in_random(self):
self.phone_id().send_keys('1831111{}'.format(random.randint(1000,
9999)))
return self
def logged_in_appoint(self):
self.phone_id().send_keys(str(random.sample(public_number_vip, 1)))
return self
def logged_in_not_vip_appoint(self):
self.phone_id().send_keys(str(random.sample(public_number_not_vip, 1)))
return self
def logged_in_appoint_183(self):
self.phone_id().send_keys('18333334444')
return self
def click_verification_code(self):
self.verification_code().click()
return VerificationCodeAction(self._driver)
def check_verification_code_enabled(self):
return self.verification_code().is_enabled()
def write_in_error_quantity(self):
self.phone_id().send_keys('1399999219392s我!3')
return self
def number_quantity(self):
return len(self.phone_id().text)
def click_privacy_agreement(self):
self.privacy_agreement().click()
return self
def click_service_agreement(self):
self.service_agreement().click()
return self
def click_exit_privacy_agreement(self):
self.exit_privacy_agreement().click()
return self
def click_exit_service_agreement(self):
self.exit_service_agreement().click()
return self
def check_keyboard_Delete(self):
return self.keyboard_Delete().text
def logged_in_assert(self):
assert '欢迎登录迅游' in self.check_welcome_xunyou()
return self
def click_exit_logged_in(self):
self.exit_logged_in().click()
from page.test_accelerate_page import AccelerateHomeAction
return AccelerateHomeAction(self._driver)
def click_default_area_code(self):
self.default_area_code().click()
return self
def click_exit_area_code(self):
self.exit_area_code().click()
return self
def click_switch_area_code(self):
self.switch_area_code().click()
return self
def check_switch_area_code(self):
return self.switch_area_code().text
def check_memory_logged_in_number(self):
return self.memory_logged_in_number().text
|
import random
from elment.login_registration_element import LoginRegistration
from page.test_verification_code_page import VerificationCodeAction
public_number_vip = ['17800000000', '17800000001', '17800000002',
'17800000003', '17800000004', '17800000005', '17800000006',
'17800000007', '17800000008', '17800000009']
public_number_not_vip = ['18381939440', '18381939441', '18381939445',
'18381939446']
class LoginRegistrationAction(LoginRegistration):
def check_welcome_xunyou(self):
return self.welcome_xunyou().text
def click_welcome_xunyou(self):
self.welcome_xunyou().click()
return self
def logged_in_random(self):
self.phone_id().send_keys('1831111{}'.format(random.randint(1000,
9999)))
return self
def logged_in_appoint(self):
self.phone_id().send_keys(str(random.sample(public_number_vip, 1)))
return self
def logged_in_not_vip_appoint(self):
self.phone_id().send_keys(str(random.sample(public_number_not_vip, 1)))
return self
def logged_in_appoint_183(self):
self.phone_id().send_keys('18333334444')
return self
def click_verification_code(self):
self.verification_code().click()
return VerificationCodeAction(self._driver)
def check_verification_code_enabled(self):
return self.verification_code().is_enabled()
def write_in_error_quantity(self):
self.phone_id().send_keys('1399999219392s我!3')
return self
def number_quantity(self):
return len(self.phone_id().text)
def click_privacy_agreement(self):
self.privacy_agreement().click()
return self
def click_service_agreement(self):
self.service_agreement().click()
return self
def click_exit_privacy_agreement(self):
self.exit_privacy_agreement().click()
return self
def click_exit_service_agreement(self):
self.exit_service_agreement().click()
return self
def check_keyboard_Delete(self):
return self.keyboard_Delete().text
def logged_in_assert(self):
assert '欢迎登录迅游' in self.check_welcome_xunyou()
return self
def click_exit_logged_in(self):
self.exit_logged_in().click()
from page.test_accelerate_page import AccelerateHomeAction
return AccelerateHomeAction(self._driver)
def click_default_area_code(self):
self.default_area_code().click()
return self
def click_exit_area_code(self):
self.exit_area_code().click()
return self
def click_switch_area_code(self):
self.switch_area_code().click()
return self
def check_switch_area_code(self):
return self.switch_area_code().text
def check_memory_logged_in_number(self):
return self.memory_logged_in_number().text
|
import random
from elment.login_registration_element import LoginRegistration
from page.test_verification_code_page import VerificationCodeAction
public_number_vip = ['17800000000','17800000001','17800000002','17800000003','17800000004','17800000005','17800000006',
'17800000007','17800000008','17800000009']
public_number_not_vip = ['18381939440', '18381939441', '18381939445', '18381939446']
class LoginRegistrationAction(LoginRegistration):  # actions on the login/registration page
    """Page-object actions for the login / registration screen."""

    def check_welcome_xunyou(self):  # text of the "welcome to Xunyou" banner
        return self.welcome_xunyou().text

    def click_welcome_xunyou(self):  # tap the banner (also dismisses the soft keyboard)
        self.welcome_xunyou().click()
        return self

    def logged_in_random(self):  # type a random 1831111xxxx account into the phone field
        self.phone_id().send_keys('1831111{}'.format(random.randint(1000, 9999)))
        return self

    def logged_in_appoint(self):  # log in with a random VIP account
        # BUG FIX: random.sample returns a 1-element list, so the original
        # str(...) typed "['17800000000']" (with brackets and quotes) into
        # the field; random.choice yields the bare phone-number string.
        self.phone_id().send_keys(random.choice(public_number_vip))
        return self

    def logged_in_not_vip_appoint(self):  # log in with a random non-VIP account
        # Same list-vs-string fix as logged_in_appoint.
        self.phone_id().send_keys(random.choice(public_number_not_vip))
        return self

    def logged_in_appoint_183(self):  # log in with the fixed account 18333334444
        self.phone_id().send_keys('18333334444')
        return self

    # def check_logged_in_title(self):  # logged-in account element on the "more" page (not implemented)

    def click_verification_code(self):  # tap "get verification code"
        self.verification_code().click()
        return VerificationCodeAction(self._driver)

    def check_verification_code_enabled(self):  # whether the code button is clickable
        return self.verification_code().is_enabled()

    def write_in_error_quantity(self):  # type an over-long / invalid phone string
        self.phone_id().send_keys('1399999219392s我!3')
        return self

    def number_quantity(self):  # number of characters currently in the phone field
        return len(self.phone_id().text)

    def click_privacy_agreement(self):  # open the privacy agreement from the login page
        self.privacy_agreement().click()
        return self

    def click_service_agreement(self):  # open the service agreement from the login page
        self.service_agreement().click()
        return self

    def click_exit_privacy_agreement(self):  # tap "<" on the privacy-agreement detail page
        self.exit_privacy_agreement().click()
        return self

    def click_exit_service_agreement(self):  # tap "<" on the service-agreement detail page
        self.exit_service_agreement().click()
        return self

    def check_keyboard_Delete(self):  # Delete-key label; used to detect whether the keyboard is up
        return self.keyboard_Delete().text

    def logged_in_assert(self):  # assert that we actually reached the login page
        assert "欢迎登录迅游" in self.check_welcome_xunyou()
        return self

    def click_exit_logged_in(self):  # tap "<" on the login page; returns to the acceleration home page
        self.exit_logged_in().click()
        from page.test_accelerate_page import AccelerateHomeAction
        return AccelerateHomeAction(self._driver)

    def click_default_area_code(self):  # tap the area-code selector
        self.default_area_code().click()
        return self

    def click_exit_area_code(self):  # tap "<" on the area-code page, back to login
        self.exit_area_code().click()
        return self

    def click_switch_area_code(self):  # pick the Afghanistan entry on the area-code page
        self.switch_area_code().click()
        return self

    def check_switch_area_code(self):  # read the area code after switching
        return self.switch_area_code().text

    def check_memory_logged_in_number(self):  # text of the remembered-account hint
        return self.memory_logged_in_number().text
|
[
21,
22,
23,
25,
26
] |
2,462 |
05cfd9d239b63c9b1e0c93a09e89cceb8d8e99e4
|
<mask token>
def get_cachefile(filename):
"""
Return full path to filename within cache dir.
"""
if not os.path.exists(cachedir):
os.makedirs(cachedir)
return os.path.join(cachedir, filename)
<mask token>
def get_cached_data(name):
if name not in _cachedict:
load_cachedict(name)
return _cachedict[name]
def cache_data(name, data):
""" Save data to cache under name
name: name of datastore
data: data to store
"""
cache_path = get_cachefile('%s.cache' % name)
with open(cache_path, 'wb') as f:
pickle.dump(data, f)
def cached_data_fresh(name, max_age):
""" Is data cached at name less than max_age old?
name: name of datastore
max_age: maximum age of data in seconds
returns True if data is less than max_age old, else False
"""
age = get_cached_data_age(name)
if not age:
return False
return age < max_age
def get_cached_data_age(name):
""" Return age of data cached at name in seconds or 0 if
cache doesn't exist
name: name of datastore
returns age of datastore in seconds
"""
cache_path = get_cachefile('%s.cache' % name)
if not os.path.exists(cache_path):
return 0
return time.time() - os.stat(cache_path).st_mtime
def clear_cache():
""" Delete all files in cache directory."""
if os.path.exists(get_cachedir()):
for filename in os.listdir(get_cachedir()):
if not filename.endswith('.cache'):
continue
path = os.path.join(get_cachedir(), filename)
os.unlink(path)
def clear_cachedict():
_cachedict.clear()
|
<mask token>
def get_cachefile(filename):
"""
Return full path to filename within cache dir.
"""
if not os.path.exists(cachedir):
os.makedirs(cachedir)
return os.path.join(cachedir, filename)
def load_cachedict(name):
cache_path = get_cachefile('%s.cache' % name)
if os.path.isfile(cache_path):
with open(cache_path, 'rb') as f:
_cachedict[name] = pickle.load(f)
def get_cached_data(name):
if name not in _cachedict:
load_cachedict(name)
return _cachedict[name]
def cache_data(name, data):
""" Save data to cache under name
name: name of datastore
data: data to store
"""
cache_path = get_cachefile('%s.cache' % name)
with open(cache_path, 'wb') as f:
pickle.dump(data, f)
def cached_data_fresh(name, max_age):
""" Is data cached at name less than max_age old?
name: name of datastore
max_age: maximum age of data in seconds
returns True if data is less than max_age old, else False
"""
age = get_cached_data_age(name)
if not age:
return False
return age < max_age
def get_cached_data_age(name):
""" Return age of data cached at name in seconds or 0 if
cache doesn't exist
name: name of datastore
returns age of datastore in seconds
"""
cache_path = get_cachefile('%s.cache' % name)
if not os.path.exists(cache_path):
return 0
return time.time() - os.stat(cache_path).st_mtime
def clear_cache():
""" Delete all files in cache directory."""
if os.path.exists(get_cachedir()):
for filename in os.listdir(get_cachedir()):
if not filename.endswith('.cache'):
continue
path = os.path.join(get_cachedir(), filename)
os.unlink(path)
def clear_cachedict():
_cachedict.clear()
|
<mask token>
try:
import cPickle as pickle
except:
import pickle
<mask token>
def get_cachefile(filename):
"""
Return full path to filename within cache dir.
"""
if not os.path.exists(cachedir):
os.makedirs(cachedir)
return os.path.join(cachedir, filename)
def load_cachedict(name):
cache_path = get_cachefile('%s.cache' % name)
if os.path.isfile(cache_path):
with open(cache_path, 'rb') as f:
_cachedict[name] = pickle.load(f)
def get_cached_data(name):
if name not in _cachedict:
load_cachedict(name)
return _cachedict[name]
def cache_data(name, data):
""" Save data to cache under name
name: name of datastore
data: data to store
"""
cache_path = get_cachefile('%s.cache' % name)
with open(cache_path, 'wb') as f:
pickle.dump(data, f)
def cached_data_fresh(name, max_age):
""" Is data cached at name less than max_age old?
name: name of datastore
max_age: maximum age of data in seconds
returns True if data is less than max_age old, else False
"""
age = get_cached_data_age(name)
if not age:
return False
return age < max_age
def get_cached_data_age(name):
""" Return age of data cached at name in seconds or 0 if
cache doesn't exist
name: name of datastore
returns age of datastore in seconds
"""
cache_path = get_cachefile('%s.cache' % name)
if not os.path.exists(cache_path):
return 0
return time.time() - os.stat(cache_path).st_mtime
def clear_cache():
""" Delete all files in cache directory."""
if os.path.exists(get_cachedir()):
for filename in os.listdir(get_cachedir()):
if not filename.endswith('.cache'):
continue
path = os.path.join(get_cachedir(), filename)
os.unlink(path)
def clear_cachedict():
_cachedict.clear()
|
import os
import time
try:
import cPickle as pickle
except:
import pickle
cachedir = os.path.expanduser('~/.cache/sherlock/')
_cachedict = {}
def get_cachefile(filename):
"""
Return full path to filename within cache dir.
"""
if not os.path.exists(cachedir):
os.makedirs(cachedir)
return os.path.join(cachedir, filename)
def load_cachedict(name):
cache_path = get_cachefile('%s.cache' % name)
if os.path.isfile(cache_path):
with open(cache_path, 'rb') as f:
_cachedict[name] = pickle.load(f)
def get_cached_data(name):
if name not in _cachedict:
load_cachedict(name)
return _cachedict[name]
def cache_data(name, data):
""" Save data to cache under name
name: name of datastore
data: data to store
"""
cache_path = get_cachefile('%s.cache' % name)
with open(cache_path, 'wb') as f:
pickle.dump(data, f)
def cached_data_fresh(name, max_age):
""" Is data cached at name less than max_age old?
name: name of datastore
max_age: maximum age of data in seconds
returns True if data is less than max_age old, else False
"""
age = get_cached_data_age(name)
if not age:
return False
return age < max_age
def get_cached_data_age(name):
""" Return age of data cached at name in seconds or 0 if
cache doesn't exist
name: name of datastore
returns age of datastore in seconds
"""
cache_path = get_cachefile('%s.cache' % name)
if not os.path.exists(cache_path):
return 0
return time.time() - os.stat(cache_path).st_mtime
def clear_cache():
""" Delete all files in cache directory."""
if os.path.exists(get_cachedir()):
for filename in os.listdir(get_cachedir()):
if not filename.endswith('.cache'):
continue
path = os.path.join(get_cachedir(), filename)
os.unlink(path)
def clear_cachedict():
_cachedict.clear()
|
import os
import time
try:
import cPickle as pickle
except:
import pickle
# -----------------------------------------------------------------
# Cache utilities: pickle-backed on-disk cache plus an in-memory map.
# -----------------------------------------------------------------
cachedir = os.path.expanduser('~/.cache/sherlock/')
_cachedict = {}


def get_cachefile(filename):
    """Return the full path of `filename` inside the cache directory,
    creating the directory on first use."""
    os.makedirs(cachedir, exist_ok=True)
    return os.path.join(cachedir, filename)
def load_cachedict(name):
    """Populate the in-memory cache for `name` from its pickle file, if any."""
    path = get_cachefile('{}.cache'.format(name))
    if not os.path.isfile(path):
        return
    with open(path, 'rb') as fh:
        _cachedict[name] = pickle.load(fh)
def get_cached_data(name):
    """Return the object cached under `name`, loading it from disk on first
    access.  Raises KeyError when no cache exists for `name`."""
    try:
        return _cachedict[name]
    except KeyError:
        load_cachedict(name)
        return _cachedict[name]
def cache_data(name, data):
    """Pickle `data` into the on-disk cache under `name`.

    name: name of the datastore
    data: object to store (must be picklable)
    """
    with open(get_cachefile('{}.cache'.format(name)), 'wb') as fh:
        pickle.dump(data, fh)
def cached_data_fresh(name, max_age):
    """Return True when data cached under `name` exists and is younger
    than `max_age` seconds."""
    age = get_cached_data_age(name)
    return bool(age) and age < max_age
def get_cached_data_age(name):
    """Age in seconds of the datastore cached under `name`, or 0 when no
    cache file exists."""
    cache_path = get_cachefile('{}.cache'.format(name))
    if os.path.exists(cache_path):
        return time.time() - os.path.getmtime(cache_path)
    return 0
def clear_cache():
    """Delete every *.cache file in the cache directory.

    BUG FIX: the original called get_cachedir(), which is not defined
    anywhere in this module (NameError at runtime); the module-level
    `cachedir` constant is the intended cache location (it is what
    get_cachefile uses).
    """
    if os.path.exists(cachedir):
        for filename in os.listdir(cachedir):
            if not filename.endswith('.cache'):
                continue
            os.unlink(os.path.join(cachedir, filename))
def clear_cachedict():
    """Empty the in-memory cache; files on disk are left untouched."""
    _cachedict.clear()
|
[
7,
8,
9,
11,
12
] |
2,463 |
3f473701b186b5287258ba74e478cccdad0f29bf
|
<mask token>
|
<mask token>
def corr2d(X, K):
"""
定义二维互相关运算函数
:param X:输入数组
:param K: 核数组
:return:二维互相关的运算结果
"""
h, w = K.shape
Y = tf.Variable(tf.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)))
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
Y[i, j].assign(tf.cast(tf.reduce_sum(X[i:i + h, j:j + w] * K),
dtype=tf.float32))
return Y
print('----------验证二维互相关运算的结果--------------')
<mask token>
print(corr2d(X, K))
|
<mask token>
def corr2d(X, K):
"""
定义二维互相关运算函数
:param X:输入数组
:param K: 核数组
:return:二维互相关的运算结果
"""
h, w = K.shape
Y = tf.Variable(tf.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)))
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
Y[i, j].assign(tf.cast(tf.reduce_sum(X[i:i + h, j:j + w] * K),
dtype=tf.float32))
return Y
print('----------验证二维互相关运算的结果--------------')
X = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
K = tf.constant([[0, 1], [2, 3]])
<mask token>
print(corr2d(X, K))
|
<mask token>
import tensorflow as tf
def corr2d(X, K):
"""
定义二维互相关运算函数
:param X:输入数组
:param K: 核数组
:return:二维互相关的运算结果
"""
h, w = K.shape
Y = tf.Variable(tf.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)))
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
Y[i, j].assign(tf.cast(tf.reduce_sum(X[i:i + h, j:j + w] * K),
dtype=tf.float32))
return Y
print('----------验证二维互相关运算的结果--------------')
X = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
K = tf.constant([[0, 1], [2, 3]])
<mask token>
print(corr2d(X, K))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@File : corr2d.py
@Author : jeffsheng
@Date : 2020/1/3
@Desc : 卷积层中的互相关(cross-correlation)运算
卷积层需要学习的参数是:卷积核和偏置大小
"""
import tensorflow as tf
def corr2d(X, K):
"""
定义二维互相关运算函数
:param X:输入数组
:param K: 核数组
:return:二维互相关的运算结果
"""
h, w = K.shape
Y = tf.Variable(tf.zeros((X.shape[0] - h + 1, X.shape[1] - w +1)))
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
Y[i,j].assign(tf.cast(tf.reduce_sum(X[i:i+h, j:j+w] * K), dtype=tf.float32))
return Y
print("----------验证二维互相关运算的结果--------------")
X = tf.constant([[0,1,2], [3,4,5], [6,7,8]])
K = tf.constant([[0,1], [2,3]])
"""
<tf.Variable 'Variable:0' shape=(2, 2) dtype=float32, numpy=
array([[19., 25.],
[37., 43.]], dtype=float32)>
"""
print(corr2d(X, K))
|
[
0,
2,
3,
4,
5
] |
2,464 |
de634c95fddf4591cb15cd0eb20e798043075798
|
<mask token>
|
def two_teams(sailors):
result = []
temp = [[], []]
for i in sailors.items():
if i[1] > 40 or i[1] < 20:
temp[0].append(i[0])
else:
temp[1].append(i[0])
result.append(sorted(temp[0]))
result.append(sorted(temp[1]))
return result
<mask token>
|
def two_teams(sailors):
result = []
temp = [[], []]
for i in sailors.items():
if i[1] > 40 or i[1] < 20:
temp[0].append(i[0])
else:
temp[1].append(i[0])
result.append(sorted(temp[0]))
result.append(sorted(temp[1]))
return result
if __name__ == '__main__':
print('Example:')
print(two_teams({'Smith': 34, 'Wesson': 22, 'Coleman': 45, 'Abrahams': 19})
)
print(two_teams({'Fernandes': 18, 'Johnson': 22, 'Kale': 41,
'McCortney': 54}))
assert two_teams({'Smith': 34, 'Wesson': 22, 'Coleman': 45, 'Abrahams': 19}
) == [['Abrahams', 'Coleman'], ['Smith', 'Wesson']]
assert two_teams({'Fernandes': 18, 'Johnson': 22, 'Kale': 41,
'McCortney': 54}) == [['Fernandes', 'Kale', 'McCortney'], ['Johnson']]
print("Coding complete? Click 'Check' to earn cool rewards!")
|
#Answer to The Ship Teams - https://py.checkio.org/en/mission/the-ship-teams/
def two_teams(sailors):
result = [] #To store the result
temp = [[],[]] #To store the intermediatary values
for i in sailors.items(): #To get the values of dictionary as Tuple
if i[1] > 40 or i[1] < 20: #To get the people to be added to the First Ship
temp[0].append(i[0]) #Adding each person name to first Temp List
else: #To get the people to be added to the Second Ship
temp[1].append(i[0]) #Adding each person name to second Temp List
result.append(sorted(temp[0])) #Adding all the names of the Ship 1 to resultant
result.append(sorted(temp[1])) #Adding all the names of the Ship 2 to resultant
return result #Return the result
if __name__ == '__main__':
print("Example:")
print(two_teams({'Smith': 34, 'Wesson': 22, 'Coleman': 45, 'Abrahams': 19}))
print(two_teams({'Fernandes': 18, 'Johnson': 22, 'Kale': 41, 'McCortney': 54}))
#These "asserts" using only for self-checking and not necessary for auto-testing
assert two_teams({
'Smith': 34,
'Wesson': 22,
'Coleman': 45,
'Abrahams': 19}) == [
['Abrahams', 'Coleman'],
['Smith', 'Wesson']
]
assert two_teams({
'Fernandes': 18,
'Johnson': 22,
'Kale': 41,
'McCortney': 54}) == [
['Fernandes', 'Kale', 'McCortney'],
['Johnson']
]
print("Coding complete? Click 'Check' to earn cool rewards!")
| null |
[
0,
1,
2,
3
] |
2,465 |
3c029adb59cd6db1e3d4a22e6561f5e2ae827d60
|
<mask token>
|
def solution(n):
arr = [[(0) for _ in range(i + 1)] for i in range(n)]
size = n
num = 0
x = 0
y = -1
while True:
for _ in range(size):
num += 1
y += 1
arr[y][x] = num
size -= 1
if size == 0:
break
for _ in range(size):
num += 1
x += 1
arr[y][x] = num
size -= 1
if size == 0:
break
for _ in range(size):
num += 1
x -= 1
y -= 1
arr[y][x] = num
size -= 1
if size == 0:
break
answer = []
for i in arr:
answer.extend(i)
return answer
|
# https://daphne-dev.github.io/2020/09/24/algo-022/
def solution(n):
arr = [[0 for _ in range(i+1)] for i in range(n)]
# 경우의수 는 3가지
# 1. y축이 증가하면서 수가 증가
# 2. x축이 증가하면서 수가 증가
# 3. y,x축이 감소하면서 수가 증가
size = n
num = 0
x = 0
y = -1
while True:
# 1번
for _ in range(size):
num += 1
y += 1
arr[y][x] = num
size-=1
if size == 0:
break
# 2번
for _ in range(size):
num += 1
x += 1
arr[y][x] = num
size-=1
if size == 0:
break
# 3번
for _ in range(size):
num += 1
x -= 1
y -= 1
arr[y][x] = num
size-=1
if size == 0:
break
answer = []
for i in arr:
answer.extend(i)
return answer
# print(solution(4))
| null | null |
[
0,
1,
2
] |
2,466 |
6b5399effe73d27eade0381f016cd7819a6e104a
|
<mask token>
|
<mask token>
cv2.namedWindow('st', cv2.WINDOW_NORMAL)
cv2.imshow('st', img)
cv2.imwrite('mes.png', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
<mask token>
img = cv2.imread('d:\\st.jpg', 0)
cv2.namedWindow('st', cv2.WINDOW_NORMAL)
cv2.imshow('st', img)
cv2.imwrite('mes.png', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import tensorflow as tf
import cv2
img = cv2.imread('d:\\st.jpg', 0)
cv2.namedWindow('st', cv2.WINDOW_NORMAL)
cv2.imshow('st', img)
cv2.imwrite('mes.png', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import tensorflow as tf
import cv2
img=cv2.imread('d:\st.jpg',0)
cv2.namedWindow('st',cv2.WINDOW_NORMAL)#可以调整图像窗口大小
cv2.imshow('st',img)
cv2.imwrite('mes.png',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
0,
1,
2,
3,
4
] |
2,467 |
72f1547ea7de78a5fe4b583523e592fa25c0ee77
|
<mask token>
|
<mask token>
if button == True:
df = pd.read_csv(upload)
st.write(df.head())
fig = plt.figure()
my = fig.add_subplot(1, 1, 1)
my.scatter(df['sepal.length'], df['petal.length'])
my.set_xlabel('sepal.length')
my.set_ylabel('petal.length')
st.write(fig)
|
<mask token>
username = st.text_input('username')
upload = st.file_uploader('uploadfile', type=['csv'])
button = st.button('submit')
if button == True:
df = pd.read_csv(upload)
st.write(df.head())
fig = plt.figure()
my = fig.add_subplot(1, 1, 1)
my.scatter(df['sepal.length'], df['petal.length'])
my.set_xlabel('sepal.length')
my.set_ylabel('petal.length')
st.write(fig)
|
import streamlit as st
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
username = st.text_input('username')
upload = st.file_uploader('uploadfile', type=['csv'])
button = st.button('submit')
if button == True:
df = pd.read_csv(upload)
st.write(df.head())
fig = plt.figure()
my = fig.add_subplot(1, 1, 1)
my.scatter(df['sepal.length'], df['petal.length'])
my.set_xlabel('sepal.length')
my.set_ylabel('petal.length')
st.write(fig)
|
import streamlit as st
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
username=st.text_input ("username")
upload=st.file_uploader("uploadfile",type=['csv'])
button=st.button("submit")
if button==True:
df=pd.read_csv(upload)
st.write(df.head())
fig = plt.figure()
my = fig.add_subplot(1,1,1)
my.scatter(df["sepal.length"],df["petal.length"],)
my.set_xlabel("sepal.length")
my.set_ylabel("petal.length")
st.write(fig)
|
[
0,
1,
2,
3,
4
] |
2,468 |
657866affd653a99eb7d9a9a82b2f7d6503ec21a
|
from parser import read_expression_line, read_expression_lines, read_assignment_line, read_import_line, Import
def test_expression():
lines = ['a % b']
expression, left = read_expression_lines(lines)
assert expression is not None and len(left) == 0, left
print "test_expression 0: {} {}".format(expression, left)
lines = ['[a+b]']
expression, left = read_expression_lines(lines)
assert expression is not None
print "{} {}".format(expression, left)
lines = [
'get_name({',
'"first":"mike",',
'"last":"yu"',
'}):'
]
expression, leftt = read_expression_lines(lines)
assert expression is not None
print "{} {}".format(expression, left)
lines = [
'[a[0]*b[1]]',
]
expression, left = read_expression_lines(lines)
assert expression is not None
print "{} {}".format(expression, left)
lines = [
'[a[0]*b[1] - c[2]*d[3],'
'e]',
]
expression, left = read_expression_lines(lines)
assert expression is not None
print "{} {}".format(expression, left)
lines = [
'(vector[i] * vector[i])'
]
expression, left = read_expression_lines(lines)
assert expression is not None
print "{} {}".format(expression, left)
lines = [
#'if value >= 0 && value < lengths[axis]:'
'value >= 0 && value < lengths[axis]'
#'value >= 0 && value < lengths[axis]'
#'value < 0'
]
expression, left = read_expression_lines(lines)
print "test_expression {} {}".format(expression, left)
assert expression is not None and len(left) == 0
lines = [
'assert(matrix == [[1,2,3],[4,5,6]])'
]
expression, left = read_expression_lines(lines)
print "test_expression assert {} {}".format(expression, left)
assert expression is not None and len(left) == 0
def test_assignment():
print "Testing assignments"
expression = read_assignment_line('a = 5')
assert expression is not None
print "{}".format(expression)
line = 'text = null'
expression = read_assignment_line(line)
assert expression is not None
print "test assignment 0: {}".format(expression)
expression = read_assignment_line('sum += 5')
assert expression is not None
print "{}".format(expression)
expression = read_assignment_line('some[axis] += value')
assert expression is not None
print "{}".format(expression)
expression = read_assignment_line('sum_indices = [indices[0], indices[1], indices[2]]')
assert expression is not None
print "{}".format(expression)
text = 'faces[0][0] = true'
expression = read_assignment_line(text)
assert expression is not None
print "{}\n {}".format(text, expression)
text = 'face.arm = true'
expression = read_assignment_line(text)
assert expression is not None
print "test asignment {}\n {}".format(text, expression)
text = '(a, b, c) = bob()'
expression = read_assignment_line(text)
assert expression is not None
print "test asignment 2 {}\n {}".format(text, expression)
text = 'c = bob(a - 6)'
assignment, tokens = read_assignment_line(text)
assert assignment is not None and len(tokens) == 0
print "test asignment 3 {}\n {}\n {}".format(text, assignment, tokens)
def test_parser():
expression, left = read_import_line("from shared import translate")
assert expression is not None
assert isinstance(expression, Import)
print "test_parser: {}".format(expression)
expression, left = read_import_line("from shared import (translate, bob)")
assert expression is not None
assert isinstance(expression, Import)
print "test_parser 2 : {}".format(expression)
lines = ['"john"']
expression, left = read_expression_line(lines[0])
assert expression is not None
lines = ['a + b']
expression, left = read_expression_line(lines[0])
assert expression is not None
lines = ['0']
expression, left = read_expression_line(lines[0])
assert expression is not None
lines = ['length(c)']
expression, left = read_expression_line(lines[0])
assert expression is not None
lines = ['length(c)[0][1][2]']
expression, left = read_expression_line(lines[0])
assert expression is not None
lines = ['(length(c))[0][1][2]']
expression, left = read_expression_line(lines[0])
assert expression is not None
print "test parser: {}".format(expression)
assert expression is not None
lines = ['d[0]']
expression, left = read_expression_line(lines[0])
assert expression is not None
lines = ['[e, f]']
expression, left = read_expression_line(lines[0])
assert expression is not None
lines = ['[g, str(h)]']
expression, left = read_expression_line(lines[0])
assert expression is not None
print "starting dict test 1"
lines = ['{"name":"mike"}']
expression, left = read_expression_line(lines[0])
assert expression is not None
lines = ['{"first":"alex", "last":"oh"}']
expression, left = read_expression_line(lines[0])
assert expression is not None
line = '((position[0] - middleX)/middleX)*width'
expression, left = read_expression_line(line)
assert expression is not None
line = 'keyboard.key_state.bob'
expression, left = read_expression_line(line)
assert expression is not None
print "test parser 3: {}".format(expression)
line = 'mouse.button[2]'
expression, left = read_expression_line(line)
assert expression is not None
print "test parser 4: {}".format(expression)
line = '{ "position": [0,0,0], "bob": "dole", "nice": "brother" }'
expression, left = read_expression_line(line)
assert expression is not None
print "test parser 5: {}".format(expression)
line = 'file_read(join([state.things_dir, "/", state.thing_name]), text)'
expression, left = read_expression_line(line)
assert expression is not None
print "test parser 6: {}".format(expression)
if __name__ == '__main__':
test_parser()
test_expression()
test_assignment()
| null | null | null | null |
[
0
] |
2,469 |
08c309645a4ee59716bdd00556096be1c784331a
|
<mask token>
class HandleYaml:
<mask token>
def __init__(self):
with open(YAML_FILE_PATH, 'r') as fs:
content = fs.read()
self.ya = yaml.load(content, yaml.FullLoader)
def get_value(self):
return self.ya
<mask token>
|
<mask token>
class HandleYaml:
"""
处理并封装yaml文件
"""
def __init__(self):
with open(YAML_FILE_PATH, 'r') as fs:
content = fs.read()
self.ya = yaml.load(content, yaml.FullLoader)
def get_value(self):
return self.ya
<mask token>
|
<mask token>
class HandleYaml:
"""
处理并封装yaml文件
"""
def __init__(self):
with open(YAML_FILE_PATH, 'r') as fs:
content = fs.read()
self.ya = yaml.load(content, yaml.FullLoader)
def get_value(self):
return self.ya
<mask token>
if __name__ == '__main__':
print(desired_caps)
|
<mask token>
class HandleYaml:
"""
处理并封装yaml文件
"""
def __init__(self):
with open(YAML_FILE_PATH, 'r') as fs:
content = fs.read()
self.ya = yaml.load(content, yaml.FullLoader)
def get_value(self):
return self.ya
desired_caps = HandleYaml().get_value()
if __name__ == '__main__':
print(desired_caps)
|
# -*- coding: UTF-8 -*-
'''=================================================
@Project -> File :AutoMailApp -> handle_yaml
@IDE :PyCharm
@Author :Mr. wang
@Date :2019/11/15 0015 19:53
@Desc :
=================================================='''
import yaml
from Common.dir_path import YAML_FILE_PATH
class HandleYaml():
"""
处理并封装yaml文件
"""
def __init__(self):
with open(YAML_FILE_PATH, 'r') as fs:
content = fs.read()
self.ya = yaml.load(content,yaml.FullLoader)
def get_value(self):
return self.ya
desired_caps = HandleYaml().get_value()
if __name__=="__main__":
print(desired_caps)
|
[
3,
4,
5,
6,
8
] |
2,470 |
a73dcfc21c31d4e984db39c072d11cb9a9c3d5e5
|
<mask token>
class ScheduledEventEntityMetadata(TypedDict):
location: str
class ScheduledEventSubscriber(TypedDict):
guild_scheduled_event_id: Snowflake
user: User
member: Member | None
|
<mask token>
class ScheduledEvent(TypedDict):
id: Snowflake
guild_id: Snowflake
channel_id: Snowflake
creator_id: Snowflake
name: str
description: str
image: str | None
scheduled_start_time: str
scheduled_end_time: str | None
privacy_level: ScheduledEventPrivacyLevel
status: ScheduledEventStatus
entity_type: ScheduledEventLocationType
entity_id: Snowflake
entity_metadata: ScheduledEventEntityMetadata
creator: User
user_count: int | None
class ScheduledEventEntityMetadata(TypedDict):
location: str
class ScheduledEventSubscriber(TypedDict):
guild_scheduled_event_id: Snowflake
user: User
member: Member | None
|
<mask token>
ScheduledEventStatus = Literal[1, 2, 3, 4]
ScheduledEventLocationType = Literal[1, 2, 3]
ScheduledEventPrivacyLevel = Literal[2]
class ScheduledEvent(TypedDict):
id: Snowflake
guild_id: Snowflake
channel_id: Snowflake
creator_id: Snowflake
name: str
description: str
image: str | None
scheduled_start_time: str
scheduled_end_time: str | None
privacy_level: ScheduledEventPrivacyLevel
status: ScheduledEventStatus
entity_type: ScheduledEventLocationType
entity_id: Snowflake
entity_metadata: ScheduledEventEntityMetadata
creator: User
user_count: int | None
class ScheduledEventEntityMetadata(TypedDict):
location: str
class ScheduledEventSubscriber(TypedDict):
guild_scheduled_event_id: Snowflake
user: User
member: Member | None
|
<mask token>
from __future__ import annotations
from typing import Literal, TypedDict
from .member import Member
from .snowflake import Snowflake
from .user import User
ScheduledEventStatus = Literal[1, 2, 3, 4]
ScheduledEventLocationType = Literal[1, 2, 3]
ScheduledEventPrivacyLevel = Literal[2]
class ScheduledEvent(TypedDict):
id: Snowflake
guild_id: Snowflake
channel_id: Snowflake
creator_id: Snowflake
name: str
description: str
image: str | None
scheduled_start_time: str
scheduled_end_time: str | None
privacy_level: ScheduledEventPrivacyLevel
status: ScheduledEventStatus
entity_type: ScheduledEventLocationType
entity_id: Snowflake
entity_metadata: ScheduledEventEntityMetadata
creator: User
user_count: int | None
class ScheduledEventEntityMetadata(TypedDict):
location: str
class ScheduledEventSubscriber(TypedDict):
guild_scheduled_event_id: Snowflake
user: User
member: Member | None
|
"""
The MIT License (MIT)
Copyright (c) 2021-present Pycord Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Literal, TypedDict
from .member import Member
from .snowflake import Snowflake
from .user import User
ScheduledEventStatus = Literal[1, 2, 3, 4]
ScheduledEventLocationType = Literal[1, 2, 3]
ScheduledEventPrivacyLevel = Literal[2]
class ScheduledEvent(TypedDict):
id: Snowflake
guild_id: Snowflake
channel_id: Snowflake
creator_id: Snowflake
name: str
description: str
image: str | None
scheduled_start_time: str
scheduled_end_time: str | None
privacy_level: ScheduledEventPrivacyLevel
status: ScheduledEventStatus
entity_type: ScheduledEventLocationType
entity_id: Snowflake
entity_metadata: ScheduledEventEntityMetadata
creator: User
user_count: int | None
class ScheduledEventEntityMetadata(TypedDict):
location: str
class ScheduledEventSubscriber(TypedDict):
guild_scheduled_event_id: Snowflake
user: User
member: Member | None
|
[
2,
3,
4,
5,
6
] |
2,471 |
ccd1e57518065963158984dda52297db45ce204e
|
<mask token>
|
etc_dictionary = {'2 30대': '이삼십대', '20~30대': '이삼십대', '20, 30대': '이십대 삼십대',
'1+1': '원플러스원', '3에서 6개월인': '3개월에서 육개월인'}
english_dictionary = {'Devsisters': '데브시스터즈', 'track': '트랙', 'LA': '엘에이',
'LG': '엘지', 'KOREA': '코리아', 'JSA': '제이에스에이', 'PGA': '피지에이', 'GA': '지에이',
'idol': '아이돌', 'KTX': '케이티엑스', 'AC': '에이씨', 'DVD': '디비디', 'US': '유에스',
'CNN': '씨엔엔', 'LPGA': '엘피지에이', 'P': '피', 'L': '엘', 'T': '티', 'B': '비',
'C': '씨', 'BIFF': '비아이에프에프', 'GV': '지비', 'IT': '아이티', 'IQ': '아이큐',
'JTBC': '제이티비씨', 'trickle down effect': '트리클 다운 이펙트',
'trickle up effect': '트리클 업 이펙트', 'down': '다운', 'up': '업', 'FCK':
'에프씨케이', 'AP': '에이피', 'WHERETHEWILDTHINGSARE': '', 'Rashomon Effect':
'', 'O': '오', 'OO': '오오', 'B': '비', 'GDP': '지디피', 'CIPA': '씨아이피에이',
'YS': '와이에스', 'Y': '와이', 'S': '에스', 'JTBC': '제이티비씨', 'PC': '피씨', 'bill':
'빌', 'Halmuny': '하모니', 'X': '엑스', 'SNS': '에스엔에스', 'ability': '어빌리티',
'shy': '', 'CCTV': '씨씨티비', 'IT': '아이티', 'the tenth man': '더 텐쓰 맨', 'L':
'엘', 'PC': '피씨', 'YSDJJPMB': '', 'Content Attitude Timing':
'컨텐트 애티튜드 타이밍', 'CAT': '캣', 'IS': '아이에스', 'SNS': '에스엔에스', 'K': '케이',
'Y': '와이', 'KDI': '케이디아이', 'DOC': '디오씨', 'CIA': '씨아이에이', 'PBS': '피비에스',
'D': '디', 'PPropertyPositionPowerPrisonPS': '에스', 'francisco': '프란시스코',
'I': '아이', 'III': '아이아이', 'No joke': '노 조크', 'BBK': '비비케이', 'LA': '엘에이',
'Don': '', 't worry be happy': ' 워리 비 해피', 'NO': '엔오', 'it was our sky':
'잇 워즈 아워 스카이', 'it is our sky': '잇 이즈 아워 스카이', 'NEIS': '엔이아이에스', 'IMF':
'아이엠에프', 'apology': '어폴로지', 'humble': '험블', 'M': '엠', 'Nowhere Man':
'노웨어 맨', 'The Tenth Man': '더 텐쓰 맨', 'PBS': '피비에스', 'BBC': '비비씨', 'MRJ':
'엠알제이', 'CCTV': '씨씨티비', 'Pick me up': '픽 미 업', 'DNA': '디엔에이', 'UN':
'유엔', 'STOP': '스탑', 'PRESS': '프레스', 'not to be': '낫 투비', 'Denial':
'디나이얼', 'G': '지', 'IMF': '아이엠에프', 'GDP': '지디피', 'JTBC': '제이티비씨',
'Time flies like an arrow': '타임 플라이즈 라이크 언 애로우', 'DDT': '디디티', 'AI':
'에이아이', 'Z': '제트', 'OECD': '오이씨디', 'N': '앤', 'A': '에이', 'MB': '엠비',
'EH': '이에이치', 'IS': '아이에스', 'TV': '티비', 'MIT': '엠아이티', 'KBO': '케이비오',
'I love America': '아이 러브 아메리카', 'SF': '에스에프', 'Q': '큐', 'KFX': '케이에프엑스',
'PM': '피엠', 'Prime Minister': '프라임 미니스터', 'Swordline': '스워드라인', 'TBS':
'티비에스', 'DDT': '디디티', 'CS': '씨에스', 'Reflecting Absence': '리플렉팅 앱센스',
'PBS': '피비에스', 'Drum being beaten by everyone': '드럼 빙 비튼 바이 에브리원',
'negative pressure': '네거티브 프레셔', 'F': '에프', 'KIA': '기아', 'FTA': '에프티에이',
'Que sais-je': '', 'UFC': '유에프씨', 'P': '피', 'DJ': '디제이', 'Chaebol':
'채벌', 'BBC': '비비씨', 'OECD': '오이씨디', 'BC': '삐씨', 'C': '씨', 'B': '씨',
'KY': '케이와이', 'K': '케이', 'CEO': '씨이오', 'YH': '와이에치', 'IS': '아이에스',
'who are you': '후 얼 유', 'Y': '와이', 'The Devils Advocate': '더 데빌즈 어드보카트',
'YS': '와이에스', 'so sorry': '쏘 쏘리', 'Santa': '산타', 'Big Endian': '빅 엔디안',
'Small Endian': '스몰 엔디안', 'Oh Captain My Captain': '오 캡틴 마이 캡틴', 'AIB':
'에이아이비', 'K': '케이', 'PBS': '피비에스'}
|
# coding: utf-8
etc_dictionary = {
'2 30대': '이삼십대',
'20~30대': '이삼십대',
'20, 30대': '이십대 삼십대',
'1+1': '원플러스원',
'3에서 6개월인': '3개월에서 육개월인',
}
english_dictionary = {
'Devsisters': '데브시스터즈',
'track': '트랙',
# krbook
'LA': '엘에이',
'LG': '엘지',
'KOREA': '코리아',
'JSA': '제이에스에이',
'PGA': '피지에이',
'GA': '지에이',
'idol': '아이돌',
'KTX': '케이티엑스',
'AC': '에이씨',
'DVD': '디비디',
'US': '유에스',
'CNN': '씨엔엔',
'LPGA': '엘피지에이',
'P': '피',
'L': '엘',
'T': '티',
'B': '비',
'C': '씨',
'BIFF': '비아이에프에프',
'GV': '지비',
# JTBC
'IT': '아이티',
'IQ': '아이큐',
'JTBC': '제이티비씨',
'trickle down effect': '트리클 다운 이펙트',
'trickle up effect': '트리클 업 이펙트',
'down': '다운',
'up': '업',
'FCK': '에프씨케이',
'AP': '에이피',
'WHERETHEWILDTHINGSARE': '',
'Rashomon Effect': '',
'O': '오',
'OO': '오오',
'B': '비',
'GDP': '지디피',
'CIPA': '씨아이피에이',
'YS': '와이에스',
'Y': '와이',
'S': '에스',
'JTBC': '제이티비씨',
'PC': '피씨',
'bill': '빌',
'Halmuny': '하모니', #####
'X': '엑스',
'SNS': '에스엔에스',
'ability': '어빌리티',
'shy': '',
'CCTV': '씨씨티비',
'IT': '아이티',
'the tenth man': '더 텐쓰 맨', ####
'L': '엘',
'PC': '피씨',
'YSDJJPMB': '', ########
'Content Attitude Timing': '컨텐트 애티튜드 타이밍',
'CAT': '캣',
'IS': '아이에스',
'SNS': '에스엔에스',
'K': '케이',
'Y': '와이',
'KDI': '케이디아이',
'DOC': '디오씨',
'CIA': '씨아이에이',
'PBS': '피비에스',
'D': '디',
'PPropertyPositionPowerPrisonP'
'S': '에스',
'francisco': '프란시스코',
'I': '아이',
'III': '아이아이', ######
'No joke': '노 조크',
'BBK': '비비케이',
'LA': '엘에이',
'Don': '',
't worry be happy': ' 워리 비 해피',
'NO': '엔오', #####
'it was our sky': '잇 워즈 아워 스카이',
'it is our sky': '잇 이즈 아워 스카이', ####
'NEIS': '엔이아이에스', #####
'IMF': '아이엠에프',
'apology': '어폴로지',
'humble': '험블',
'M': '엠',
'Nowhere Man': '노웨어 맨',
'The Tenth Man': '더 텐쓰 맨',
'PBS': '피비에스',
'BBC': '비비씨',
'MRJ': '엠알제이',
'CCTV': '씨씨티비',
'Pick me up': '픽 미 업',
'DNA': '디엔에이',
'UN': '유엔',
'STOP': '스탑', #####
'PRESS': '프레스', #####
'not to be': '낫 투비',
'Denial': '디나이얼',
'G': '지',
'IMF': '아이엠에프',
'GDP': '지디피',
'JTBC': '제이티비씨',
'Time flies like an arrow': '타임 플라이즈 라이크 언 애로우',
'DDT': '디디티',
'AI': '에이아이',
'Z': '제트',
'OECD': '오이씨디',
'N': '앤',
'A': '에이',
'MB': '엠비',
'EH': '이에이치',
'IS': '아이에스',
'TV': '티비',
'MIT': '엠아이티',
'KBO': '케이비오',
'I love America': '아이 러브 아메리카',
'SF': '에스에프',
'Q': '큐',
'KFX': '케이에프엑스',
'PM': '피엠',
'Prime Minister': '프라임 미니스터',
'Swordline': '스워드라인',
'TBS': '티비에스',
'DDT': '디디티',
'CS': '씨에스',
'Reflecting Absence': '리플렉팅 앱센스',
'PBS': '피비에스',
'Drum being beaten by everyone': '드럼 빙 비튼 바이 에브리원',
'negative pressure': '네거티브 프레셔',
'F': '에프',
'KIA': '기아',
'FTA': '에프티에이',
'Que sais-je': '',
'UFC': '유에프씨',
'P': '피',
'DJ': '디제이',
'Chaebol': '채벌',
'BBC': '비비씨',
'OECD': '오이씨디',
'BC': '삐씨',
'C': '씨',
'B': '씨',
'KY': '케이와이',
'K': '케이',
'CEO': '씨이오',
'YH': '와이에치',
'IS': '아이에스',
'who are you': '후 얼 유',
'Y': '와이',
'The Devils Advocate': '더 데빌즈 어드보카트',
'YS': '와이에스',
'so sorry': '쏘 쏘리',
'Santa': '산타',
'Big Endian': '빅 엔디안',
'Small Endian': '스몰 엔디안',
'Oh Captain My Captain': '오 캡틴 마이 캡틴',
'AIB': '에이아이비',
'K': '케이',
'PBS': '피비에스',
}
| null | null |
[
0,
1,
2
] |
2,472 |
ec0697d8d78fafe6bfd4630be2a1fb20eb9eb4cf
|
<mask token>
|
<mask token>
while True:
os.chdir('/home/ec2-user/ML-Processed')
print(str(os.getcwd()))
for f in os.listdir(os.getcwd()):
print('looping in file')
file_name, file_ext = os.path.splitext(f)
if file_ext == '.jpg':
print('working with this file ' + f)
print('about to upload ' + str(datetime.datetime.now()) + '\r\n')
s3.meta.client.upload_file(f, 'netball-ml-processed', str(sys.
argv[1]) + '/' + f)
print('Uploaded to S3 ' + str(datetime.datetime.now()) + '\r\n')
shutil.move(f, '/home/ec2-user/ML-Processed/shifted_to_s3')
print('should of moved the file locally')
print('sleeping, ')
time.sleep(2)
|
<mask token>
session = boto3.Session(profile_name='default')
s3 = boto3.resource('s3')
bucket = s3.Bucket('netball-ml-processed')
while True:
os.chdir('/home/ec2-user/ML-Processed')
print(str(os.getcwd()))
for f in os.listdir(os.getcwd()):
print('looping in file')
file_name, file_ext = os.path.splitext(f)
if file_ext == '.jpg':
print('working with this file ' + f)
print('about to upload ' + str(datetime.datetime.now()) + '\r\n')
s3.meta.client.upload_file(f, 'netball-ml-processed', str(sys.
argv[1]) + '/' + f)
print('Uploaded to S3 ' + str(datetime.datetime.now()) + '\r\n')
shutil.move(f, '/home/ec2-user/ML-Processed/shifted_to_s3')
print('should of moved the file locally')
print('sleeping, ')
time.sleep(2)
|
import boto3, os, shutil, datetime, time, sys
session = boto3.Session(profile_name='default')
s3 = boto3.resource('s3')
bucket = s3.Bucket('netball-ml-processed')
while True:
os.chdir('/home/ec2-user/ML-Processed')
print(str(os.getcwd()))
for f in os.listdir(os.getcwd()):
print('looping in file')
file_name, file_ext = os.path.splitext(f)
if file_ext == '.jpg':
print('working with this file ' + f)
print('about to upload ' + str(datetime.datetime.now()) + '\r\n')
s3.meta.client.upload_file(f, 'netball-ml-processed', str(sys.
argv[1]) + '/' + f)
print('Uploaded to S3 ' + str(datetime.datetime.now()) + '\r\n')
shutil.move(f, '/home/ec2-user/ML-Processed/shifted_to_s3')
print('should of moved the file locally')
print('sleeping, ')
time.sleep(2)
|
import boto3, os, shutil, datetime, time, sys
session = boto3.Session(profile_name='default')
s3 = boto3.resource('s3')
bucket = s3.Bucket('netball-ml-processed')
#print(bucket.objects)
#needs to be run with *** sudo **** otherwise it won't work...
while True:
#change to the motion working Directory
os.chdir('/home/ec2-user/ML-Processed')
print (str(os.getcwd()))
for f in os.listdir(os.getcwd()):
print("looping in file")
file_name, file_ext = os.path.splitext(f)
#need to check the file starts with 2 (as in the timestamp) and is a .jpg
if file_ext == '.jpg':
print("working with this file " + f)
print("about to upload " + str(datetime.datetime.now()) + "\r\n")
# s3.meta.client.upload_file('/Users/andrewhammond/s3_upload.jpg','netball-ml-processing', 's3_upload.jpg')
s3.meta.client.upload_file(f, 'netball-ml-processed', str(sys.argv[1]) + "/" + f)
print ("Uploaded to S3 " + str(datetime.datetime.now()) + "\r\n")
# once pushed to s3 need to shift locally.
shutil.move(f, '/home/ec2-user/ML-Processed/shifted_to_s3')
print ("should of moved the file locally")
print ("sleeping, ")
time.sleep(2)
|
[
0,
1,
2,
3,
4
] |
2,473 |
316a34bbc2b3e3c818ef837f51bc1f86863ea59a
|
<mask token>
|
<mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
|
<mask token>
class Migration(migrations.Migration):
dependencies = [('pancar', '0006_auto_20200526_1058')]
operations = [migrations.AlterField(model_name='process', name='price',
field=models.DecimalField(decimal_places=1, max_digits=5, null=True))]
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('pancar', '0006_auto_20200526_1058')]
operations = [migrations.AlterField(model_name='process', name='price',
field=models.DecimalField(decimal_places=1, max_digits=5, null=True))]
|
# Generated by Django 2.2.6 on 2020-05-27 19:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pancar', '0006_auto_20200526_1058'),
]
operations = [
migrations.AlterField(
model_name='process',
name='price',
field=models.DecimalField(decimal_places=1, max_digits=5, null=True),
),
]
|
[
0,
1,
2,
3,
4
] |
2,474 |
9cb5573fada7a1529507da1d031f836044c10066
|
<mask token>
|
class Solution:
<mask token>
|
class Solution:
def longestConsecutive(self, nums) ->int:
s = set(nums)
answer = 0
for value in s:
if value - 1 not in s:
j = value
while j in s:
j = j + 1
answer = max(answer, j - value)
return answer
|
class Solution:
def longestConsecutive(self, nums) -> int:
s = set(nums)
answer = 0
# n = len(s)
for value in s:
if value - 1 not in s:
j = value
while (j in s):
j = j + 1
answer = max(answer, j - value)
return answer
| null |
[
0,
1,
2,
3
] |
2,475 |
43ae01ffe35c6c4491f3f7e480dd6f5c1be86eb2
|
<mask token>
|
<mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
|
<mask token>
class Migration(migrations.Migration):
dependencies = [('element', '0011_suggestion_suggestion_type'), ('bot',
'0001_initial')]
operations = [migrations.AddField(model_name='discorduser', name=
'has_elements', field=models.ManyToManyField(to='element.Element'))]
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('element', '0011_suggestion_suggestion_type'), ('bot',
'0001_initial')]
operations = [migrations.AddField(model_name='discorduser', name=
'has_elements', field=models.ManyToManyField(to='element.Element'))]
|
# Generated by Django 3.1.1 on 2020-12-02 19:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('element', '0011_suggestion_suggestion_type'),
('bot', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='discorduser',
name='has_elements',
field=models.ManyToManyField(to='element.Element'),
),
]
|
[
0,
1,
2,
3,
4
] |
2,476 |
24c1f5195bad17f995fb97a03218fc9bbe5ce4cd
|
<mask token>
|
<mask token>
def lis(n1, n2):
"""
Generate and print last 5 element in list.
param:n1,n2
"""
i = 0
if n1 and n2 <= 20:
for x in range(n1, n2 + 1):
lis1.append(x * x)
lis1.reverse()
for y in lis1:
if i <= 4:
lis2.append(y)
i += 1
print(lis2)
else:
print('Value out of range')
<mask token>
|
<mask token>
def lis(n1, n2):
"""
Generate and print last 5 element in list.
param:n1,n2
"""
i = 0
if n1 and n2 <= 20:
for x in range(n1, n2 + 1):
lis1.append(x * x)
lis1.reverse()
for y in lis1:
if i <= 4:
lis2.append(y)
i += 1
print(lis2)
else:
print('Value out of range')
lis(input_num[0], input_num[1])
|
<mask token>
input_num = input('Write number:')
lis1 = []
lis2 = []
def lis(n1, n2):
"""
Generate and print last 5 element in list.
param:n1,n2
"""
i = 0
if n1 and n2 <= 20:
for x in range(n1, n2 + 1):
lis1.append(x * x)
lis1.reverse()
for y in lis1:
if i <= 4:
lis2.append(y)
i += 1
print(lis2)
else:
print('Value out of range')
lis(input_num[0], input_num[1])
|
"""
Question 39:
Define a function which can generate a list where the values are square of numbers between 1 and
20 (both included). Then the function needs to print the last 5 elements in the list.
"""
#To get a value from console input.
input_num = input("Write number:")
lis1=[]
lis2=[]
def lis(n1,n2):
"""
Generate and print last 5 element in list.
param:n1,n2
"""
i = 0
if n1 and n2 <= 20:
for x in range(n1,n2+1):
lis1.append(x*x)
lis1.reverse()
for y in lis1:
if i <=4:
lis2.append(y)
i +=1
print(lis2)
else:
print("Value out of range")
# Calling function.
lis(input_num[0],input_num[1])
|
[
0,
1,
2,
3,
4
] |
2,477 |
671a7ee3fabee6ed8dfafe1bddefb1f94322b0e5
|
<mask token>
|
<mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
|
<mask token>
class Migration(migrations.Migration):
dependencies = [('articles', '0014_auto_20180726_0926')]
operations = [migrations.AlterField(model_name='articles', name=
'cover_url', field=models.URLField(default=
'http://pcgsvdl00.bkt.clouddn.com/default/articles/article_01.jpg',
max_length=500, verbose_name='封面图')), migrations.AlterField(
model_name='series', name='cover_url', field=models.URLField(
default=
'http://pcgsvdl00.bkt.clouddn.com/default/series/series_01.jpg',
max_length=500, verbose_name='封面图')), migrations.AlterField(
model_name='specialcolumn', name='cover_url', field=models.URLField
(default=
'http://pcgsvdl00.bkt.clouddn.com/default/specialColumn/special_01.jpg'
, max_length=500, verbose_name='封面图'))]
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('articles', '0014_auto_20180726_0926')]
operations = [migrations.AlterField(model_name='articles', name=
'cover_url', field=models.URLField(default=
'http://pcgsvdl00.bkt.clouddn.com/default/articles/article_01.jpg',
max_length=500, verbose_name='封面图')), migrations.AlterField(
model_name='series', name='cover_url', field=models.URLField(
default=
'http://pcgsvdl00.bkt.clouddn.com/default/series/series_01.jpg',
max_length=500, verbose_name='封面图')), migrations.AlterField(
model_name='specialcolumn', name='cover_url', field=models.URLField
(default=
'http://pcgsvdl00.bkt.clouddn.com/default/specialColumn/special_01.jpg'
, max_length=500, verbose_name='封面图'))]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-07-26 19:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0014_auto_20180726_0926'),
]
operations = [
migrations.AlterField(
model_name='articles',
name='cover_url',
field=models.URLField(default='http://pcgsvdl00.bkt.clouddn.com/default/articles/article_01.jpg', max_length=500, verbose_name='封面图'),
),
migrations.AlterField(
model_name='series',
name='cover_url',
field=models.URLField(default='http://pcgsvdl00.bkt.clouddn.com/default/series/series_01.jpg', max_length=500, verbose_name='封面图'),
),
migrations.AlterField(
model_name='specialcolumn',
name='cover_url',
field=models.URLField(default='http://pcgsvdl00.bkt.clouddn.com/default/specialColumn/special_01.jpg', max_length=500, verbose_name='封面图'),
),
]
|
[
0,
1,
2,
3,
4
] |
2,478 |
8c0377b70b902e6e61351869a4378b4c2c50a3a7
|
<mask token>
|
def get_all_lefts(word, substring):
if len(substring) == 0:
yield (len(word), word),
elif substring[0] not in word:
yield -1,
else:
for i in range(len(word)):
if word[i] == substring[0]:
for sub_sequance in get_all_lefts(word[i + 1:], substring[1:]):
yield (i, word[:i]), *sub_sequance
<mask token>
|
def get_all_lefts(word, substring):
if len(substring) == 0:
yield (len(word), word),
elif substring[0] not in word:
yield -1,
else:
for i in range(len(word)):
if word[i] == substring[0]:
for sub_sequance in get_all_lefts(word[i + 1:], substring[1:]):
yield (i, word[:i]), *sub_sequance
if __name__ == '__main__':
word = input('')
substring = input('')
maxNum = 0
for lefts in map(list, get_all_lefts(word, substring)):
if -1 in lefts:
continue
print(lefts)
print(maxNum)
|
def get_all_lefts(word,substring):
if len(substring) == 0:
yield ((len(word),word),)
else:
if substring[0] not in word:
yield (-1,)
else:
for i in range(len(word)):
if word[i] == substring[0]:
for sub_sequance in get_all_lefts(word[i+1:],substring[1:]):
yield ((i,word[:i]),*sub_sequance)
if __name__ == '__main__':
word = input('')
substring = input('')
maxNum = 0
for lefts in map(list,get_all_lefts(word,substring)):
if -1 in lefts:
continue
print(lefts)
print(maxNum)
| null |
[
0,
1,
2,
3
] |
2,479 |
6d2581b83a2839dcbc644ca572b05b158d80b58d
|
<mask token>
class Keychains(DeviceFeature):
<mask token>
class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):
class KeyChainAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
keychain_attr = managedattribute(name='keychain_attr', read_only=
True, doc=KeyChainAttributes.__doc__)
@keychain_attr.initter
def keychain_attr(self):
return SubAttributesDict(self.KeyChainAttributes, parent=self)
class KeyChainMacSecAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.ms_key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
ms_keychain_attr = managedattribute(name='ms_keychain_attr',
read_only=True, doc=KeyChainMacSecAttributes.__doc__)
@ms_keychain_attr.initter
def ms_keychain_attr(self):
return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self
)
class KeyChainTunEncAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.te_key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
te_keychain_attr = managedattribute(name='te_keychain_attr',
read_only=True, doc=KeyChainTunEncAttributes.__doc__)
@te_keychain_attr.initter
def te_keychain_attr(self):
return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self
)
<mask token>
@device_attr.initter
def device_attr(self):
return SubAttributesDict(self.DeviceAttributes, parent=self)
<mask token>
<mask token>
<mask token>
class CRYPTO_ALGO(Enum):
aes_128_cmac = 'aes-128-cmac'
aes_256_cmac = 'aes-256-cmac'
<mask token>
<mask token>
<mask token>
<mask token>
def build_unconfig(self, devices=None, interfaces=None, links=None,
apply=True, attributes=None, **kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = consolidate_feature_args(self, devices,
interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices, sort=True):
cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
|
<mask token>
class Keychains(DeviceFeature):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):
class KeyChainAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
keychain_attr = managedattribute(name='keychain_attr', read_only=
True, doc=KeyChainAttributes.__doc__)
@keychain_attr.initter
def keychain_attr(self):
return SubAttributesDict(self.KeyChainAttributes, parent=self)
class KeyChainMacSecAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.ms_key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
ms_keychain_attr = managedattribute(name='ms_keychain_attr',
read_only=True, doc=KeyChainMacSecAttributes.__doc__)
@ms_keychain_attr.initter
def ms_keychain_attr(self):
return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self
)
class KeyChainTunEncAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.te_key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
te_keychain_attr = managedattribute(name='te_keychain_attr',
read_only=True, doc=KeyChainTunEncAttributes.__doc__)
@te_keychain_attr.initter
def te_keychain_attr(self):
return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self
)
device_attr = managedattribute(name='device_attr', read_only=True, doc=
DeviceAttributes.__doc__)
@device_attr.initter
def device_attr(self):
return SubAttributesDict(self.DeviceAttributes, parent=self)
key_id = managedattribute(name='key_id', default=None, type=(None,
managedattribute.test_istype(str)), doc='Configure a key')
key_enc_type = managedattribute(name='key_enc_type', default=None, type
=managedattribute.test_istype(int), doc='Set key encode type')
key_string = managedattribute(name='key_string', default=None, type=(
None, managedattribute.test_istype(str)), doc='Set key string')
class CRYPTO_ALGO(Enum):
aes_128_cmac = 'aes-128-cmac'
aes_256_cmac = 'aes-256-cmac'
crypto_algo = managedattribute(name='crypto_algo', default=None, type=(
None, CRYPTO_ALGO), doc='Set cryptographic authentication algorithm')
lifetime_start = managedattribute(name='lifetime_start', default=None,
type=(None, managedattribute.test_istype(str)), doc=
'Set start time for sending lifetime of encryption key')
lifetime_duration = managedattribute(name='lifetime_duration', default=
None, type=(None, managedattribute.test_istype(int)), doc=
'Set key lifetime duration')
def build_config(self, devices=None, interfaces=None, links=None, apply
=True, attributes=None, **kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = consolidate_feature_args(self, devices,
interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices, sort=True):
cfgs[key] = sub.build_config(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
def build_unconfig(self, devices=None, interfaces=None, links=None,
apply=True, attributes=None, **kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = consolidate_feature_args(self, devices,
interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices, sort=True):
cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
|
<mask token>
__all__ = 'Keychains',
class Keychains(DeviceFeature):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):
class KeyChainAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
keychain_attr = managedattribute(name='keychain_attr', read_only=
True, doc=KeyChainAttributes.__doc__)
@keychain_attr.initter
def keychain_attr(self):
return SubAttributesDict(self.KeyChainAttributes, parent=self)
class KeyChainMacSecAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.ms_key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
ms_keychain_attr = managedattribute(name='ms_keychain_attr',
read_only=True, doc=KeyChainMacSecAttributes.__doc__)
@ms_keychain_attr.initter
def ms_keychain_attr(self):
return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self
)
class KeyChainTunEncAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.te_key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
te_keychain_attr = managedattribute(name='te_keychain_attr',
read_only=True, doc=KeyChainTunEncAttributes.__doc__)
@te_keychain_attr.initter
def te_keychain_attr(self):
return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self
)
device_attr = managedattribute(name='device_attr', read_only=True, doc=
DeviceAttributes.__doc__)
@device_attr.initter
def device_attr(self):
return SubAttributesDict(self.DeviceAttributes, parent=self)
key_id = managedattribute(name='key_id', default=None, type=(None,
managedattribute.test_istype(str)), doc='Configure a key')
key_enc_type = managedattribute(name='key_enc_type', default=None, type
=managedattribute.test_istype(int), doc='Set key encode type')
key_string = managedattribute(name='key_string', default=None, type=(
None, managedattribute.test_istype(str)), doc='Set key string')
class CRYPTO_ALGO(Enum):
aes_128_cmac = 'aes-128-cmac'
aes_256_cmac = 'aes-256-cmac'
crypto_algo = managedattribute(name='crypto_algo', default=None, type=(
None, CRYPTO_ALGO), doc='Set cryptographic authentication algorithm')
lifetime_start = managedattribute(name='lifetime_start', default=None,
type=(None, managedattribute.test_istype(str)), doc=
'Set start time for sending lifetime of encryption key')
lifetime_duration = managedattribute(name='lifetime_duration', default=
None, type=(None, managedattribute.test_istype(int)), doc=
'Set key lifetime duration')
def build_config(self, devices=None, interfaces=None, links=None, apply
=True, attributes=None, **kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = consolidate_feature_args(self, devices,
interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices, sort=True):
cfgs[key] = sub.build_config(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
def build_unconfig(self, devices=None, interfaces=None, links=None,
apply=True, attributes=None, **kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = consolidate_feature_args(self, devices,
interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices, sort=True):
cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
|
from enum import Enum
from genie.decorator import managedattribute
from genie.conf.base import Base, DeviceFeature, LinkFeature, Interface
import genie.conf.base.attributes
from genie.libs.conf.base.feature import consolidate_feature_args
from genie.conf.base.attributes import SubAttributes, SubAttributesDict, AttributesHelper, KeyedSubAttributes
from genie.conf.base.attributes import InterfaceSubAttributes
from genie.libs import parser
from genie.abstract import Lookup
from genie.ops.base import Base as ops_Base
from genie.ops.base import Context
__all__ = 'Keychains',
class Keychains(DeviceFeature):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):
class KeyChainAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
keychain_attr = managedattribute(name='keychain_attr', read_only=
True, doc=KeyChainAttributes.__doc__)
@keychain_attr.initter
def keychain_attr(self):
return SubAttributesDict(self.KeyChainAttributes, parent=self)
class KeyChainMacSecAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.ms_key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
ms_keychain_attr = managedattribute(name='ms_keychain_attr',
read_only=True, doc=KeyChainMacSecAttributes.__doc__)
@ms_keychain_attr.initter
def ms_keychain_attr(self):
return SubAttributesDict(self.KeyChainMacSecAttributes, parent=self
)
class KeyChainTunEncAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.te_key_chain = key
super().__init__(parent)
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr', read_only=
True, doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
te_keychain_attr = managedattribute(name='te_keychain_attr',
read_only=True, doc=KeyChainTunEncAttributes.__doc__)
@te_keychain_attr.initter
def te_keychain_attr(self):
return SubAttributesDict(self.KeyChainTunEncAttributes, parent=self
)
device_attr = managedattribute(name='device_attr', read_only=True, doc=
DeviceAttributes.__doc__)
@device_attr.initter
def device_attr(self):
return SubAttributesDict(self.DeviceAttributes, parent=self)
key_id = managedattribute(name='key_id', default=None, type=(None,
managedattribute.test_istype(str)), doc='Configure a key')
key_enc_type = managedattribute(name='key_enc_type', default=None, type
=managedattribute.test_istype(int), doc='Set key encode type')
key_string = managedattribute(name='key_string', default=None, type=(
None, managedattribute.test_istype(str)), doc='Set key string')
class CRYPTO_ALGO(Enum):
aes_128_cmac = 'aes-128-cmac'
aes_256_cmac = 'aes-256-cmac'
crypto_algo = managedattribute(name='crypto_algo', default=None, type=(
None, CRYPTO_ALGO), doc='Set cryptographic authentication algorithm')
lifetime_start = managedattribute(name='lifetime_start', default=None,
type=(None, managedattribute.test_istype(str)), doc=
'Set start time for sending lifetime of encryption key')
lifetime_duration = managedattribute(name='lifetime_duration', default=
None, type=(None, managedattribute.test_istype(int)), doc=
'Set key lifetime duration')
def build_config(self, devices=None, interfaces=None, links=None, apply
=True, attributes=None, **kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = consolidate_feature_args(self, devices,
interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices, sort=True):
cfgs[key] = sub.build_config(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
def build_unconfig(self, devices=None, interfaces=None, links=None,
apply=True, attributes=None, **kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = consolidate_feature_args(self, devices,
interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices, sort=True):
cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
|
from enum import Enum
# Genie
from genie.decorator import managedattribute
from genie.conf.base import Base, \
DeviceFeature, \
LinkFeature, \
Interface
import genie.conf.base.attributes
from genie.libs.conf.base.feature import consolidate_feature_args
from genie.conf.base.attributes import SubAttributes, \
SubAttributesDict, \
AttributesHelper, \
KeyedSubAttributes
from genie.conf.base.attributes import InterfaceSubAttributes
from genie.libs import parser
from genie.abstract import Lookup
from genie.ops.base import Base as ops_Base
from genie.ops.base import Context
__all__ = ('Keychains', )
# Structure Hierarchy:
# Keychains
# +--DeviceAttributes
# +-- KeyChainAttributes
# | +-- KeyIdAttributes
# +-- KeyChainMacSecAttributes
# | +-- KeyIdAttributes
# +-- KeyChainTunEncAttributes
# +-- KeyIdAttributes
class Keychains(DeviceFeature):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# =============================================
# Device attributes
# =============================================
class DeviceAttributes(genie.conf.base.attributes.DeviceSubAttributes):
# KeyChainAttributes
class KeyChainAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_chain = key
super().__init__(parent)
# KeyIdAttributes
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr',
read_only=True,
doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
keychain_attr = managedattribute(name='keychain_attr',
read_only=True,
doc=KeyChainAttributes.__doc__)
@keychain_attr.initter
def keychain_attr(self):
return SubAttributesDict(self.KeyChainAttributes, parent=self)
# KeyChainMacSecAttributes
class KeyChainMacSecAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.ms_key_chain = key
super().__init__(parent)
# KeyIdAttributes
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr',
read_only=True,
doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
ms_keychain_attr = managedattribute(
name='ms_keychain_attr',
read_only=True,
doc=KeyChainMacSecAttributes.__doc__)
@ms_keychain_attr.initter
def ms_keychain_attr(self):
return SubAttributesDict(self.KeyChainMacSecAttributes,
parent=self)
# KeyChainTunEncAttributes
class KeyChainTunEncAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.te_key_chain = key
super().__init__(parent)
# KeyIdAttributes
class KeyIdAttributes(KeyedSubAttributes):
def __init__(self, parent, key):
self.key_id = key
super().__init__(parent)
key_id_attr = managedattribute(name='key_id_attr',
read_only=True,
doc=KeyIdAttributes.__doc__)
@key_id_attr.initter
def key_id_attr(self):
return SubAttributesDict(self.KeyIdAttributes, parent=self)
te_keychain_attr = managedattribute(
name='te_keychain_attr',
read_only=True,
doc=KeyChainTunEncAttributes.__doc__)
@te_keychain_attr.initter
def te_keychain_attr(self):
return SubAttributesDict(self.KeyChainTunEncAttributes,
parent=self)
device_attr = managedattribute(name='device_attr',
read_only=True,
doc=DeviceAttributes.__doc__)
@device_attr.initter
def device_attr(self):
return SubAttributesDict(self.DeviceAttributes, parent=self)
# ============ managedattributes ============#
key_id = managedattribute(name='key_id',
default=None,
type=(None, managedattribute.test_istype(str)),
doc='Configure a key')
key_enc_type = managedattribute(name='key_enc_type',
default=None,
type=managedattribute.test_istype(int),
doc='Set key encode type')
key_string = managedattribute(name='key_string',
default=None,
type=(None,
managedattribute.test_istype(str)),
doc='Set key string')
class CRYPTO_ALGO(Enum):
aes_128_cmac = 'aes-128-cmac'
aes_256_cmac = 'aes-256-cmac'
crypto_algo = managedattribute(
name='crypto_algo',
default=None,
type=(None, CRYPTO_ALGO),
doc='Set cryptographic authentication algorithm')
lifetime_start = managedattribute(
name='lifetime_start',
default=None,
type=(None, managedattribute.test_istype(str)),
doc='Set start time for sending lifetime of encryption key')
lifetime_duration = managedattribute(
name='lifetime_duration',
default=None,
type=(None, managedattribute.test_istype(int)),
doc='Set key lifetime duration')
# =========================================================
# build_config
# =========================================================
def build_config(self,
devices=None,
interfaces=None,
links=None,
apply=True,
attributes=None,
**kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = \
consolidate_feature_args(self, devices, interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices,
sort=True):
cfgs[key] = sub.build_config(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
def build_unconfig(self,
devices=None,
interfaces=None,
links=None,
apply=True,
attributes=None,
**kwargs):
attributes = AttributesHelper(self, attributes)
cfgs = {}
devices, interfaces, links = \
consolidate_feature_args(self, devices, interfaces, links)
for key, sub, attributes2 in attributes.mapping_items('device_attr',
keys=devices,
sort=True):
cfgs[key] = sub.build_unconfig(apply=False, attributes=attributes2)
if apply:
for device_name, cfg in sorted(cfgs.items()):
self.testbed.config_on_devices(cfg, fail_invalid=True)
else:
return cfgs
|
[
3,
6,
7,
8,
9
] |
2,480 |
3f4b484f435936137cb8511ec6e0aa89efb267c4
|
# Given a stream of numbers, print average (or mean) of the stream at every point.
def getAverage(prev_avg, val, n):
return ((prev_avg * n) + val) / (n + 1)
def findAndPrintMovingAvgs(arr):
cur_avg = 0
for i in range(len(arr)):
cur_avg = getAverage(cur_avg, arr[i], i)
print "Avg at", i, "is", cur_avg
arr = [10, 20, 30, 40, 50]
findAndPrintMovingAvgs(arr)
| null | null | null | null |
[
0
] |
2,481 |
32105a245f6945dbe8749140d811b20d634289bc
|
<mask token>
class CycleGANVC2LossCalculator:
def __init__(self):
pass
<mask token>
@staticmethod
def gen_loss(discriminator, y):
y_dis = discriminator(y)
return F.mean(F.softplus(-y_dis))
<mask token>
<mask token>
<mask token>
|
<mask token>
class CycleGANVC2LossCalculator:
def __init__(self):
pass
@staticmethod
def dis_loss(discriminator, y, t):
y_dis = discriminator(y)
t_dis = discriminator(t)
return F.mean(F.softplus(-t_dis)) + F.mean(F.softplus(y_dis))
@staticmethod
def gen_loss(discriminator, y):
y_dis = discriminator(y)
return F.mean(F.softplus(-y_dis))
@staticmethod
def cycle_loss(y, t):
return 10.0 * F.mean_absolute_error(y, t)
@staticmethod
def identity_loss(y, t):
return 5.0 * F.mean_absolute_error(y, t)
def train(epochs, iterations, batchsize, modeldir, extension, time_width,
mel_bins, sampling_rate, g_learning_rate, d_learning_rate, beta1, beta2,
identity_epoch, second_step, src_path, tgt_path):
dataset = DatasetLoader(src_path, tgt_path, extension, time_width,
mel_bins, sampling_rate)
print(dataset)
generator_xy = Generator()
generator_xy.to_gpu()
gen_xy_opt = set_optimizer(generator_xy, g_learning_rate, beta1, beta2)
generator_yx = Generator()
generator_yx.to_gpu()
gen_yx_opt = set_optimizer(generator_yx, g_learning_rate, beta1, beta2)
discriminator_y = Discriminator()
discriminator_y.to_gpu()
dis_y_opt = set_optimizer(discriminator_y, d_learning_rate, beta1, beta2)
discriminator_x = Discriminator()
discriminator_x.to_gpu()
dis_x_opt = set_optimizer(discriminator_x, d_learning_rate, beta1, beta2)
discriminator_xyx = Discriminator()
discriminator_xyx.to_gpu()
dis_xyx_opt = set_optimizer(discriminator_xyx, d_learning_rate, beta1,
beta2)
discriminator_yxy = Discriminator()
discriminator_yxy.to_gpu()
dis_yxy_opt = set_optimizer(discriminator_yxy, d_learning_rate, beta1,
beta2)
lossfunc = CycleGANVC2LossCalculator()
for epoch in range(epochs):
sum_dis_loss = 0
sum_gen_loss = 0
for batch in range(0, iterations, batchsize):
x, y = dataset.train(batchsize)
xy = generator_xy(x)
xyx = generator_yx(xy)
yx = generator_yx(y)
yxy = generator_xy(yx)
xy.unchain_backward()
xyx.unchain_backward()
yx.unchain_backward()
yxy.unchain_backward()
dis_loss = lossfunc.dis_loss(discriminator_y, xy, y)
dis_loss += lossfunc.dis_loss(discriminator_x, yx, x)
if second_step:
dis_loss += lossfunc.dis_loss(discriminator_xyx, xyx, x)
dis_loss += lossfunc.dis_loss(discriminator_yxy, yxy, y)
discriminator_xyx.cleargrads()
discriminator_yxy.cleargrads()
discriminator_x.cleargrads()
discriminator_y.cleargrads()
dis_loss.backward()
dis_x_opt.update()
dis_y_opt.update()
if second_step:
dis_xyx_opt.update()
dis_yxy_opt.update()
dis_loss.unchain_backward()
xy = generator_xy(x)
xyx = generator_yx(xy)
id_y = generator_xy(y)
yx = generator_yx(y)
yxy = generator_xy(yx)
id_x = generator_yx(x)
gen_loss = lossfunc.gen_loss(discriminator_y, xy)
gen_loss += lossfunc.gen_loss(discriminator_x, yx)
if second_step:
gen_loss += lossfunc.gen_loss(discriminator_yxy, yxy)
gen_loss += lossfunc.gen_loss(discriminator_xyx, xyx)
gen_loss += lossfunc.cycle_loss(x, xyx)
gen_loss += lossfunc.cycle_loss(y, xyx)
if epoch < identity_epoch:
gen_loss += lossfunc.identity_loss(id_y, y)
gen_loss += lossfunc.identity_loss(id_x, x)
generator_xy.cleargrads()
generator_yx.cleargrads()
gen_loss.backward()
gen_xy_opt.update()
gen_yx_opt.update()
gen_loss.unchain_backward()
sum_dis_loss += dis_loss.data
sum_gen_loss += gen_loss.data
if batch == 0:
serializers.save_npz(f'{modeldir}/generator_xy_{epoch}.model',
generator_xy)
serializers.save_npz(f'{modeldir}/generator_yx_{epoch}.model',
generator_yx)
print('epoch : {}'.format(epoch))
print('Generator loss : {}'.format(sum_gen_loss / iterations))
print('Discriminator loss : {}'.format(sum_dis_loss / iterations))
<mask token>
|
<mask token>
xp = cuda.cupy
cuda.get_device(0).use()
class CycleGANVC2LossCalculator:
def __init__(self):
pass
@staticmethod
def dis_loss(discriminator, y, t):
y_dis = discriminator(y)
t_dis = discriminator(t)
return F.mean(F.softplus(-t_dis)) + F.mean(F.softplus(y_dis))
@staticmethod
def gen_loss(discriminator, y):
y_dis = discriminator(y)
return F.mean(F.softplus(-y_dis))
@staticmethod
def cycle_loss(y, t):
return 10.0 * F.mean_absolute_error(y, t)
@staticmethod
def identity_loss(y, t):
return 5.0 * F.mean_absolute_error(y, t)
def train(epochs, iterations, batchsize, modeldir, extension, time_width,
mel_bins, sampling_rate, g_learning_rate, d_learning_rate, beta1, beta2,
identity_epoch, second_step, src_path, tgt_path):
dataset = DatasetLoader(src_path, tgt_path, extension, time_width,
mel_bins, sampling_rate)
print(dataset)
generator_xy = Generator()
generator_xy.to_gpu()
gen_xy_opt = set_optimizer(generator_xy, g_learning_rate, beta1, beta2)
generator_yx = Generator()
generator_yx.to_gpu()
gen_yx_opt = set_optimizer(generator_yx, g_learning_rate, beta1, beta2)
discriminator_y = Discriminator()
discriminator_y.to_gpu()
dis_y_opt = set_optimizer(discriminator_y, d_learning_rate, beta1, beta2)
discriminator_x = Discriminator()
discriminator_x.to_gpu()
dis_x_opt = set_optimizer(discriminator_x, d_learning_rate, beta1, beta2)
discriminator_xyx = Discriminator()
discriminator_xyx.to_gpu()
dis_xyx_opt = set_optimizer(discriminator_xyx, d_learning_rate, beta1,
beta2)
discriminator_yxy = Discriminator()
discriminator_yxy.to_gpu()
dis_yxy_opt = set_optimizer(discriminator_yxy, d_learning_rate, beta1,
beta2)
lossfunc = CycleGANVC2LossCalculator()
for epoch in range(epochs):
sum_dis_loss = 0
sum_gen_loss = 0
for batch in range(0, iterations, batchsize):
x, y = dataset.train(batchsize)
xy = generator_xy(x)
xyx = generator_yx(xy)
yx = generator_yx(y)
yxy = generator_xy(yx)
xy.unchain_backward()
xyx.unchain_backward()
yx.unchain_backward()
yxy.unchain_backward()
dis_loss = lossfunc.dis_loss(discriminator_y, xy, y)
dis_loss += lossfunc.dis_loss(discriminator_x, yx, x)
if second_step:
dis_loss += lossfunc.dis_loss(discriminator_xyx, xyx, x)
dis_loss += lossfunc.dis_loss(discriminator_yxy, yxy, y)
discriminator_xyx.cleargrads()
discriminator_yxy.cleargrads()
discriminator_x.cleargrads()
discriminator_y.cleargrads()
dis_loss.backward()
dis_x_opt.update()
dis_y_opt.update()
if second_step:
dis_xyx_opt.update()
dis_yxy_opt.update()
dis_loss.unchain_backward()
xy = generator_xy(x)
xyx = generator_yx(xy)
id_y = generator_xy(y)
yx = generator_yx(y)
yxy = generator_xy(yx)
id_x = generator_yx(x)
gen_loss = lossfunc.gen_loss(discriminator_y, xy)
gen_loss += lossfunc.gen_loss(discriminator_x, yx)
if second_step:
gen_loss += lossfunc.gen_loss(discriminator_yxy, yxy)
gen_loss += lossfunc.gen_loss(discriminator_xyx, xyx)
gen_loss += lossfunc.cycle_loss(x, xyx)
gen_loss += lossfunc.cycle_loss(y, xyx)
if epoch < identity_epoch:
gen_loss += lossfunc.identity_loss(id_y, y)
gen_loss += lossfunc.identity_loss(id_x, x)
generator_xy.cleargrads()
generator_yx.cleargrads()
gen_loss.backward()
gen_xy_opt.update()
gen_yx_opt.update()
gen_loss.unchain_backward()
sum_dis_loss += dis_loss.data
sum_gen_loss += gen_loss.data
if batch == 0:
serializers.save_npz(f'{modeldir}/generator_xy_{epoch}.model',
generator_xy)
serializers.save_npz(f'{modeldir}/generator_yx_{epoch}.model',
generator_yx)
print('epoch : {}'.format(epoch))
print('Generator loss : {}'.format(sum_gen_loss / iterations))
print('Discriminator loss : {}'.format(sum_dis_loss / iterations))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='StarGANVC2')
parser.add_argument('--e', type=int, default=50, help=
'the number of epochs')
parser.add_argument('--i', type=int, default=1000, help=
'the number of iterations')
parser.add_argument('--b', type=int, default=16, help='batch size')
parser.add_argument('--modeldir', type=Path, default='modeldir', help=
'model output directory')
parser.add_argument('--ext', type=str, default='.npy', help=
'extension of training data')
parser.add_argument('--tw', type=int, default=128, help=
'time width of spectral envelope')
parser.add_argument('--mb', type=int, default=36, help=
'mel bins of spectral envelope')
parser.add_argument('--sr', type=int, default=22050, help=
'sampling rate of audio data')
parser.add_argument('--glr', type=float, default=0.0002, help=
'learning rate of Adam on generator')
parser.add_argument('--dlr', type=float, default=0.0001, help=
'learning rate of Adam on discriminator')
parser.add_argument('--b1', type=float, default=0.5, help='beta1 of Adam')
parser.add_argument('--b2', type=float, default=0.999, help='beta2 of Adam'
)
parser.add_argument('--ie', type=int, default=20, help=
'time spans enabling identity mapping loss')
parser.add_argument('--second', action='store_true', help=
'enabling second step of adversaria loss')
parser.add_argument('--src', type=Path, help=
'path which includes source data')
parser.add_argument('--tgt', type=Path, help=
'path which includes target data')
args = parser.parse_args()
modeldir = args.modeldir
modeldir.mkdir(exist_ok=True)
train(args.e, args.i, args.b, modeldir, args.ext, args.tw, args.mb,
args.sr, args.glr, args.dlr, args.b1, args.b2, args.ie, args.second,
args.src, args.tgt)
|
import chainer
import chainer.functions as F
import numpy as np
import argparse
from model import Generator, Discriminator
from chainer import cuda, serializers
from pathlib import Path
from utils import set_optimizer
from dataset import DatasetLoader
xp = cuda.cupy
cuda.get_device(0).use()
class CycleGANVC2LossCalculator:
def __init__(self):
pass
@staticmethod
def dis_loss(discriminator, y, t):
y_dis = discriminator(y)
t_dis = discriminator(t)
return F.mean(F.softplus(-t_dis)) + F.mean(F.softplus(y_dis))
@staticmethod
def gen_loss(discriminator, y):
y_dis = discriminator(y)
return F.mean(F.softplus(-y_dis))
@staticmethod
def cycle_loss(y, t):
return 10.0 * F.mean_absolute_error(y, t)
@staticmethod
def identity_loss(y, t):
return 5.0 * F.mean_absolute_error(y, t)
def train(epochs, iterations, batchsize, modeldir, extension, time_width,
mel_bins, sampling_rate, g_learning_rate, d_learning_rate, beta1, beta2,
identity_epoch, second_step, src_path, tgt_path):
dataset = DatasetLoader(src_path, tgt_path, extension, time_width,
mel_bins, sampling_rate)
print(dataset)
generator_xy = Generator()
generator_xy.to_gpu()
gen_xy_opt = set_optimizer(generator_xy, g_learning_rate, beta1, beta2)
generator_yx = Generator()
generator_yx.to_gpu()
gen_yx_opt = set_optimizer(generator_yx, g_learning_rate, beta1, beta2)
discriminator_y = Discriminator()
discriminator_y.to_gpu()
dis_y_opt = set_optimizer(discriminator_y, d_learning_rate, beta1, beta2)
discriminator_x = Discriminator()
discriminator_x.to_gpu()
dis_x_opt = set_optimizer(discriminator_x, d_learning_rate, beta1, beta2)
discriminator_xyx = Discriminator()
discriminator_xyx.to_gpu()
dis_xyx_opt = set_optimizer(discriminator_xyx, d_learning_rate, beta1,
beta2)
discriminator_yxy = Discriminator()
discriminator_yxy.to_gpu()
dis_yxy_opt = set_optimizer(discriminator_yxy, d_learning_rate, beta1,
beta2)
lossfunc = CycleGANVC2LossCalculator()
for epoch in range(epochs):
sum_dis_loss = 0
sum_gen_loss = 0
for batch in range(0, iterations, batchsize):
x, y = dataset.train(batchsize)
xy = generator_xy(x)
xyx = generator_yx(xy)
yx = generator_yx(y)
yxy = generator_xy(yx)
xy.unchain_backward()
xyx.unchain_backward()
yx.unchain_backward()
yxy.unchain_backward()
dis_loss = lossfunc.dis_loss(discriminator_y, xy, y)
dis_loss += lossfunc.dis_loss(discriminator_x, yx, x)
if second_step:
dis_loss += lossfunc.dis_loss(discriminator_xyx, xyx, x)
dis_loss += lossfunc.dis_loss(discriminator_yxy, yxy, y)
discriminator_xyx.cleargrads()
discriminator_yxy.cleargrads()
discriminator_x.cleargrads()
discriminator_y.cleargrads()
dis_loss.backward()
dis_x_opt.update()
dis_y_opt.update()
if second_step:
dis_xyx_opt.update()
dis_yxy_opt.update()
dis_loss.unchain_backward()
xy = generator_xy(x)
xyx = generator_yx(xy)
id_y = generator_xy(y)
yx = generator_yx(y)
yxy = generator_xy(yx)
id_x = generator_yx(x)
gen_loss = lossfunc.gen_loss(discriminator_y, xy)
gen_loss += lossfunc.gen_loss(discriminator_x, yx)
if second_step:
gen_loss += lossfunc.gen_loss(discriminator_yxy, yxy)
gen_loss += lossfunc.gen_loss(discriminator_xyx, xyx)
gen_loss += lossfunc.cycle_loss(x, xyx)
gen_loss += lossfunc.cycle_loss(y, xyx)
if epoch < identity_epoch:
gen_loss += lossfunc.identity_loss(id_y, y)
gen_loss += lossfunc.identity_loss(id_x, x)
generator_xy.cleargrads()
generator_yx.cleargrads()
gen_loss.backward()
gen_xy_opt.update()
gen_yx_opt.update()
gen_loss.unchain_backward()
sum_dis_loss += dis_loss.data
sum_gen_loss += gen_loss.data
if batch == 0:
serializers.save_npz(f'{modeldir}/generator_xy_{epoch}.model',
generator_xy)
serializers.save_npz(f'{modeldir}/generator_yx_{epoch}.model',
generator_yx)
print('epoch : {}'.format(epoch))
print('Generator loss : {}'.format(sum_gen_loss / iterations))
print('Discriminator loss : {}'.format(sum_dis_loss / iterations))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='StarGANVC2')
parser.add_argument('--e', type=int, default=50, help=
'the number of epochs')
parser.add_argument('--i', type=int, default=1000, help=
'the number of iterations')
parser.add_argument('--b', type=int, default=16, help='batch size')
parser.add_argument('--modeldir', type=Path, default='modeldir', help=
'model output directory')
parser.add_argument('--ext', type=str, default='.npy', help=
'extension of training data')
parser.add_argument('--tw', type=int, default=128, help=
'time width of spectral envelope')
parser.add_argument('--mb', type=int, default=36, help=
'mel bins of spectral envelope')
parser.add_argument('--sr', type=int, default=22050, help=
'sampling rate of audio data')
parser.add_argument('--glr', type=float, default=0.0002, help=
'learning rate of Adam on generator')
parser.add_argument('--dlr', type=float, default=0.0001, help=
'learning rate of Adam on discriminator')
parser.add_argument('--b1', type=float, default=0.5, help='beta1 of Adam')
parser.add_argument('--b2', type=float, default=0.999, help='beta2 of Adam'
)
parser.add_argument('--ie', type=int, default=20, help=
'time spans enabling identity mapping loss')
parser.add_argument('--second', action='store_true', help=
'enabling second step of adversaria loss')
parser.add_argument('--src', type=Path, help=
'path which includes source data')
parser.add_argument('--tgt', type=Path, help=
'path which includes target data')
args = parser.parse_args()
modeldir = args.modeldir
modeldir.mkdir(exist_ok=True)
train(args.e, args.i, args.b, modeldir, args.ext, args.tw, args.mb,
args.sr, args.glr, args.dlr, args.b1, args.b2, args.ie, args.second,
args.src, args.tgt)
|
import chainer
import chainer.functions as F
import numpy as np
import argparse
from model import Generator, Discriminator
from chainer import cuda, serializers
from pathlib import Path
from utils import set_optimizer
from dataset import DatasetLoader
xp = cuda.cupy
cuda.get_device(0).use()
class CycleGANVC2LossCalculator:
def __init__(self):
pass
@staticmethod
def dis_loss(discriminator, y, t):
y_dis = discriminator(y)
t_dis = discriminator(t)
return F.mean(F.softplus(-t_dis)) + F.mean(F.softplus(y_dis))
@staticmethod
def gen_loss(discriminator, y):
y_dis = discriminator(y)
return F.mean(F.softplus(-y_dis))
@staticmethod
def cycle_loss(y, t):
return 10.0 * F.mean_absolute_error(y, t)
@staticmethod
def identity_loss(y, t):
return 5.0 * F.mean_absolute_error(y, t)
def train(epochs,
iterations,
batchsize,
modeldir,
extension,
time_width,
mel_bins,
sampling_rate,
g_learning_rate,
d_learning_rate,
beta1,
beta2,
identity_epoch,
second_step,
src_path,
tgt_path):
# Dataset definiton
dataset = DatasetLoader(src_path,
tgt_path,
extension,
time_width,
mel_bins,
sampling_rate)
print(dataset)
# Model & Optimizer definition
generator_xy = Generator()
generator_xy.to_gpu()
gen_xy_opt = set_optimizer(generator_xy, g_learning_rate, beta1, beta2)
generator_yx = Generator()
generator_yx.to_gpu()
gen_yx_opt = set_optimizer(generator_yx, g_learning_rate, beta1, beta2)
discriminator_y = Discriminator()
discriminator_y.to_gpu()
dis_y_opt = set_optimizer(discriminator_y, d_learning_rate, beta1, beta2)
discriminator_x = Discriminator()
discriminator_x.to_gpu()
dis_x_opt = set_optimizer(discriminator_x, d_learning_rate, beta1, beta2)
discriminator_xyx = Discriminator()
discriminator_xyx.to_gpu()
dis_xyx_opt = set_optimizer(discriminator_xyx, d_learning_rate, beta1, beta2)
discriminator_yxy = Discriminator()
discriminator_yxy.to_gpu()
dis_yxy_opt = set_optimizer(discriminator_yxy, d_learning_rate, beta1, beta2)
# Loss function definition
lossfunc = CycleGANVC2LossCalculator()
for epoch in range(epochs):
sum_dis_loss = 0
sum_gen_loss = 0
for batch in range(0, iterations, batchsize):
x, y = dataset.train(batchsize)
xy = generator_xy(x)
xyx = generator_yx(xy)
yx = generator_yx(y)
yxy = generator_xy(yx)
xy.unchain_backward()
xyx.unchain_backward()
yx.unchain_backward()
yxy.unchain_backward()
dis_loss = lossfunc.dis_loss(discriminator_y, xy, y)
dis_loss += lossfunc.dis_loss(discriminator_x, yx, x)
if second_step:
dis_loss += lossfunc.dis_loss(discriminator_xyx, xyx, x)
dis_loss += lossfunc.dis_loss(discriminator_yxy, yxy, y)
discriminator_xyx.cleargrads()
discriminator_yxy.cleargrads()
discriminator_x.cleargrads()
discriminator_y.cleargrads()
dis_loss.backward()
dis_x_opt.update()
dis_y_opt.update()
if second_step:
dis_xyx_opt.update()
dis_yxy_opt.update()
dis_loss.unchain_backward()
xy = generator_xy(x)
xyx = generator_yx(xy)
id_y = generator_xy(y)
yx = generator_yx(y)
yxy = generator_xy(yx)
id_x = generator_yx(x)
gen_loss = lossfunc.gen_loss(discriminator_y, xy)
gen_loss += lossfunc.gen_loss(discriminator_x, yx)
if second_step:
gen_loss += lossfunc.gen_loss(discriminator_yxy, yxy)
gen_loss += lossfunc.gen_loss(discriminator_xyx, xyx)
gen_loss += lossfunc.cycle_loss(x, xyx)
gen_loss += lossfunc.cycle_loss(y, xyx)
if epoch < identity_epoch:
gen_loss += lossfunc.identity_loss(id_y, y)
gen_loss += lossfunc.identity_loss(id_x, x)
generator_xy.cleargrads()
generator_yx.cleargrads()
gen_loss.backward()
gen_xy_opt.update()
gen_yx_opt.update()
gen_loss.unchain_backward()
sum_dis_loss += dis_loss.data
sum_gen_loss += gen_loss.data
if batch == 0:
serializers.save_npz(f"{modeldir}/generator_xy_{epoch}.model", generator_xy)
serializers.save_npz(f"{modeldir}/generator_yx_{epoch}.model", generator_yx)
print('epoch : {}'.format(epoch))
print('Generator loss : {}'.format(sum_gen_loss / iterations))
print('Discriminator loss : {}'.format(sum_dis_loss / iterations))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="StarGANVC2")
parser.add_argument('--e', type=int, default=50, help="the number of epochs")
parser.add_argument('--i', type=int, default=1000, help="the number of iterations")
parser.add_argument('--b', type=int, default=16, help="batch size")
parser.add_argument('--modeldir', type=Path, default="modeldir", help="model output directory")
parser.add_argument('--ext', type=str, default=".npy", help="extension of training data")
parser.add_argument('--tw', type=int, default=128, help="time width of spectral envelope")
parser.add_argument('--mb', type=int, default=36, help="mel bins of spectral envelope")
parser.add_argument('--sr', type=int, default=22050, help="sampling rate of audio data")
parser.add_argument('--glr', type=float, default=0.0002, help="learning rate of Adam on generator")
parser.add_argument('--dlr', type=float, default=0.0001, help="learning rate of Adam on discriminator")
parser.add_argument('--b1', type=float, default=0.5, help="beta1 of Adam")
parser.add_argument('--b2', type=float, default=0.999, help="beta2 of Adam")
parser.add_argument('--ie', type=int, default=20, help="time spans enabling identity mapping loss")
parser.add_argument('--second', action="store_true", help="enabling second step of adversaria loss")
parser.add_argument('--src', type=Path, help="path which includes source data")
parser.add_argument('--tgt', type=Path, help="path which includes target data")
args = parser.parse_args()
modeldir = args.modeldir
modeldir.mkdir(exist_ok=True)
train(args.e, args.i, args.b, modeldir, args.ext, args.tw, args.mb, args.sr,
args.glr, args.dlr, args.b1, args.b2, args.ie, args.second,
args.src, args.tgt)
|
[
3,
7,
9,
10,
11
] |
2,482 |
3e1e2de555667bf09162cd6c62cad35dabbd0f54
|
from flask import Flask
from flask import render_template
# Creates a Flask application called 'app'
app = Flask(__name__, template_folder='C:\Users\jwhitehead\Documents\Webdev\Angular Web App')
# The route to display the HTML template on
@app.route('/')
def host():
return render_template('index.html')
# Run the Flask application
if __name__ == "__main__":
app.run(host='localhost', port='80')
| null | null | null | null |
[
0
] |
2,483 |
e2573a5dc507e9aeb811fbc254129aeb6e54cc0b
|
<mask token>
class MyAdmin(admin.ModelAdmin):
<mask token>
def has_delete_permission(self, request, obj=None):
return False
class CalcResultAdmin(MyAdmin):
list_display = 'result', 'message', 'time'
search_fields = 'result', 'message', 'time'
<mask token>
|
<mask token>
class MyAdmin(admin.ModelAdmin):
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class CalcResultAdmin(MyAdmin):
list_display = 'result', 'message', 'time'
search_fields = 'result', 'message', 'time'
<mask token>
|
<mask token>
class MyAdmin(admin.ModelAdmin):
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class CalcResultAdmin(MyAdmin):
list_display = 'result', 'message', 'time'
search_fields = 'result', 'message', 'time'
admin.site.register(CalcResult, CalcResultAdmin)
|
from django.contrib import admin
from calc.models import CalcResult
class MyAdmin(admin.ModelAdmin):
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class CalcResultAdmin(MyAdmin):
list_display = 'result', 'message', 'time'
search_fields = 'result', 'message', 'time'
admin.site.register(CalcResult, CalcResultAdmin)
|
from django.contrib import admin
from calc.models import CalcResult
class MyAdmin(admin.ModelAdmin):
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class CalcResultAdmin(MyAdmin):
list_display = ('result', 'message', 'time',)
search_fields = ('result', 'message', 'time',)
admin.site.register(CalcResult, CalcResultAdmin)
|
[
4,
5,
6,
7,
8
] |
2,484 |
7c9c13974e1deeb55f08c9e251e8c876cedcad6b
|
<mask token>
@calculate_time
def factorial(num):
time.sleep(2)
print(math.factorial(num))
<mask token>
|
<mask token>
def calculate_time(func):
def inner_fn(*args, **kwargs):
start = time.time()
func(*args, **kwargs)
end = time.time()
print("Time taken to execute '{}' function is: {} seconds".format(
func.__name__, round(end - start, 2)))
return inner_fn
@calculate_time
def factorial(num):
time.sleep(2)
print(math.factorial(num))
<mask token>
|
<mask token>
def calculate_time(func):
def inner_fn(*args, **kwargs):
start = time.time()
func(*args, **kwargs)
end = time.time()
print("Time taken to execute '{}' function is: {} seconds".format(
func.__name__, round(end - start, 2)))
return inner_fn
@calculate_time
def factorial(num):
time.sleep(2)
print(math.factorial(num))
factorial(20)
|
import math
import time
def calculate_time(func):
def inner_fn(*args, **kwargs):
start = time.time()
func(*args, **kwargs)
end = time.time()
print("Time taken to execute '{}' function is: {} seconds".format(
func.__name__, round(end - start, 2)))
return inner_fn
@calculate_time
def factorial(num):
time.sleep(2)
print(math.factorial(num))
factorial(20)
|
import math
import time
def calculate_time(func):
def inner_fn(*args, **kwargs):
start = time.time()
func(*args, **kwargs)
end = time.time()
print("Time taken to execute \'{}\' function is: {} seconds".format(func.__name__, round(end - start, 2)))
return inner_fn
@calculate_time
def factorial(num):
time.sleep(2)
print(math.factorial(num))
factorial(20)
|
[
1,
2,
3,
4,
5
] |
2,485 |
fef4749ce7b8668a5a138aa1245010866a85c853
|
<mask token>
|
class Solution:
<mask token>
|
class Solution:
def asteroidCollision(self, asteroids: List[int]) ->List[int]:
output = []
index = 0
for i in asteroids:
if len(output) == 0:
index = 0
if index == 0:
output.append(i)
index += 1
continue
elif output[-1] < 0 and i >= 0:
output.append(i)
elif output[-1] >= 0 and i >= 0:
output.append(i)
else:
append = True
while True:
if output[-1] < 0:
break
elif abs(output[-1]) == abs(i):
del output[-1]
append = False
break
elif abs(output[-1]) < abs(i):
del output[-1]
else:
append = False
break
if len(output) == 0:
break
if append:
output.append(i)
return output
|
class Solution:
def asteroidCollision(self, asteroids: List[int]) -> List[int]:
output = []
index = 0
for i in asteroids:
if len(output) == 0:
index = 0
if index == 0:
output.append(i)
index+=1
continue
elif output[-1]<0 and i >=0:
output.append(i)
elif output[-1]>=0 and i >=0:
output.append(i)
else:
append = True
while True:
if output[-1]<0:
break
elif abs(output[-1]) == abs(i):
del output[-1]
append = False
break
elif abs(output[-1]) < abs(i):
del output[-1]
else:
append = False
break
if len(output)==0:
break
if append:
output.append(i)
return output
| null |
[
0,
1,
2,
3
] |
2,486 |
0372cdbae8c5b0bbcbade86a5a7de28c1ee513b1
|
<mask token>
|
<mask token>
tkinter.filedialog.askopenfilename()
<mask token>
from_file.close()
<mask token>
to_file.write('Copy\n')
to_file.write(contents)
to_file.close()
|
<mask token>
tkinter.filedialog.askopenfilename()
from_filename = tkinter.filedialog.askopenfilename()
to_filename = tkinter.filedialog.asksaveasfilename()
from_file = open(from_filename, 'r')
contents = from_file.read()
from_file.close()
to_file = open(to_filename, 'w')
to_file.write('Copy\n')
to_file.write(contents)
to_file.close()
|
import tkinter.filedialog
tkinter.filedialog.askopenfilename()
from_filename = tkinter.filedialog.askopenfilename()
to_filename = tkinter.filedialog.asksaveasfilename()
from_file = open(from_filename, 'r')
contents = from_file.read()
from_file.close()
to_file = open(to_filename, 'w')
to_file.write('Copy\n')
to_file.write(contents)
to_file.close()
|
# Write files
# Writing to a file within a Python program:
# In order to write to a file, we use file.write(str).
# This method writes a string to a file.
# The method write() works like Python's print() function, except it does not add a newline ("\n") character.
# File dialogs:
# Module tkinter has a submodule called filedialog. We import it like this:
import tkinter.filedialog
# Function askopenfilename() asks the user to select a file to open:
tkinter.filedialog.askopenfilename()
# This function returns the full path to the file, so we can use that when we call the function open() to open that file.
from_filename = tkinter.filedialog.askopenfilename()
# Function asksaveasfilename() asks the user to select a file to save to, and provides a warning if the file already exists.
to_filename = tkinter.filedialog.asksaveasfilename()
### Example ###
# Below is a program that copies a file, but puts "Copy" as the first line of the copied file.
# First prompt the user to pick a file, then open the file that we want to read from and get the contents:
from_file = open(from_filename, 'r')
contents = from_file.read()
from_file.close()
# Now we can open the file we want to write to and write the contents:
to_file = open(to_filename, 'w')
to_file.write('Copy\n') # we have to add the newline ourselves
to_file.write(contents) # now write the contents of the file
to_file.close()
|
[
0,
1,
2,
3,
4
] |
2,487 |
84db1803a352e0ed8c01b7166f522d46ec89b6f5
|
<mask token>
|
<mask token>
for train_index, test_index in kf.split(x):
xtr = x.iloc[train_index]
ytr = y[train_index]
<mask token>
if k % 2 == 0:
k = k + 1
else:
k = k
<mask token>
print('Skor KNN: ', round(cross_val_score(knn, xtr, ytr, cv=5).mean() * 100
), ' %')
print('Skor Logistic Regression: ', round(cross_val_score(logreg, xtr, ytr,
cv=5).mean() * 100), ' %')
print('Skor Random Forest: ', round(cross_val_score(ranfor, xtr, ytr, cv=5)
.mean() * 100), ' %')
print('Skor Decision Tree: ', round(cross_val_score(dec, xtr, ytr, cv=5).
mean() * 100), ' %')
<mask token>
|
<mask token>
df = pd.read_csv('data.csv')
df = df.fillna(np.NaN)
df['Target'] = 0
df['Target_name'] = 'Non-Target'
df['Target'][(df['Age'] <= 25) & (df['Overall'] >= 80) & (df['Potential'] >=
80)] = 1
df['Target_name'][(df['Age'] <= 25) & (df['Overall'] >= 80) & (df[
'Potential'] >= 80)] = 'Target'
x = df.loc[:, ['Age', 'Overall', 'Potential']]
y = df['Target']
kf = KFold(n_splits=5)
for train_index, test_index in kf.split(x):
xtr = x.iloc[train_index]
ytr = y[train_index]
<mask token>
k = round(len(x) ** 0.5)
if k % 2 == 0:
k = k + 1
else:
k = k
knn = KNeighborsClassifier(n_neighbors=k)
<mask token>
logreg = LogisticRegression(multi_class='auto', solver='liblinear')
<mask token>
ranfor = RandomForestClassifier(n_estimators=50)
<mask token>
dec = DecisionTreeClassifier()
print('Skor KNN: ', round(cross_val_score(knn, xtr, ytr, cv=5).mean() * 100
), ' %')
print('Skor Logistic Regression: ', round(cross_val_score(logreg, xtr, ytr,
cv=5).mean() * 100), ' %')
print('Skor Random Forest: ', round(cross_val_score(ranfor, xtr, ytr, cv=5)
.mean() * 100), ' %')
print('Skor Decision Tree: ', round(cross_val_score(dec, xtr, ytr, cv=5).
mean() * 100), ' %')
<mask token>
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
df = pd.read_csv('data.csv')
df = df.fillna(np.NaN)
df['Target'] = 0
df['Target_name'] = 'Non-Target'
df['Target'][(df['Age'] <= 25) & (df['Overall'] >= 80) & (df['Potential'] >=
80)] = 1
df['Target_name'][(df['Age'] <= 25) & (df['Overall'] >= 80) & (df[
'Potential'] >= 80)] = 'Target'
x = df.loc[:, ['Age', 'Overall', 'Potential']]
y = df['Target']
kf = KFold(n_splits=5)
for train_index, test_index in kf.split(x):
xtr = x.iloc[train_index]
ytr = y[train_index]
<mask token>
k = round(len(x) ** 0.5)
if k % 2 == 0:
k = k + 1
else:
k = k
knn = KNeighborsClassifier(n_neighbors=k)
<mask token>
logreg = LogisticRegression(multi_class='auto', solver='liblinear')
<mask token>
ranfor = RandomForestClassifier(n_estimators=50)
<mask token>
dec = DecisionTreeClassifier()
print('Skor KNN: ', round(cross_val_score(knn, xtr, ytr, cv=5).mean() * 100
), ' %')
print('Skor Logistic Regression: ', round(cross_val_score(logreg, xtr, ytr,
cv=5).mean() * 100), ' %')
print('Skor Random Forest: ', round(cross_val_score(ranfor, xtr, ytr, cv=5)
.mean() * 100), ' %')
print('Skor Decision Tree: ', round(cross_val_score(dec, xtr, ytr, cv=5).
mean() * 100), ' %')
<mask token>
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
df=pd.read_csv('data.csv')
df=df.fillna(np.NaN)
#kita isi df dengan kolom target = 0, target_name = 0 , agar memudahkan untuk training
df['Target']=0
df['Target_name']='Non-Target'
# print(df)
#tandai target dengan angka 1,target_name='Target' pada dataframe usia <= 25, overall >= 80, dan potential >= 80
df['Target'][(df['Age']<=25)&(df['Overall']>=80)&(df['Potential']>=80)]=1
df['Target_name'][(df['Age']<=25)&(df['Overall']>=80)&(df['Potential']>=80)]='Target'
x=df.loc[:,['Age','Overall','Potential']]
y=df['Target']
kf=KFold(n_splits = 5)
for train_index,test_index in kf.split(x):
xtr=x.iloc[train_index]
ytr=y[train_index]
'''
KNN
nilai k terbaik atau n terbaik dapat dicari dengan cara sqrt(n_data) lalu pilih yg odd/ganjil
cari len dari data (banyak data) lalu kalikan pangkat setengah
'''
k = round(len(x) ** .5)
if((k%2) == 0):
k=k+1
else:
k=k
knn=KNeighborsClassifier(n_neighbors=k)
'''
Logistic Regression
'''
logreg=LogisticRegression(multi_class='auto',solver='liblinear')
'''
Random Forest
'''
ranfor=RandomForestClassifier(n_estimators=50)
'''
Decision Tree
'''
dec=DecisionTreeClassifier()
print("Skor KNN: ",round(cross_val_score(knn,xtr,ytr,cv=5).mean()*100),' %')
print("Skor Logistic Regression: ",round(cross_val_score(logreg,xtr,ytr,cv=5).mean()*100),' %')
print("Skor Random Forest: ",round(cross_val_score(ranfor,xtr,ytr,cv=5).mean()*100),' %')
print("Skor Decision Tree: ",round(cross_val_score(dec,xtr,ytr,cv=5).mean()*100),' %')
'''
Skor KNN: 96.0 %
Skor Logistic Regression: 97.0 %
Skor Random Forest: 96.0 %
Skor Decision Tree: 93.0 %
'''
|
[
0,
1,
2,
3,
4
] |
2,488 |
0b0b928aef9a4e9953b02639bf5e7769cc4389d7
|
<mask token>
|
default_app_config = 'reman.apps.RemanConfig'
| null | null | null |
[
0,
1
] |
2,489 |
34e902fbced13629657494eedfe385d3b5ae3f55
|
# TUPLE IMUTAVEL
# GERALMENTE HETEORGENEA
# tupla com 1 ou 0 elementos
#
# empty = ()
# singleton = 'breno',
# print(type(empty))
# print(singleton)
# tuplas podem ser aninhadas
# t = 12345, 54321, 'hello!'
# u = t, (1, 2, 3, 4, 5)
#imutaveis
# t[0] = 88888
| null | null | null |
[
0,
1
] |
|
2,490 |
93133b9a62d50e4e48e37721585116c1c7d70761
|
<mask token>
class GroupElement:
<mask token>
def _groupmulprops(self, x):
if x == self.group.identity:
return True, deepcopy(self)
if self == self.group.identity:
return True, deepcopy(x)
if self.group.inv(self) == x or self == self.group.inv(x):
return True, deepcopy(self.group.identity)
return False, None
def __mul__(self, x):
if isinstance(x, SubstituteTerm):
return NotImplemented
matched, term = self._groupmulprops(x)
result = self.group.op(self, x) if not matched else term
return self.group.simplify_term(result) if hasattr(self.group,
'simplify_term') else result
<mask token>
def __truediv__(self, x):
return self.__mul__(self.group.inv(x))
def __rtruediv__(self, x):
return self.__rmul__(self.group.inv(x))
class GroupVariable(GroupElement, Variable):
def __init__(self, g: Group, symbol: str):
GroupElement.__init__(self, g)
Variable.__init__(self, symbol)
def __hash__(self):
return hash((self.group, self.symbol))
def __eq__(self, x):
return type(self) is type(x
) and self.group == x.group and self.symbol == x.symbol
class GroupFuncTerm(GroupElement, FuncTerm):
def __init__(self, g: Group, a_term: ATerm):
GroupElement.__init__(self, g)
FuncTerm.__init__(self, a_term.function, a_term.arguments)
self.term = a_term
def set_arguments(self, args):
self.term.arguments = tuple(args)
self.arguments = tuple(args)
def set_function(self, function: Function):
self.function = function
self.term.function = function
def __hash__(self):
return hash((self.group, self.term))
def __repr__(self):
return repr(self.term)
def __str__(self):
return str(self.term)
def __eq__(self, x):
return type(self) is type(x
) and self.group == x.group and self.term == x.term
class GroupConstant(GroupElement, Constant):
def __init__(self, g: Group, symbol: str):
GroupElement.__init__(self, g)
Constant.__init__(self, symbol)
def __hash__(self):
return hash((self.group, self.symbol))
def __eq__(self, x):
return type(self) is type(x
) and self.group == x.group and self.symbol == x.symbol
class AbelianGroup(Group):
def __init__(self, name: str, operation: ACFunction, inv_symbol=None,
identity_symbol='e'):
if not isinstance(operation, ACFunction):
raise ValueError(
'operation must be associative and commutative (ACFunction)')
super().__init__(name, operation, inv_symbol=inv_symbol,
identity_symbol=identity_symbol)
|
<mask token>
class GroupInverseFunction(Function):
<mask token>
<mask token>
class GroupFunction(Function):
def __init__(self, g: Group, f: Function):
super().__init__(f.symbol, f.arity)
self.group = g
self.function = f
def __call__(self, *args):
term = self.function(*args)
if not isinstance(term, FuncTerm) or term.function.arity == 0:
return deepcopy(term)
result = GroupFuncTerm(self.group, term)
result.set_function(self)
return result
class GroupElement:
def __init__(self, g: Group):
self.group = g
def _groupmulprops(self, x):
if x == self.group.identity:
return True, deepcopy(self)
if self == self.group.identity:
return True, deepcopy(x)
if self.group.inv(self) == x or self == self.group.inv(x):
return True, deepcopy(self.group.identity)
return False, None
def __mul__(self, x):
if isinstance(x, SubstituteTerm):
return NotImplemented
matched, term = self._groupmulprops(x)
result = self.group.op(self, x) if not matched else term
return self.group.simplify_term(result) if hasattr(self.group,
'simplify_term') else result
def __rmul__(self, x):
matched, term = self._groupmulprops(x)
result = self.group.op(x, self) if not matched else term
return self.group.simplify_term(result) if hasattr(self.group,
'simplify_term') else result
def __truediv__(self, x):
return self.__mul__(self.group.inv(x))
def __rtruediv__(self, x):
return self.__rmul__(self.group.inv(x))
class GroupVariable(GroupElement, Variable):
def __init__(self, g: Group, symbol: str):
GroupElement.__init__(self, g)
Variable.__init__(self, symbol)
def __hash__(self):
return hash((self.group, self.symbol))
def __eq__(self, x):
return type(self) is type(x
) and self.group == x.group and self.symbol == x.symbol
class GroupFuncTerm(GroupElement, FuncTerm):
def __init__(self, g: Group, a_term: ATerm):
GroupElement.__init__(self, g)
FuncTerm.__init__(self, a_term.function, a_term.arguments)
self.term = a_term
def set_arguments(self, args):
self.term.arguments = tuple(args)
self.arguments = tuple(args)
def set_function(self, function: Function):
self.function = function
self.term.function = function
def __hash__(self):
return hash((self.group, self.term))
def __repr__(self):
return repr(self.term)
def __str__(self):
return str(self.term)
def __eq__(self, x):
return type(self) is type(x
) and self.group == x.group and self.term == x.term
class GroupConstant(GroupElement, Constant):
def __init__(self, g: Group, symbol: str):
GroupElement.__init__(self, g)
Constant.__init__(self, symbol)
def __hash__(self):
return hash((self.group, self.symbol))
def __eq__(self, x):
return type(self) is type(x
) and self.group == x.group and self.symbol == x.symbol
class AbelianGroup(Group):
def __init__(self, name: str, operation: ACFunction, inv_symbol=None,
identity_symbol='e'):
if not isinstance(operation, ACFunction):
raise ValueError(
'operation must be associative and commutative (ACFunction)')
super().__init__(name, operation, inv_symbol=inv_symbol,
identity_symbol=identity_symbol)
|
<mask token>
class Group:
<mask token>
<mask token>
def __eq__(self, x):
return type(self) is type(x
) and self.name == x.name and self.op == x.op
class GroupInverseFunction(Function):
def __init__(self, g: Group, symbol: str):
super().__init__(symbol, 1)
self.group = g
def __call__(self, x):
if x == self.group.identity:
return deepcopy(self.group.identity)
if isinstance(x, FuncTerm) and isinstance(x.function,
GroupInverseFunction):
return x.arguments[0]
return FuncTerm(self, (x,))
class GroupFunction(Function):
def __init__(self, g: Group, f: Function):
super().__init__(f.symbol, f.arity)
self.group = g
self.function = f
def __call__(self, *args):
term = self.function(*args)
if not isinstance(term, FuncTerm) or term.function.arity == 0:
return deepcopy(term)
result = GroupFuncTerm(self.group, term)
result.set_function(self)
return result
class GroupElement:
def __init__(self, g: Group):
self.group = g
def _groupmulprops(self, x):
if x == self.group.identity:
return True, deepcopy(self)
if self == self.group.identity:
return True, deepcopy(x)
if self.group.inv(self) == x or self == self.group.inv(x):
return True, deepcopy(self.group.identity)
return False, None
def __mul__(self, x):
if isinstance(x, SubstituteTerm):
return NotImplemented
matched, term = self._groupmulprops(x)
result = self.group.op(self, x) if not matched else term
return self.group.simplify_term(result) if hasattr(self.group,
'simplify_term') else result
def __rmul__(self, x):
matched, term = self._groupmulprops(x)
result = self.group.op(x, self) if not matched else term
return self.group.simplify_term(result) if hasattr(self.group,
'simplify_term') else result
def __truediv__(self, x):
return self.__mul__(self.group.inv(x))
def __rtruediv__(self, x):
return self.__rmul__(self.group.inv(x))
class GroupVariable(GroupElement, Variable):
def __init__(self, g: Group, symbol: str):
GroupElement.__init__(self, g)
Variable.__init__(self, symbol)
def __hash__(self):
return hash((self.group, self.symbol))
def __eq__(self, x):
return type(self) is type(x
) and self.group == x.group and self.symbol == x.symbol
class GroupFuncTerm(GroupElement, FuncTerm):
def __init__(self, g: Group, a_term: ATerm):
GroupElement.__init__(self, g)
FuncTerm.__init__(self, a_term.function, a_term.arguments)
self.term = a_term
def set_arguments(self, args):
self.term.arguments = tuple(args)
self.arguments = tuple(args)
def set_function(self, function: Function):
self.function = function
self.term.function = function
def __hash__(self):
return hash((self.group, self.term))
def __repr__(self):
return repr(self.term)
def __str__(self):
return str(self.term)
def __eq__(self, x):
return type(self) is type(x
) and self.group == x.group and self.term == x.term
class GroupConstant(GroupElement, Constant):
def __init__(self, g: Group, symbol: str):
GroupElement.__init__(self, g)
Constant.__init__(self, symbol)
def __hash__(self):
return hash((self.group, self.symbol))
def __eq__(self, x):
return type(self) is type(x
) and self.group == x.group and self.symbol == x.symbol
class AbelianGroup(Group):
def __init__(self, name: str, operation: ACFunction, inv_symbol=None,
identity_symbol='e'):
if not isinstance(operation, ACFunction):
raise ValueError(
'operation must be associative and commutative (ACFunction)')
super().__init__(name, operation, inv_symbol=inv_symbol,
identity_symbol=identity_symbol)
|
from symcollab.algebra import *
from .ac import *
from copy import deepcopy
class Group:
def __init__(self, name: str, operation: AFunction, inv_symbol=None,
identity_symbol='e'):
if not isinstance(operation, AFunction):
raise ValueError('operation must be associative (AFunction)')
self.name = name
self.identity = GroupConstant(self, identity_symbol)
self.inv = GroupInverseFunction(self, name + '_inv' if inv_symbol is
None else inv_symbol)
self.op = GroupFunction(self, operation)
def __hash__(self):
return hash(self.name)
def __eq__(self, x):
return type(self) is type(x
) and self.name == x.name and self.op == x.op
class GroupInverseFunction(Function):
def __init__(self, g: Group, symbol: str):
super().__init__(symbol, 1)
self.group = g
def __call__(self, x):
if x == self.group.identity:
return deepcopy(self.group.identity)
if isinstance(x, FuncTerm) and isinstance(x.function,
GroupInverseFunction):
return x.arguments[0]
return FuncTerm(self, (x,))
class GroupFunction(Function):
def __init__(self, g: Group, f: Function):
super().__init__(f.symbol, f.arity)
self.group = g
self.function = f
def __call__(self, *args):
term = self.function(*args)
if not isinstance(term, FuncTerm) or term.function.arity == 0:
return deepcopy(term)
result = GroupFuncTerm(self.group, term)
result.set_function(self)
return result
class GroupElement:
def __init__(self, g: Group):
self.group = g
def _groupmulprops(self, x):
if x == self.group.identity:
return True, deepcopy(self)
if self == self.group.identity:
return True, deepcopy(x)
if self.group.inv(self) == x or self == self.group.inv(x):
return True, deepcopy(self.group.identity)
return False, None
def __mul__(self, x):
if isinstance(x, SubstituteTerm):
return NotImplemented
matched, term = self._groupmulprops(x)
result = self.group.op(self, x) if not matched else term
return self.group.simplify_term(result) if hasattr(self.group,
'simplify_term') else result
def __rmul__(self, x):
matched, term = self._groupmulprops(x)
result = self.group.op(x, self) if not matched else term
return self.group.simplify_term(result) if hasattr(self.group,
'simplify_term') else result
def __truediv__(self, x):
return self.__mul__(self.group.inv(x))
def __rtruediv__(self, x):
return self.__rmul__(self.group.inv(x))
class GroupVariable(GroupElement, Variable):
def __init__(self, g: Group, symbol: str):
GroupElement.__init__(self, g)
Variable.__init__(self, symbol)
def __hash__(self):
return hash((self.group, self.symbol))
def __eq__(self, x):
return type(self) is type(x
) and self.group == x.group and self.symbol == x.symbol
class GroupFuncTerm(GroupElement, FuncTerm):
def __init__(self, g: Group, a_term: ATerm):
GroupElement.__init__(self, g)
FuncTerm.__init__(self, a_term.function, a_term.arguments)
self.term = a_term
def set_arguments(self, args):
self.term.arguments = tuple(args)
self.arguments = tuple(args)
def set_function(self, function: Function):
self.function = function
self.term.function = function
def __hash__(self):
return hash((self.group, self.term))
def __repr__(self):
return repr(self.term)
def __str__(self):
return str(self.term)
def __eq__(self, x):
return type(self) is type(x
) and self.group == x.group and self.term == x.term
class GroupConstant(GroupElement, Constant):
def __init__(self, g: Group, symbol: str):
GroupElement.__init__(self, g)
Constant.__init__(self, symbol)
def __hash__(self):
return hash((self.group, self.symbol))
def __eq__(self, x):
return type(self) is type(x
) and self.group == x.group and self.symbol == x.symbol
class AbelianGroup(Group):
def __init__(self, name: str, operation: ACFunction, inv_symbol=None,
identity_symbol='e'):
if not isinstance(operation, ACFunction):
raise ValueError(
'operation must be associative and commutative (ACFunction)')
super().__init__(name, operation, inv_symbol=inv_symbol,
identity_symbol=identity_symbol)
|
from symcollab.algebra import *
from .ac import *
from copy import deepcopy
# This is a single arity function which only actually gets applied when called an odd number of times
# Useful for the inverse function later on
# A group G is an algebraic structure which satisfies the following properties
# (1) G is closed under the operation
# (2) The operation is associative
# (3) An identity element exists [that is, op(x, id) = x for all x in G]
# (4) An inverse exists for each element
class Group:
    """An algebraic group: a carrier set closed under an associative
    operation, with an identity element and an inverse for every element.

    The wrapped ``operation`` must be associative (an ``AFunction``);
    commutativity is not required (see ``AbelianGroup``).
    """
    def __init__(self, name : str, operation : AFunction, inv_symbol = None, identity_symbol = "e"):
        # Reject non-associative operations up front.
        if not isinstance(operation, AFunction):
            raise ValueError("operation must be associative (AFunction)")
        self.name = name
        # Identity constant (symbol defaults to "e").
        self.identity = GroupConstant(self, identity_symbol)
        # Inverse function; its symbol defaults to "<name>_inv".
        self.inv = GroupInverseFunction(self, name + "_inv" if inv_symbol is None else inv_symbol)
        # The group operation, wrapped so applications yield group-aware terms.
        self.op = GroupFunction(self, operation)
    def __hash__(self):
        # Hash on the name only; this stays consistent with __eq__, which
        # requires equal names (and additionally equal operations).
        return hash(self.name)
    def __eq__(self, x):
        return type(self) is type(x) and self.name == x.name and self.op == x.op
class GroupInverseFunction(Function):
    """Unary inverse for a group.

    Simplifies eagerly: inv(e) -> e and inv(inv(t)) -> t; anything else
    becomes an ordinary FuncTerm application of this function.
    """

    def __init__(self, g : Group, symbol : str):
        super().__init__(symbol, 1)
        self.group = g

    def __call__(self, x):
        ident = self.group.identity
        # The inverse of the identity is the identity itself.
        if x == ident:
            return deepcopy(ident)
        # Double inverse cancels out.
        nested = isinstance(x, FuncTerm) and isinstance(x.function, GroupInverseFunction)
        if nested:
            return x.arguments[0]
        return FuncTerm(self, (x,))
class GroupFunction(Function):
    """Wraps a plain Function so that its applications are group-aware terms."""

    def __init__(self, g : Group, f : Function):
        super().__init__(f.symbol, f.arity)
        self.group = g
        self.function = f

    def __call__(self, *args):
        applied = self.function(*args)
        # Calls that collapse to a constant (or a non-term) pass through
        # unchanged; only genuine applications are promoted.
        if not isinstance(applied, FuncTerm) or applied.function.arity == 0:
            return deepcopy(applied)
        promoted = GroupFuncTerm(self.group, applied)
        promoted.set_function(self)
        return promoted
# Base class shared by group constants, variables and function terms
# (FuncTerms, Constants and Variables all inherit from it).
# It wires Python's * and / operators to the group operation, applying
# the identity and inverse cancellation rules first, so elements can be
# combined naturally as a * b.
class GroupElement:
    def __init__(self, g : Group):
        self.group = g

    def _groupmulprops(self, x):
        # Identity/inverse simplification rules.  Returns (True, result)
        # when a rule fires, otherwise (False, None).
        identity = self.group.identity
        if x == identity:
            return (True, deepcopy(self))
        if self == identity:
            return (True, deepcopy(x))
        if self.group.inv(self) == x or self == self.group.inv(x):
            return (True, deepcopy(identity))
        return (False, None)

    def _post_simplify(self, term):
        # Some groups expose an optional simplify_term hook; apply it when present.
        if hasattr(self.group, 'simplify_term'):
            return self.group.simplify_term(term)
        return term

    def __mul__(self, x):
        # Let SubstituteTerm handle the interaction itself.
        if isinstance(x, SubstituteTerm):
            return NotImplemented
        matched, simplified = self._groupmulprops(x)
        product = simplified if matched else self.group.op(self, x)
        return self._post_simplify(product)

    def __rmul__(self, x):
        matched, simplified = self._groupmulprops(x)
        product = simplified if matched else self.group.op(x, self)
        return self._post_simplify(product)

    # Division is multiplication by the inverse: a / b == a * inv(b).
    def __truediv__(self, x):
        return self.__mul__(self.group.inv(x))

    def __rtruediv__(self, x):
        return self.__rmul__(self.group.inv(x))
class GroupVariable(GroupElement, Variable):
    """A variable symbol that belongs to a particular group."""

    def __init__(self, g : Group, symbol : str):
        GroupElement.__init__(self, g)
        Variable.__init__(self, symbol)

    def __hash__(self):
        return hash((self.group, self.symbol))

    def __eq__(self, other):
        if type(self) is not type(other):
            return False
        return self.group == other.group and self.symbol == other.symbol
class GroupFuncTerm(GroupElement, FuncTerm):
    """A function application that belongs to a group.

    Keeps the wrapped associative term in ``self.term`` and mirrors any
    function/argument mutation onto both this object and the inner term.
    """

    def __init__(self, g : Group, a_term : ATerm):
        GroupElement.__init__(self, g)
        FuncTerm.__init__(self, a_term.function, a_term.arguments)
        self.term = a_term

    def set_arguments(self, args):
        frozen = tuple(args)
        # Keep the inner term and this wrapper in sync.
        self.term.arguments = frozen
        self.arguments = frozen

    def set_function(self, function : Function):
        self.function = function
        self.term.function = function

    def __hash__(self):
        return hash((self.group, self.term))

    def __repr__(self):
        return repr(self.term)

    def __str__(self):
        return str(self.term)

    def __eq__(self, other):
        if type(self) is not type(other):
            return False
        return self.group == other.group and self.term == other.term
class GroupConstant(GroupElement, Constant):
    """A named constant that belongs to a particular group."""

    def __init__(self, g : Group, symbol : str):
        GroupElement.__init__(self, g)
        Constant.__init__(self, symbol)

    def __hash__(self):
        return hash((self.group, self.symbol))

    def __eq__(self, other):
        if type(self) is not type(other):
            return False
        return self.group == other.group and self.symbol == other.symbol
# An abelian group is a group where the operation is also commutative.
class AbelianGroup(Group):
    """A commutative group: the operation must be an ACFunction
    (associative and commutative)."""
    def __init__(self, name : str, operation : ACFunction, inv_symbol = None, identity_symbol = "e"):
        # Commutativity is enforced by requiring an ACFunction.
        if not isinstance(operation, ACFunction):
            raise ValueError("operation must be associative and commutative (ACFunction)")
        super().__init__(name, operation, inv_symbol = inv_symbol, identity_symbol = identity_symbol)
|
[
23,
29,
33,
36,
37
] |
2,491 |
994210b3de82af02ec7b1b7bee75ceb88ffb2bd5
|
# ASCII TAB, used as the field separator inside __repr__ output.
# NOTE(review): this is a bytes literal; under Python 2 bytes == str so the
# string concatenations below work -- under Python 3 they would raise.
HORIZONTAL_TABLE = b'\x09'
class ReagentInfoItem():
    """A single reagent-info unit: one cell of the reagent table."""

    def __init__(self, reagent_name, reagent_count):
        self.reagent_name = reagent_name
        self.reagent_count = reagent_count

    def __repr__(self):
        fields = ['reagent name: ' + self.reagent_name,
                  'reagent count: ' + str(self.reagent_count)]
        return HORIZONTAL_TABLE.join(fields)
class InstrumentReagentInfo():
    '''
    Reagent information for a single instrument; from the table's view,
    it is a column of the reagent info table.
    '''
    def __init__(self, instr_id, instr_type, time_stamp=None, reagent_info_list=None):
        '''
        instr_id: str -- instrument identifier
        instr_type: str -- instrument type code
        time_stamp: str -- 'YYYYmmddHHMMSS' update time (or None)
        reagent_info_list: ReagentInfoItem[] -- defaults to a fresh empty list
        '''
        self.instrument_id = instr_id
        self.instrument_type = instr_type
        self.time_stamp = time_stamp
        # Bug fix: the original used a mutable default argument ([]), so
        # every instance constructed without a list shared ONE list object.
        # Each instance now gets its own.
        self.reagent_info_list = [] if reagent_info_list is None else reagent_info_list

    def __repr__(self):
        return 'instrument id: '+ self.instrument_id + HORIZONTAL_TABLE +\
               'instrument type: ' + self.instrument_type + HORIZONTAL_TABLE+\
               'updated timestamp: ' + str(self.time_stamp) + HORIZONTAL_TABLE+\
               '\nreagent inventory info:\n' + '\n'.join(str(item) for item in self.reagent_info_list)
class SystemReagentInfo():
    '''
    Reagent information of the whole system: at most one
    InstrumentReagentInfo record per instrument id.
    '''
    def __init__(self):
        self.system_reagent = []

    def update_instrument_reagent_inventory(self, instrument_reagent_invemtory):
        '''Insert or replace an instrument's record.

        A record is accepted only if there is no stored record for that
        instrument yet, or the stored record is older.  Timestamps are
        'YYYYmmddHHMMSS' strings, so lexicographic comparison matches
        chronological order.

        Bug fix: the original assigned the new record to a LOCAL variable
        (old_record = ...) instead of replacing the stored list entry, so
        updates for already-known instruments were silently dropped.
        '''
        if not isinstance(instrument_reagent_invemtory, InstrumentReagentInfo):
            return
        instr_id = instrument_reagent_invemtory.instrument_id
        last = self.get_last_update_timestamp_per_instrument(instr_id)
        if not last or last < instrument_reagent_invemtory.time_stamp:
            for index, item in enumerate(self.system_reagent):
                if isinstance(item, InstrumentReagentInfo) and item.instrument_id == instr_id:
                    # Replace the stale record in place.
                    self.system_reagent[index] = instrument_reagent_invemtory
                    return
            self.system_reagent.append(instrument_reagent_invemtory)

    def get_instrument_reagent_inventory_item_by_id(self, instr_id):
        '''Return the stored record for instr_id, or None if absent.'''
        for item in self.system_reagent:
            if isinstance(item, InstrumentReagentInfo) and item.instrument_id == instr_id:
                return item

    def get_last_update_timestamp_per_instrument(self, instr_id):
        '''Return the timestamp of instr_id's stored record, or None.'''
        for item in self.system_reagent:
            if isinstance(item, InstrumentReagentInfo) and item.instrument_id == instr_id:
                return item.time_stamp

    def __repr__(self):
        return 'system reagent info:\n' +'\n'.join(str(item) for item in self.system_reagent)
def test01():
    # Exercise SystemReagentInfo: a newer record for instrument '5' would
    # replace the stored one, while the stale record (older timestamp)
    # must be ignored.
    ReagentInfoItem11 = ReagentInfoItem('dai', 12)
    ReagentInfoItem12 = ReagentInfoItem('han', 13)
    ReagentInfoItem13 = ReagentInfoItem('peng', 14)
    ReagentInfoList1 = [ReagentInfoItem11, ReagentInfoItem12, ReagentInfoItem13]
    ReagentInfoItem21 = ReagentInfoItem('I', 32)
    ReagentInfoItem22 = ReagentInfoItem('love', 33)
    ReagentInfoItem23 = ReagentInfoItem('python', 34)
    ReagentInfoList2 = [ReagentInfoItem21, ReagentInfoItem22, ReagentInfoItem23]
    # 'normal testing, below info should be updated:'
    InstrumentInfo1 = InstrumentReagentInfo('5', 'A24', '20160101110909', ReagentInfoList1)
    InstrumentInfo2 = InstrumentReagentInfo('7', 'CEN', '20151212090923', ReagentInfoList2)
    # 'abnormal testing, below info should not be updated:'
    # (same instrument id '5' but an OLDER timestamp than InstrumentInfo1)
    InstrumentInfo3 = InstrumentReagentInfo('5', 'A24', '20150101110909', ReagentInfoList2)
    aptioReagentInfo = SystemReagentInfo()
    aptioReagentInfo.update_instrument_reagent_inventory(InstrumentInfo1)
    aptioReagentInfo.update_instrument_reagent_inventory(InstrumentInfo2)
    aptioReagentInfo.update_instrument_reagent_inventory(InstrumentInfo3)
    print aptioReagentInfo
def test02():
    # Scratch check: parse a 'YYYYmmddHHMMSS' string into a datetime.
    from datetime import datetime
    dt1 = '20141117100340'
    dt = datetime.strptime(dt1,'%Y%m%d%H%M%S')
    # NOTE(review): comparing a datetime with None only works under
    # Python 2 (prints False); Python 3 raises TypeError here.
    print dt < None

if __name__ == '__main__':
    test02()
| null | null | null | null |
[
0
] |
2,492 |
a9df8e45c8b5068aeec2b79e21de6217a3103bb4
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import requests
from bs4 import BeautifulSoup
url = "http://javmobile.net/?s=julia"
r = requests.get(url)
soup = BeautifulSoup(r.content, "html.parser")
imgs = soup.find_all("img" , {"class": "entry-thumb"})
images = []
titles = []
srcs = []
for img in imgs:
images.append(img.get("src"))
titles.append(img.get("title"))
srcs.append(img.get("href"))
videos = []
for src in srcs:
url2 = "http://javmobile.net/censored/oppai/pppd-524-spence-mammary-gland-development-clinic-special-julia.html"
r2 = requests.get(url2)
soup2 = BeautifulSoup(r2.content, "html.parser")
jsonList = {}
for i in range(0,len(images)):
jsonList.append({"name" : titles[i], "thumb": images[i]})
print jsonList
| null | null | null | null |
[
0
] |
2,493 |
064792a6aba96a679bec606a85b19d4925861f7d
|
<mask token>
class AppendTrailingSlashHandler(webapp2.RequestHandler):
def get(self, uri):
self.response.set_status(301)
redirect_uri = uri + '/'
self.response.headers['Location'] = redirect_uri
self.response.headers['Content-Type'] = 'text/plain'
self.response.write(redirect_uri)
<mask token>
|
<mask token>
class RedirectToSiteRootHandler(webapp2.RequestHandler):
<mask token>
class AppendTrailingSlashHandler(webapp2.RequestHandler):
def get(self, uri):
self.response.set_status(301)
redirect_uri = uri + '/'
self.response.headers['Location'] = redirect_uri
self.response.headers['Content-Type'] = 'text/plain'
self.response.write(redirect_uri)
<mask token>
|
<mask token>
class RedirectToSiteRootHandler(webapp2.RequestHandler):
def get(self):
self.response.set_status(301)
self.response.headers['Location'] = '/'
class AppendTrailingSlashHandler(webapp2.RequestHandler):
def get(self, uri):
self.response.set_status(301)
redirect_uri = uri + '/'
self.response.headers['Location'] = redirect_uri
self.response.headers['Content-Type'] = 'text/plain'
self.response.write(redirect_uri)
<mask token>
|
import webapp2
class RedirectToSiteRootHandler(webapp2.RequestHandler):
def get(self):
self.response.set_status(301)
self.response.headers['Location'] = '/'
class AppendTrailingSlashHandler(webapp2.RequestHandler):
def get(self, uri):
self.response.set_status(301)
redirect_uri = uri + '/'
self.response.headers['Location'] = redirect_uri
self.response.headers['Content-Type'] = 'text/plain'
self.response.write(redirect_uri)
app = webapp2.WSGIApplication([('/blog', RedirectToSiteRootHandler), (
'/blog/', RedirectToSiteRootHandler), ('(.*[^/])',
AppendTrailingSlashHandler)], debug=True)
|
import webapp2
class RedirectToSiteRootHandler(webapp2.RequestHandler):
    """Permanently (301) redirects the request to the site root '/'."""

    def get(self):
        response = self.response
        response.set_status(301)
        response.headers['Location'] = '/'
class AppendTrailingSlashHandler(webapp2.RequestHandler):
    """301-redirects any URI lacking a trailing slash to the slashed form,
    echoing the target URI in a plain-text body."""

    def get(self, uri):
        target = uri + '/'
        self.response.set_status(301)
        self.response.headers['Location'] = target
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.write(target)
# Route table: legacy /blog URLs go to the site root; any other URI that
# does not end in '/' is 301-redirected to its trailing-slash form.
app = webapp2.WSGIApplication([
    ('/blog', RedirectToSiteRootHandler),
    ('/blog/', RedirectToSiteRootHandler),
    ('(.*[^/])', AppendTrailingSlashHandler),
], debug=True)
|
[
2,
3,
4,
6,
7
] |
2,494 |
6ad939ab541562efdaacb8b56865e76d1745176a
|
#!/usr/bin/env python
# Ben Suay, RAIL
# May 2013
# Worcester Polytechnic Institute
#
# http://openrave.org/docs/latest_stable/command_line_tools/
# openrave-robot.py /your/path/to/your.robot.xml --info=joints
# On that page you can find more examples on how to use openrave-robot.py.
from openravepy import *
import sys
if not __openravepy_build_doc__:
from openravepy import *
from numpy import *
import numpy
import time
from rodrigues import *
from TransformMatrix import *
from str2num import *
from TSR import *
from math import *
from copy import *
import os # for file operations
from RaveCBiRRT import *
from base_wheel_turning import *
class HuboPlusWheelTurning( BaseWheelTurning ):
    def __init__(self,
                 HuboModelPath = '../../openHubo/huboplus/rlhuboplus.robot.xml',
                 WheelModelPath = '../../../drc_common/models/driving_wheel.robot.xml' ):
        """Load the HuboPlus robot and driving-wheel models and set up
        UI flags plus the hand joint index/value tables."""
        BaseWheelTurning.__init__( self, HuboModelPath, WheelModelPath )

        # Set those variables to show or hide the interface
        # Do it using the member functions
        self.StopAtKeyStrokes = False
        self.ShowUserInterface = False
        self.ViewerStarted = False

        # Right Hand Joints (DOF indices 27-41)
        # Open - Closed Values
        self.rhanddofs = range(27,42)
        self.rhandclosevals = [0.439, 0.683, 0.497, 0.439, 0.683, 0.497, 0.439, 0.683, 0.497, 0.439, 0.683, 0.497, 0, 0, 1.2]
        self.rhandopenvals = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.08]

        # Left Hand Joints (DOF indices 42-56)
        self.lhanddofs = range(42,57)
        self.lhandclosevals = [0.439, 0.683, 0.497, 0.439, 0.683, 0.497, 0.439, 0.683, 0.497, 0.439, 0.683, 0.497, 0, 0, 1.2]
        self.lhandopenvals = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.08]
    def SetRobotConfiguration(self,jointValues):
        """Copy named joint angles from the jointValues dict into the
        OpenRAVE robot's DOF vector.

        jointValues: dict mapping Hubo joint names (e.g. 'RSP', 'LKP')
        to angles; DOFs 27-56 (the finger joints) are zeroed.
        """
        print "SetRobotConfiguration"
        values = []
        values.append( jointValues['HPY'] ) # 0
        values.append( jointValues['RHY'] ) # 1
        values.append( jointValues['LHY'] ) # 2
        values.append( jointValues['RHR'] ) # 3
        # NOTE(review): DOF 4 repeats 'HPY'.  The neighbouring entries
        # (3 RHR, 5 LHR, 6 LHP) suggest this slot should be the right hip
        # pitch ('RHP') -- confirm against the rlhuboplus joint ordering.
        values.append( jointValues['HPY'] ) # 4
        values.append( jointValues['LHR'] ) # 5
        values.append( jointValues['LHP'] ) # 6
        values.append( jointValues['RKP'] ) # 7
        values.append( jointValues['LKP'] ) # 8
        values.append( jointValues['RAP'] ) # 9
        values.append( jointValues['LAP'] ) # 10
        values.append( jointValues['RAR'] ) # 11
        values.append( jointValues['LAR'] ) # 12
        values.append( jointValues['RSP'] ) # 13
        values.append( jointValues['LSP'] ) # 14
        values.append( jointValues['RSR'] ) # 15
        values.append( jointValues['LSR'] ) # 16
        values.append( jointValues['RSY'] ) # 17
        values.append( jointValues['LSY'] ) # 18
        values.append( jointValues['REP'] ) # 19
        values.append( jointValues['LEP'] ) # 20
        values.append( jointValues['RWY'] ) # 21
        values.append( jointValues['LWY'] ) # 22
        values.append( jointValues['RWP'] ) # 23
        values.append( jointValues['LWP'] ) # 24
        values.append( jointValues['HNR'] ) # 25
        values.append( jointValues['HNP'] ) # 26
        # Finger joints (27-56) are not driven from jointValues; zero them.
        for i in range(27,57):
            values.append(0)
        # values.append( jointValues['rightIndexKnuckle2'] ) # 27
        # values.append( jointValues['rightIndexKnuckle3'] ) # 28
        # values.append( jointValues['rightIndexKnuckle1'] ) # 29
        # values.append( jointValues['rightMiddleKnuckle2'] ) # 30
        # values.append( jointValues['rightMiddleKnuckle3'] ) # 31
        # values.append( jointValues['rightMiddleKnuckle1'] ) # 32
        # values.append( jointValues['rightRingKnuckle2'] ) # 33
        # values.append( jointValues['rightRingKnuckle3'] ) # 34
        # values.append( jointValues['rightRingKnuckle1'] ) # 35
        # values.append( jointValues['rightPinkyKnuckle2'] ) # 36
        # values.append( jointValues['rightPinkyKnuckle3'] ) # 37
        # values.append( jointValues['rightPinkyKnuckle1'] ) # 38
        # values.append( jointValues['rightThumbKnuckle2'] ) # 39
        # values.append( jointValues['rightThumbKnuckle3'] ) # 40
        # values.append( jointValues['rightThumbKnuckle1'] ) # 41
        # values.append( jointValues['leftIndexKnuckle2'] ) # 42
        # values.append( jointValues['leftIndexKnuckle3'] ) # 43
        # values.append( jointValues['leftIndexKnuckle1'] ) # 44
        # values.append( jointValues['leftMiddleKnuckle2'] ) # 45
        # values.append( jointValues['leftMiddleKnuckle3'] ) # 46
        # values.append( jointValues['leftMiddleKnuckle1'] ) # 47
        # values.append( jointValues['leftRingKnuckle2'] ) # 48
        # values.append( jointValues['leftRingKnuckle3'] ) # 49
        # values.append( jointValues['leftRingKnuckle1'] ) # 50
        # values.append( jointValues['leftPinkyKnuckle2'] ) # 51
        # values.append( jointValues['leftPinkyKnuckle3'] ) # 52
        # values.append( jointValues['leftPinkyKnuckle1'] ) # 53
        # values.append( jointValues['leftThumbKnuckle2'] ) # 54
        # values.append( jointValues['leftThumbKnuckle3'] ) # 55
        # values.append( jointValues['leftThumbKnuckle1'] ) # 56
        self.robotid.SetDOFValues( values )
def Run(self):
self.RemoveFiles()
# This is a list of handles of the objects that are
# drawn on the screen in OpenRAVE Qt-Viewer.
# Keep appending to the end, and pop() if you want to delete.
handles = []
normalsmoothingitrs = 150;
fastsmoothingitrs = 20;
self.StartViewerAndSetWheelPos( handles )
# Wheel Joint Index
crankjointind = 0
# Set the wheel joints back to 0 for replanning
self.crankid.SetDOFValues([0],[crankjointind])
self.crankid.GetController().Reset(0)
manips = self.robotid.GetManipulators()
crankmanip = self.crankid.GetManipulators()
try:
cbirrtHubo = RaveCBiRRT(self.env,'rlhuboplus')
cbirrtWheel = RaveCBiRRT(self.env,'crank')
except openrave_exception, e:
print e
return []
# Keep Active Joint Indices
# Note that 0 is the driving wheel
#activedofs = [0]
activedofs = []
for m in manips:
# print m.GetArmIndices()
activedofs.extend(m.GetArmIndices())
# Sort Active Joint Indices
activedofs.sort()
#print activedofs
# Set Elbows and Thumbs Joint Values
self.robotid.SetDOFValues([-0.95,-0.95,1,1],[19,20,41,56])
self.robotid.SetActiveDOFs(activedofs)
# Current configuration of the robot is its initial configuration
initconfig = self.robotid.GetActiveDOFValues()
print "robot init config : "
print initconfig
# List of Robot Links
links = self.robotid.GetLinks()
# List of Wheel (Crank Links)
cranklinks = self.crankid.GetLinks()
# End Effector Transforms
Tee = []
for i in range(len(manips)):
# Returns End Effector Transform in World Coordinates
Tlink = manips[i].GetEndEffectorTransform()
Tee.append(Tlink)
# Get Transformation Matrix for the Wheel
# Note that crank's links are not rotated
# If you want use the wheel's end effector's transformation
# matrix (which is 23 degrees tilted) then see
# CTee matrix below.
#
# crank has two links:
# 0) pole - the blue cylinder in the model, and,
# 1) crank - the driving wheel itself.
jointtm = cranklinks[0].GetTransform()
# handles.append(misc.DrawAxes(env,matrix(jointtm),1))
# We can also get the transformation matrix
# with the following command as a string
jointtm_str = cbirrtHubo.solve('GetJointTransform name crank jointind '+str(crankjointind))
# And then we can convert the string to a 1x12 array
jointtm_str = jointtm_str.replace(" ",",")
jointtm_num = eval('['+jointtm_str+']')
# In this script we will use jointtm.
# jointtm_str and jointtm_num are given as example.
# Crank Transform End Effector in World Coordinates
# This is the transformation matrix of the end effector
# named "dummy" in the xml file.
# Note that dummy is tilted 23 degress around its X-Axis
CTee = crankmanip[0].GetEndEffectorTransform()
tilt_angle_deg = acos(dot(linalg.inv(CTee),jointtm)[1,1])*180/pi
tilt_angle_rad = acos(dot(linalg.inv(CTee),jointtm)[1,1])
# Center of Gravity Target
cogtarg = [-0.05, 0.085, 0]
#if self.ShowUserInterface :
#cogtm = MakeTransform(rodrigues([0,0,0]),transpose(matrix(cogtarg)))
#handles.append(misc.DrawAxes(self.env,cogtm,1))
# polyscale: changes the scale of the support polygon
# polytrans: shifts the support polygon around
footlinknames = ' Body_RAR Body_LAR polyscale 0.5 0.5 0 polytrans -0.015 0 0 '
#footlinknames = ' Body_RAR Body_LAR polyscale 0.7 0.5 0 polytrans -0.015 0 0 '
#footlinknames = ' Body_RAR Body_LAR polyscale 1.0 1.0 0 polytrans 0 0 0 '
# What is this?
handrot = rodrigues([0,-pi/2,0])
# Translation Offset from the wheel center for the hands
transoffset = [0, 0.15, 0];
# Figure out where to put the left hand on the wheel
temp = dot(CTee, MakeTransform(rodrigues([-pi/2,0,0]),transpose(matrix([0,0,0]))))
temp = dot(temp, MakeTransform(rodrigues([0,0,-pi/2]),transpose(matrix([0,0,0]))))
# Left Hand Pose in World Coordinates
T0_LH1 = dot(temp, MakeTransform(rodrigues([0,0,0]),transpose(matrix([0,0.15,0]))))
# Uncomment if you want to see where T0_LH1 is
# handles.append(misc.DrawAxes(env,matrix(T0_LH1),1))
# Figure out where to put the right hand on the wheel
temp = dot(CTee, MakeTransform(rodrigues([-pi/2,0,0]),transpose(matrix([0,0,0]))))
temp = dot(temp, MakeTransform(rodrigues([0,0,-pi/2]),transpose(matrix([0,0,0]))))
# Right Hand Pose in World Coordinates
T0_RH1 = dot(temp, MakeTransform(rodrigues([0,0,0]),transpose(matrix([0,-0.15,0]))))
# Uncomment if you want to see where T0_RH1 is
# handles.append(misc.DrawAxes(env,matrix(T0_RH1),1))
# Define Task Space Region strings
# Left Hand
TSRString1 = SerializeTSR(0,'NULL',T0_LH1,eye(4),matrix([0,0,0,0,0,0,0,0,0,0,0,0]))
# Right Hand
TSRString2 = SerializeTSR(1,'NULL',T0_RH1,eye(4),matrix([0,0,0,0,0,0,0,0,0,0,0,0]))
# Left Foot
TSRString3 = SerializeTSR(2,'NULL',Tee[2],eye(4),matrix([0,0,0,0,0,0,0,0,0,0,0,0]))
# Head
# Grasp transform in Head coordinates
Tw0_eH = eye(4)
# How much freedom do we want to give to the Head
# [x,x,y,y,z,z,R,R,P,P,Y,Y]
Bw0H = matrix([0,0,-0.1,0.1,-0.1,0.01,0,0,0,0,0,0])
TSRString4 = SerializeTSR(4,'NULL',Tee[4],Tw0_eH,Bw0H)
# We defined Task Space Regions. Now let's concatenate them.
TSRChainStringGrasping = SerializeTSRChain(0,1,0,1,TSRString1,'NULL',[])+' '+SerializeTSRChain(0,1,0,1,TSRString2,'NULL',[])+' '+SerializeTSRChain(0,1,1,1,TSRString3,'NULL',[])+' '+SerializeTSRChain(0,1,1,1,TSRString4,'NULL',[])
if( self.StopAtKeyStrokes ):
print "Press Enter to plan initconfig --> startik"
sys.stdin.readline()
# Get a trajectory from initial configuration to grasp configuration
with self.robotid:
try:
answer = cbirrtHubo.solve('RunCBiRRT psample 0.2 supportlinks 2 '+footlinknames+' smoothingitrs '+str(normalsmoothingitrs)+' '+TSRChainStringGrasping)
print "RunCBiRRT answer: ",str(answer)
except openrave_exception, e:
print "Cannot send command RunCBiRRT: "
print e
return []
try:
os.rename("cmovetraj.txt","movetraj0.txt")
except OSError, e:
# No file cmovetraj
print e
return []
# The following is the same as commented out try-except section
traj = RaveCreateTrajectory(self.env,'').deserialize(open('movetraj0.txt','r').read())
self.robotid.GetController().SetPath(traj)
self.robotid.WaitForController(0)
self.robotid.GetController().Reset(0)
# Reset(0) releases the controller, otherwise after calling
# SetPath the robot controller actively holds the trajectory's final joint values
# Instead of 4 lines above, we could use the following block
# to play the trajectory
#
# try:
# answer= cbirrtHubo.solve('traj movetraj0.txt');
# robotid.WaitForController(0)
# sys.stdin.readline()
# # debug
# print "traj call answer: ",str(answer)
# except openrave_exception, e:
# print e
# Get the current configuration of the robot
# and assign it to startik (start of the wheel
# rotation path).
startik = self.robotid.GetActiveDOFValues()
# Left Hand's index is less than the right hand.
# Hence it is evaluated first by the CBiRRT Module.
# That's why We need to define the right hand's
# transform relative to the wheel (ask Dmitry Berenson
# about this for more information).
temp1 = MakeTransform(rodrigues([-pi/2,0,0]),transpose(matrix([0,0,0])))
temp2 = MakeTransform(rodrigues([0,0,-pi/2]),transpose(matrix([0,0,0])))
# Rotate the wheel's transform to a suitable pose
# for the Left Hand
# T0_w0L stands for:
# left hand's transform on wheel in world coordinates
T0_w0L = dot(dot(CTee,temp1),temp2)
# This is what's happening:
#
# Tw0L_0 = linalg.inv(T0_w0L)
# Tw0L_LH1 = Tw0L_0*T0_LH1
#
# Left hand's transform in wheel's coordinates
Tw0L_LH1 = dot(linalg.inv(T0_w0L),T0_LH1)
# Transform of the left hand's end effector in wheel's coords.
# Required by CBiRRT
Tw0_eL = Tw0L_LH1
# How much freedom do we want to give to the left hand
Bw0L = matrix([0,0,0,0,0,0,0,pi,0,0,0,0])
# Right Hand's transforms:
T0_crankcrank = self.crankid.GetManipulators()[0].GetTransform()
T0_w0R = MakeTransform(rodrigues([tilt_angle_rad,0,0]),transpose(matrix([0,0,0])))
# End effector transform in wheel coordinates
Tw0_eR = dot(linalg.inv(T0_crankcrank),T0_RH1)
#handles.append(misc.DrawAxes(env,matrix(Tw0_eR),1))
# How much freedom? (note: in frame of crank)
Bw0R = matrix([0,0,0,0,0,0,0,0,0,0,0,0])
# Head's transforms:
T0_w0H = Tee[4]
Tw0_eH = eye(4);
Bw0H = matrix([-0.05,0.05,-0.1,0.1,-100,100,-pi,pi,-pi,pi,-pi,pi])
# Define Task Space Regions
# Left Hand
TSRString1 = SerializeTSR(0,'NULL',T0_w0L,Tw0_eL,Bw0L)
# Right Hand
TSRString2 = SerializeTSR(1,'crank crank',T0_w0R,Tw0_eR,Bw0R)
# Left Foot
TSRString3 = SerializeTSR(2,'NULL',Tee[2],eye(4),matrix([0,0,0,0,0,0,0,0,0,0,0,0]))
# Head
TSRString4 = SerializeTSR(4,'NULL',T0_w0H,Tw0_eH,Bw0H)
TSRChainStringFootOnly = SerializeTSRChain(0,0,1,1,TSRString3,'NULL',[])
TSRChainStringFootandHead = TSRChainStringFootOnly+' '+SerializeTSRChain(0,0,1,1,TSRString4,'NULL',[])
TSRChainStringTurning = SerializeTSRChain(0,0,1,1,TSRString1,'crank',matrix([crankjointind]))+' '+SerializeTSRChain(0,0,1,1,TSRString2,'NULL',[])+' '+TSRChainStringFootandHead
# Calculate hand transforms after rotating the wheel (they will help us find the goalik):
# How much do we want to rotate the wheel?
crank_rot = pi/6.5
# Which joint do we want the CBiRRT to mimic the TSR for?
TSRChainMimicDOF = 1
# Create the transform for the wheel that we would like to reach to
Tcrank_rot = MakeTransform(rodrigues([crank_rot,0,0]),transpose(matrix([0,0,0])))
# What is this?
temp = MakeTransform(rodrigues([0,0,crank_rot]),transpose(matrix([0,0,0])))
# Rotate the left hand's transform on the wheel in world transform "crank_rot" radians around it's Z-Axis
T0_cranknew = dot(T0_w0L,Tcrank_rot)
# Where will the left hand go after turning the wheel?
# This is what's happening:
#
# Tcranknew_LH2 = dot(Tw0L_0,T0_LH1) --> Left hand in wheel's coordinate
# T0_LH2 = dot(T0_cranknew,Tcranknew_LH2) --> Left hand rotated around wheel's origin
T0_LH2 = dot(T0_cranknew,dot(linalg.inv(T0_w0L),T0_LH1))
# Uncomment to see T0_LH2
# handles.append(misc.DrawAxes(env,matrix(T0_LH2),1))
# Where will the right hand go after turning the wheel?
T0_RH2 = dot(T0_crankcrank,dot(temp,dot(linalg.inv(T0_crankcrank),T0_RH1)))
# Uncomment to see T0_RH2
# handles.append(misc.DrawAxes(env,matrix(T0_RH2),1))
arg1 = str(cogtarg).strip("[]").replace(', ',' ')
arg2 = trans_to_str(T0_LH2)
arg3 = trans_to_str(T0_RH2)
arg4 = trans_to_str(Tee[2])
# print arg1
# print arg2
# print arg3
# print arg4
if( self.StopAtKeyStrokes ):
print "Press Enter to find a goalIK"
sys.stdin.readline()
self.crankid.SetDOFValues([crank_rot],[crankjointind])
goalik = cbirrtHubo.solve('DoGeneralIK exec supportlinks 2 '+footlinknames+' movecog '+arg1+' nummanips 3 maniptm 0 '+arg2+' maniptm 1 '+arg3+' maniptm 2 '+arg4)
# print "goalIK"
# print goalik
self.robotid.SetActiveDOFValues(str2num(goalik))
self.crankid.SetDOFValues([crank_rot],[crankjointind])
if( self.StopAtKeyStrokes ):
print "Press Enter to go to startik"
sys.stdin.readline()
# Get a trajectory from goalik to grasp configuration
goaljoints = deepcopy(goalik)
for i in range(TSRChainMimicDOF):
goaljoints += ' 0'
goaljoints = str2num(goaljoints)
self.robotid.SetActiveDOFValues(startik)
time.sleep(0.5)
self.robotid.SetDOFValues(self.rhandclosevals,self.rhanddofs)
self.robotid.SetDOFValues(self.lhandclosevals,self.lhanddofs)
# Close hands to start "turning" the wheel
self.crankid.SetDOFValues([0],[crankjointind])
time.sleep(0.5)
if( self.StopAtKeyStrokes ):
print "Press Enter to plan startik --> goalik (DMITRY!!!)"
sys.stdin.readline()
print self.robotid.GetActiveDOFValues()
print TSRChainStringTurning
try:
answer = cbirrtHubo.solve('RunCBiRRT supportlinks 2 '+footlinknames+' smoothingitrs '+str(fastsmoothingitrs)+' jointgoals '+str(len(goaljoints))+' '+Serialize1DMatrix(matrix(goaljoints))+' '+TSRChainStringTurning)
print "RunCBiRRT answer: ",str(answer)
except openrave_exception, e:
print "Cannot send command RunCBiRRT: "
print e
return []
try:
os.rename("cmovetraj.txt","movetraj1.txt")
except OSError, e:
# No file cmovetraj
print e
return []
# The following is the same as commented out try-except section
# traj = RaveCreateTrajectory(env,'').deserialize(open('movetraj1.txt','r').read())
# robotid.GetController().SetPath(traj)
# crankid.GetController().SetPath(traj)
# robotid.WaitForController(0)
# crankid.WaitForController(0)
# robotid.GetController().Reset(0)
# crankid.GetController().Reset(0)
try:
answer= cbirrtHubo.solve('traj movetraj1.txt');
answer= cbirrtWheel.solve('traj movetraj1.txt');
self.robotid.WaitForController(0)
# debug
print "traj call answer: ",str(answer)
except openrave_exception, e:
print e
return []
self.robotid.GetController().Reset(0)
self.robotid.SetDOFValues(self.rhandopenvals,self.rhanddofs)
self.robotid.SetDOFValues(self.lhandopenvals,self.lhanddofs)
self.robotid.SetActiveDOFValues(str2num(goalik))
time.sleep(2)
if( self.StopAtKeyStrokes ):
print "Press Enter to plan goalik --> startik "
sys.stdin.readline()
goaljoints = startik
print self.robotid.GetActiveDOFValues()
print TSRChainStringFootandHead
try:
answer = cbirrtHubo.solve('RunCBiRRT supportlinks 2 '+footlinknames+' smoothingitrs '+str(normalsmoothingitrs)+' jointgoals '+str(len(goaljoints))+' '+Serialize1DMatrix(matrix(goaljoints))+' '+TSRChainStringFootandHead)
print "RunCBiRRT answer: ",str(answer)
except openrave_exception, e:
print "Cannot send command RunCBiRRT: "
print e
return []
try:
os.rename("cmovetraj.txt","movetraj2.txt")
except OSError, e:
# No file cmovetraj
print e
return []
try:
answer= cbirrtHubo.solve('traj movetraj2.txt');
self.robotid.WaitForController(0)
# debug
print "traj call answer: ",str(answer)
except openrave_exception, e:
print e
return []
self.robotid.GetController().Reset(0)
#self.robotid.SetDOFValues(rhandclosevals,rhanddofs)
#self.robotid.SetDOFValues(lhandclosevals,lhanddofs)
self.robotid.SetActiveDOFValues(startik)
time.sleep(1)
if( self.StopAtKeyStrokes ):
print "Press Enter to plan startik --> initconfig "
sys.stdin.readline()
goaljoints = initconfig
print goaljoints
try:
answer = cbirrtHubo.solve('RunCBiRRT supportlinks 2 '+footlinknames+' smoothingitrs '+str(normalsmoothingitrs)+' jointgoals '+str(len(goaljoints))+' '+Serialize1DMatrix(matrix(goaljoints))+' '+TSRChainStringFootandHead)
print "RunCBiRRT answer: ",str(answer)
except openrave_exception, e:
print "Cannot send command RunCBiRRT: "
print e
return []
try:
os.rename("cmovetraj.txt","movetraj3.txt")
except OSError, e:
# No file cmovetraj
print e
return []
try:
answer= cbirrtHubo.solve('traj movetraj3.txt');
self.robotid.WaitForController(0)
# debug
print "traj call answer: ",str(answer)
except openrave_exception, e:
print e
return []
self.robotid.GetController().Reset(0)
return self.Playback()
if __name__ == "__main__":
planner = HuboPlusWheelTurning()
planner.SetViewer(True)
planner.SetStopKeyStrokes(False)
planner.Run()
planner.KillOpenrave()
| null | null | null | null |
[
0
] |
2,495 |
5d4585dc96d4ebdbc15b7382038cfea959c9a6f3
|
<mask token>
class Filter:
<mask token>
@classmethod
def get_incorrect_vector_example(cls, file_list, example_number):
"""含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数"""
incorrect_vector_list = []
try:
file_list = file_list[0:example_number]
except:
pass
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
return incorrect_vector_list
@classmethod
def get_incorrect_vector_all(cls, file_list):
"""含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する"""
incorrect_vector_list = []
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
return incorrect_vector_list
@classmethod
def show_incorrect_vector_example(cls, file_list, example_number):
"""含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数を表示する"""
incorrect_vector_list = []
try:
file_list = file_list[0:example_number]
except:
pass
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
incorrect_vector_mean = mean(incorrect_vector_list)
plt.title('incorrect vector NO. of first {} data'.format(
example_number))
plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)
plt.axhline(incorrect_vector_mean, color='black')
plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(
incorrect_vector_mean))
plt.gca().yaxis.set_minor_locator(tick.MultipleLocator(100))
plt.grid(which='minor')
plt.show()
@classmethod
def show_incorrect_vector_all(cls, file_list):
"""含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する"""
incorrect_vector_list = []
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
incorrect_vector_mean = mean(incorrect_vector_list)
plt.title('incorrect vector NO. of all data')
plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)
plt.axhline(incorrect_vector_mean, color='black')
plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(
incorrect_vector_mean))
plt.grid()
plt.show()
<mask token>
@staticmethod
def get_total_incorrect_vector(file):
"""瞬時データに含まれる誤ベクトルの数を返す"""
data = dymod.InstantData(file)
status = data.get_data('Status')
return np.sum((status == 1) | (status == 17))
<mask token>
|
<mask token>
class Filter:
"""誤ベクトル数の確認,誤ベクトル数によるフィルタリング処理"""
@classmethod
def get_incorrect_vector_example(cls, file_list, example_number):
"""含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数"""
incorrect_vector_list = []
try:
file_list = file_list[0:example_number]
except:
pass
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
return incorrect_vector_list
@classmethod
def get_incorrect_vector_all(cls, file_list):
"""含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する"""
incorrect_vector_list = []
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
return incorrect_vector_list
@classmethod
def show_incorrect_vector_example(cls, file_list, example_number):
"""含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数を表示する"""
incorrect_vector_list = []
try:
file_list = file_list[0:example_number]
except:
pass
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
incorrect_vector_mean = mean(incorrect_vector_list)
plt.title('incorrect vector NO. of first {} data'.format(
example_number))
plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)
plt.axhline(incorrect_vector_mean, color='black')
plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(
incorrect_vector_mean))
plt.gca().yaxis.set_minor_locator(tick.MultipleLocator(100))
plt.grid(which='minor')
plt.show()
@classmethod
def show_incorrect_vector_all(cls, file_list):
"""含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する"""
incorrect_vector_list = []
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
incorrect_vector_mean = mean(incorrect_vector_list)
plt.title('incorrect vector NO. of all data')
plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)
plt.axhline(incorrect_vector_mean, color='black')
plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(
incorrect_vector_mean))
plt.grid()
plt.show()
@staticmethod
def filter_incorrect_vector(file_list, filter_value):
"""ファイル名のリストから,誤ベクトル数がfilter_value以上のファイルの名前を除外する"""
before = len(file_list)
print('Filtering...')
total_core = mp.cpu_count()
pool = mp.Pool(total_core)
args = [(file_list, total_core, i, filter_value) for i in range(
total_core)]
callback = pool.map(parallel_task, args)
error_index_list = []
for each_error_index_list in callback:
for error_index in each_error_index_list:
error_index_list.append(error_index)
error_index_list.sort(reverse=True)
for error_index in error_index_list:
del file_list[error_index]
after = len(file_list)
print('Finish!\nFiltered data:', str(before - after) + '/' + str(
before))
return file_list
@staticmethod
def get_total_incorrect_vector(file):
"""瞬時データに含まれる誤ベクトルの数を返す"""
data = dymod.InstantData(file)
status = data.get_data('Status')
return np.sum((status == 1) | (status == 17))
<mask token>
|
<mask token>
class Filter:
"""誤ベクトル数の確認,誤ベクトル数によるフィルタリング処理"""
@classmethod
def get_incorrect_vector_example(cls, file_list, example_number):
"""含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数"""
incorrect_vector_list = []
try:
file_list = file_list[0:example_number]
except:
pass
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
return incorrect_vector_list
@classmethod
def get_incorrect_vector_all(cls, file_list):
"""含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する"""
incorrect_vector_list = []
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
return incorrect_vector_list
@classmethod
def show_incorrect_vector_example(cls, file_list, example_number):
"""含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数を表示する"""
incorrect_vector_list = []
try:
file_list = file_list[0:example_number]
except:
pass
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
incorrect_vector_mean = mean(incorrect_vector_list)
plt.title('incorrect vector NO. of first {} data'.format(
example_number))
plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)
plt.axhline(incorrect_vector_mean, color='black')
plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(
incorrect_vector_mean))
plt.gca().yaxis.set_minor_locator(tick.MultipleLocator(100))
plt.grid(which='minor')
plt.show()
@classmethod
def show_incorrect_vector_all(cls, file_list):
"""含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する"""
incorrect_vector_list = []
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
incorrect_vector_mean = mean(incorrect_vector_list)
plt.title('incorrect vector NO. of all data')
plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)
plt.axhline(incorrect_vector_mean, color='black')
plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(
incorrect_vector_mean))
plt.grid()
plt.show()
@staticmethod
def filter_incorrect_vector(file_list, filter_value):
"""ファイル名のリストから,誤ベクトル数がfilter_value以上のファイルの名前を除外する"""
before = len(file_list)
print('Filtering...')
total_core = mp.cpu_count()
pool = mp.Pool(total_core)
args = [(file_list, total_core, i, filter_value) for i in range(
total_core)]
callback = pool.map(parallel_task, args)
error_index_list = []
for each_error_index_list in callback:
for error_index in each_error_index_list:
error_index_list.append(error_index)
error_index_list.sort(reverse=True)
for error_index in error_index_list:
del file_list[error_index]
after = len(file_list)
print('Finish!\nFiltered data:', str(before - after) + '/' + str(
before))
return file_list
@staticmethod
def get_total_incorrect_vector(file):
"""瞬時データに含まれる誤ベクトルの数を返す"""
data = dymod.InstantData(file)
status = data.get_data('Status')
return np.sum((status == 1) | (status == 17))
def parallel_task(args):
"""並列計算タスク"""
file_list, total_core, current_core, filter_value = args
file_count = len(file_list)
start = int(file_count * current_core / total_core)
end = int(file_count * (current_core + 1) / total_core) - 1
header = dymod.InstantData.get_header_row(file_list[0])
error_file_index_list = []
text = 'filtering task ' + str(current_core + 1) + '/' + str(total_core)
for i in tqdm(range(start, end), desc=text):
status = pd.read_csv(file_list[i], header=header)['Status']
if np.sum((status == 1) | (status == 17)) >= filter_value:
error_file_index_list.append(i)
return error_file_index_list
<mask token>
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as tick
from statistics import mean
from tqdm import tqdm
import multiprocessing as mp
from . import model as dymod
class Filter:
"""誤ベクトル数の確認,誤ベクトル数によるフィルタリング処理"""
@classmethod
def get_incorrect_vector_example(cls, file_list, example_number):
"""含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数"""
incorrect_vector_list = []
try:
file_list = file_list[0:example_number]
except:
pass
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
return incorrect_vector_list
@classmethod
def get_incorrect_vector_all(cls, file_list):
"""含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する"""
incorrect_vector_list = []
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
return incorrect_vector_list
@classmethod
def show_incorrect_vector_example(cls, file_list, example_number):
"""含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数を表示する"""
incorrect_vector_list = []
try:
file_list = file_list[0:example_number]
except:
pass
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
incorrect_vector_mean = mean(incorrect_vector_list)
plt.title('incorrect vector NO. of first {} data'.format(
example_number))
plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)
plt.axhline(incorrect_vector_mean, color='black')
plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(
incorrect_vector_mean))
plt.gca().yaxis.set_minor_locator(tick.MultipleLocator(100))
plt.grid(which='minor')
plt.show()
@classmethod
def show_incorrect_vector_all(cls, file_list):
"""含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する"""
incorrect_vector_list = []
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
incorrect_vector_mean = mean(incorrect_vector_list)
plt.title('incorrect vector NO. of all data')
plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)
plt.axhline(incorrect_vector_mean, color='black')
plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(
incorrect_vector_mean))
plt.grid()
plt.show()
@staticmethod
def filter_incorrect_vector(file_list, filter_value):
"""ファイル名のリストから,誤ベクトル数がfilter_value以上のファイルの名前を除外する"""
before = len(file_list)
print('Filtering...')
total_core = mp.cpu_count()
pool = mp.Pool(total_core)
args = [(file_list, total_core, i, filter_value) for i in range(
total_core)]
callback = pool.map(parallel_task, args)
error_index_list = []
for each_error_index_list in callback:
for error_index in each_error_index_list:
error_index_list.append(error_index)
error_index_list.sort(reverse=True)
for error_index in error_index_list:
del file_list[error_index]
after = len(file_list)
print('Finish!\nFiltered data:', str(before - after) + '/' + str(
before))
return file_list
@staticmethod
def get_total_incorrect_vector(file):
"""瞬時データに含まれる誤ベクトルの数を返す"""
data = dymod.InstantData(file)
status = data.get_data('Status')
return np.sum((status == 1) | (status == 17))
def parallel_task(args):
"""並列計算タスク"""
file_list, total_core, current_core, filter_value = args
file_count = len(file_list)
start = int(file_count * current_core / total_core)
end = int(file_count * (current_core + 1) / total_core) - 1
header = dymod.InstantData.get_header_row(file_list[0])
error_file_index_list = []
text = 'filtering task ' + str(current_core + 1) + '/' + str(total_core)
for i in tqdm(range(start, end), desc=text):
status = pd.read_csv(file_list[i], header=header)['Status']
if np.sum((status == 1) | (status == 17)) >= filter_value:
error_file_index_list.append(i)
return error_file_index_list
filtering = Filter()
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as tick
from statistics import mean
from tqdm import tqdm
import multiprocessing as mp
from . import model as dymod
class Filter:
"""誤ベクトル数の確認,誤ベクトル数によるフィルタリング処理"""
@classmethod
def get_incorrect_vector_example(cls, file_list, example_number):
"""含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数"""
incorrect_vector_list = []
try:
file_list = file_list[0:example_number]
except:
pass
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
return incorrect_vector_list
@classmethod
def get_incorrect_vector_all(cls, file_list):
"""含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する"""
incorrect_vector_list = []
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
return incorrect_vector_list
@classmethod
def show_incorrect_vector_example(cls, file_list, example_number):
"""含まれる瞬時データの内指定した個数のデータがそれぞれ持つ誤ベクトル数を表示する"""
incorrect_vector_list = []
try:
file_list = file_list[0:example_number]
except:
pass
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
incorrect_vector_mean = mean(incorrect_vector_list)
# plot
plt.title('incorrect vector NO. of first {} data'.format(example_number))
plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)
plt.axhline(incorrect_vector_mean, color='black')
plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(incorrect_vector_mean))
plt.gca().yaxis.set_minor_locator(tick.MultipleLocator(100))
plt.grid(which='minor')
plt.show()
@classmethod
def show_incorrect_vector_all(cls, file_list):
"""含まれる瞬時データ全てがそれぞれ持つ誤ベクトル数を表示する"""
incorrect_vector_list = []
for i, file in enumerate(tqdm(file_list)):
total_incorrect_vector = cls.get_total_incorrect_vector(file)
incorrect_vector_list.append(total_incorrect_vector)
incorrect_vector_mean = mean(incorrect_vector_list)
# plot
plt.title('incorrect vector NO. of all data')
plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)
plt.axhline(incorrect_vector_mean, color='black')
plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(incorrect_vector_mean))
plt.grid()
plt.show()
@staticmethod
def filter_incorrect_vector(file_list, filter_value):
"""ファイル名のリストから,誤ベクトル数がfilter_value以上のファイルの名前を除外する"""
before = len(file_list)
print('Filtering...')
total_core = mp.cpu_count()
pool = mp.Pool(total_core)
args = [(file_list, total_core, i, filter_value) for i in range(total_core)]
callback = pool.map(parallel_task, args)
error_index_list = []
for each_error_index_list in callback:
for error_index in each_error_index_list:
error_index_list.append(error_index)
error_index_list.sort(reverse=True)
for error_index in error_index_list:
del file_list[error_index]
after = len(file_list)
print('Finish!\nFiltered data:', str(before - after) + '/' + str(before))
return file_list
@staticmethod
def get_total_incorrect_vector(file):
"""瞬時データに含まれる誤ベクトルの数を返す"""
data = dymod.InstantData(file)
status = data.get_data('Status')
return np.sum((status == 1) | (status == 17))
def parallel_task(args):
"""並列計算タスク"""
file_list, total_core, current_core, filter_value = args
file_count = len(file_list)
start = int(file_count * current_core / total_core)
end = int(file_count * (current_core + 1) / total_core) - 1
header = dymod.InstantData.get_header_row(file_list[0])
error_file_index_list = []
text = 'filtering task ' + str(current_core + 1) + '/' + str(total_core)
for i in tqdm(range(start, end), desc=text):
status = pd.read_csv(file_list[i], header=header)['Status']
if np.sum((status == 1) | (status == 17)) >= filter_value:
error_file_index_list.append(i)
return error_file_index_list
filtering = Filter()
|
[
6,
8,
9,
11,
12
] |
2,496 |
6161653fb789040d084e475e0ae25921e2e0676b
|
<mask token>
|
<mask token>
for i in k:
if n % i == 0:
f = 1
print('YES')
break
if f == 0:
print('NO')
|
n = int(input())
k = [4, 7, 47, 74, 44, 77, 444, 447, 474, 477, 777, 774, 747, 7444]
f = 0
for i in k:
if n % i == 0:
f = 1
print('YES')
break
if f == 0:
print('NO')
|
n=int(input())
k=[4,7,47,74,44,77,444,447,474,477,777,774,747,7444]
f=0
for i in k:
if(n%i==0):
f=1
print("YES")
break;
if(f==0):
print("NO")
| null |
[
0,
1,
2,
3
] |
2,497 |
2f0dc8697e979f307c86a08832b0eae86357d416
|
<mask token>
|
<mask token>
with open(filename) as file_object:
lines = file_object.readlines()
<mask token>
for line in lines:
c_string += line.rstrip()
print(f"{c_string.replace('Python', 'Scala')}")
|
filename = 'learning_python.txt'
with open(filename) as file_object:
lines = file_object.readlines()
c_string = ''
for line in lines:
c_string += line.rstrip()
print(f"{c_string.replace('Python', 'Scala')}")
|
filename = 'learning_python.txt'
# with open(filename) as file_object:
# contents = file_object.read()
# print(contents)
# with open(filename) as file_object:
# for line in file_object:
# print(line.rstrip())
with open(filename) as file_object:
lines = file_object.readlines()
c_string = ''
for line in lines:
c_string += line.rstrip()
print(f"{c_string.replace('Python', 'Scala')}")
| null |
[
0,
1,
2,
3
] |
2,498 |
d14937aaa7a80d6b95825afa2a2d6ff8202e5f5c
|
<mask token>
|
<mask token>
print(filtered_words)
<mask token>
print(' '.join(singles))
|
stop_words = ['the', 'an', 'is', 'there']
word_list = ['we', 'are', 'the', 'students']
filtered_words = [word for word in word_list if word not in stop_words]
print(filtered_words)
<mask token>
cachedStopWords = stopwords.words('english')
<mask token>
stemmer = PorterStemmer()
test_strs = ['caresses', 'flies', 'dies', 'mules', 'denied', 'died',
'agreed', 'owned', 'humbled', 'sized', 'meeting', 'stating', 'siezing',
'itemization', 'sensational', 'traditional', 'reference', 'colonizer',
'plotted']
singles = [stemmer.stem(word) for word in test_strs]
print(' '.join(singles))
|
stop_words = ['the', 'an', 'is', 'there']
word_list = ['we', 'are', 'the', 'students']
filtered_words = [word for word in word_list if word not in stop_words]
print(filtered_words)
from nltk.corpus import stopwords
cachedStopWords = stopwords.words('english')
from nltk.stem.porter import *
stemmer = PorterStemmer()
test_strs = ['caresses', 'flies', 'dies', 'mules', 'denied', 'died',
'agreed', 'owned', 'humbled', 'sized', 'meeting', 'stating', 'siezing',
'itemization', 'sensational', 'traditional', 'reference', 'colonizer',
'plotted']
singles = [stemmer.stem(word) for word in test_strs]
print(' '.join(singles))
|
# 出现频率特别高的和频率特别低的词对于文本分析帮助不大,一般在预处理阶段会过滤掉。
# 在英文里,经典的停用词为 “The”, "an"....
# 方法1: 自己建立一个停用词词典
stop_words = ["the", "an", "is", "there"]
# 在使用时: 假设 word_list包含了文本里的单词
word_list = ["we", "are", "the", "students"]
filtered_words = [word for word in word_list if word not in stop_words]
print (filtered_words)
# 方法2:直接利用别人已经构建好的停用词库
from nltk.corpus import stopwords
cachedStopWords = stopwords.words("english")
from nltk.stem.porter import *
stemmer = PorterStemmer()
test_strs = ['caresses', 'flies', 'dies', 'mules', 'denied',
'died', 'agreed', 'owned', 'humbled', 'sized',
'meeting', 'stating', 'siezing', 'itemization',
'sensational', 'traditional', 'reference', 'colonizer',
'plotted']
singles = [stemmer.stem(word) for word in test_strs]
print(' '.join(singles)) # doctest: +NORMALIZE_WHITESPACE
|
[
0,
1,
2,
3,
4
] |
2,499 |
664f9d5aa981c3590043fae1d0c80441bda4fbb1
|
<mask token>
@app.route('/')
def home():
thing = request.args.get('thing')
height = request.args.get('height')
color = request.args.get('color')
return render_template('home1.html', thing=thing, height=height, color=
color)
<mask token>
|
<mask token>
print(
"""
================================ RESTART ================================
"""
)
<mask token>
print(
"""
================================ RESTART ================================
"""
)
<mask token>
print(
"""
================================ RESTART ================================
"""
)
<mask token>
print(
"""
================================ RESTART ================================
"""
)
<mask token>
@app.route('/')
def home():
thing = request.args.get('thing')
height = request.args.get('height')
color = request.args.get('color')
return render_template('home1.html', thing=thing, height=height, color=
color)
if __name__ == '__main__':
app.run(debug=True)
|
<mask token>
print(
"""
================================ RESTART ================================
"""
)
<mask token>
print(
"""
================================ RESTART ================================
"""
)
<mask token>
print(
"""
================================ RESTART ================================
"""
)
<mask token>
print(
"""
================================ RESTART ================================
"""
)
<mask token>
app = Flask(__name__)
@app.route('/')
def home():
thing = request.args.get('thing')
height = request.args.get('height')
color = request.args.get('color')
return render_template('home1.html', thing=thing, height=height, color=
color)
if __name__ == '__main__':
app.run(debug=True)
|
<mask token>
print(
"""
================================ RESTART ================================
"""
)
<mask token>
print(
"""
================================ RESTART ================================
"""
)
<mask token>
print(
"""
================================ RESTART ================================
"""
)
<mask token>
print(
"""
================================ RESTART ================================
"""
)
<mask token>
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def home():
thing = request.args.get('thing')
height = request.args.get('height')
color = request.args.get('color')
return render_template('home1.html', thing=thing, height=height, color=
color)
if __name__ == '__main__':
app.run(debug=True)
|
#!/usr/bin/env python3
'''Глава 9. Распутываем Всемирную паутину'''
'''1. Если вы еще не установили Flask, сделайте это сейчас.
Это также установит werkzeug, jinja2 и, возможно, другие пакеты.'''
# pip3 install flask
print('\n================================ RESTART ================================\n')
'''2. Создайте скелет сайта с помощью веб-сервера Flask.
Убедитесь, что сервер начинает свою работу по адресу Localhost на стандартном порте 5000.
Если ваш компьютер уже использует порт 5000 для чего-то еще, воспользуйтесь другим портом.'''
'''from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
if __name__ == "__main__":
app.run(port=5000, debug=True)'''
print('\n================================ RESTART ================================\n')
'''3. Добавьте функцию home() для обработки запросов к домашней странице. Пусть она возвращает строку It's alive!.'''
'''from flask import Flask
app = Flask(__name__)
@app.route("/")
def home():
return "It's alive!"
if __name__ == "__main__":
app.run(debug=True)'''
print('\n================================ RESTART ================================\n')
'''4. Создайте шаблон для jinja2, который называется home1.html и содержит следующий контент:
<html>
<head>
<title>It's alive!</title>
<body>
I'm of course referring to {{thing}}, which is {{height}} feet tall and {{color}}.
</body>
</html>'''
print('\n================================ RESTART ================================\n')
'''5. Модифицируйте функцию home() вашего сервера, чтобы она использовала шаблон home1.html.
Передайте ей три параметра для команды GET: thing, height и color.'''
'''Перейдите в своем клиенте по следующему адресу:
http://localhost:5000/?thing=Octothorpe&height=7&color=green'''
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def home():
thing = request.args.get('thing')
height = request.args.get('height')
color = request.args.get('color')
return render_template('home1.html', thing=thing, height=height, color=color)
if __name__ == "__main__":
app.run(debug=True)
|
[
1,
2,
3,
4,
5
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.