blob_id stringlengths 40–40 | directory_id stringlengths 40–40 | path stringlengths 3–616 | content_id stringlengths 40–40 | detected_licenses listlengths 0–112 | license_type stringclasses 2 values | repo_name stringlengths 5–115 | snapshot_id stringlengths 40–40 | revision_id stringlengths 40–40 | branch_name stringclasses 777 values | visit_date timestamp[us] date 2015-08-06 10:31:46 – 2023-09-06 10:44:38 | revision_date timestamp[us] date 1970-01-01 02:38:32 – 2037-05-03 13:00:00 | committer_date timestamp[us] date 1970-01-01 02:38:32 – 2023-09-06 01:08:06 | github_id int64 4.92k – 681M ⌀ | star_events_count int64 0 – 209k | fork_events_count int64 0 – 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us] date 2012-06-04 01:52:49 – 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us] date 2008-05-22 07:58:19 – 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 – 10.2M | extension stringclasses 188 values | content stringlengths 3 – 10.2M | authors listlengths 1–1 | author_id stringlengths 1–132 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6e5bfeee02160589220079caf6d6e3e3b76ab585 | 629090051b975b5814b4b48e2cb2c784fa6705e4 | /pgsmo/objects/sequence/sequence.py | 58b4198fa17dee038f943fed6dd518f8db8054e6 | [
"MIT"
]
| permissive | microsoft/pgtoolsservice | 3d3597821c7cae1d216436d4f8143929e2c8a82a | 24a048226f7f30c775bbcbab462d499a465be5da | refs/heads/master | 2023-08-28T12:55:47.817628 | 2023-08-25T22:47:53 | 2023-08-25T22:47:53 | 80,681,087 | 68 | 35 | NOASSERTION | 2023-09-13T21:46:55 | 2017-02-02T01:00:33 | Python | UTF-8 | Python | false | false | 6,637 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from typing import Optional, List, Dict
from smo.common.node_object import NodeObject, NodeLazyPropertyCollection, NodeCollection
from smo.common.scripting_mixins import ScriptableCreate, ScriptableDelete, ScriptableUpdate
from pgsmo.objects.server import server as s # noqa
import smo.utils.templating as templating
class Sequence(NodeObject, ScriptableCreate, ScriptableDelete, ScriptableUpdate):
TEMPLATE_ROOT = templating.get_template_root(__file__, 'templates')
MACRO_ROOT = templating.get_template_root(__file__, 'macros')
GLOBAL_MACRO_ROOT = templating.get_template_root(__file__, '../global_macros')
@classmethod
def _from_node_query(cls, server: 's.Server', parent: NodeObject, **kwargs) -> 'Sequence':
"""
Creates a Sequence object from the result of a sequence node query
:param server: Server that owns the sequence
:param parent: Parent object of the sequence
:param kwargs: Row from a sequence node query
Kwargs:
oid int: Object ID of the sequence
name str: Name of the sequence
:return: A Sequence instance
"""
seq = cls(server, parent, kwargs['name'])
seq._oid = kwargs['oid']
seq._schema = kwargs['schema']
seq._scid = kwargs['schemaoid']
seq._is_system = kwargs['is_system']
return seq
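    # A row from the sequence node query consumed above would look roughly like
    # this (values are hypothetical):
    #   {'name': 'order_id_seq', 'oid': 16388, 'schema': 'public',
    #    'schemaoid': 2200, 'is_system': False}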
def __init__(self, server: 's.Server', parent: NodeObject, name: str):
self._server = server
self._parent: Optional['NodeObject'] = parent
self._name: str = name
self._oid: Optional[int] = None
self._is_system: bool = False
self._child_collections: Dict[str, NodeCollection] = {}
self._property_collections: List[NodeLazyPropertyCollection] = []
# Use _column_property_generator instead of _property_generator
self._full_properties: NodeLazyPropertyCollection = self._register_property_collection(self._sequence_property_generator)
ScriptableCreate.__init__(self, self._template_root(server), self._macro_root(), server.version)
ScriptableDelete.__init__(self, self._template_root(server), self._macro_root(), server.version)
ScriptableUpdate.__init__(self, self._template_root(server), self._macro_root(), server.version)
self._schema: str = None
self._scid: int = None
self._def: dict = None
def _sequence_property_generator(self):
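        # Lazily loads the full property set for this sequence: renders the
        # version-appropriate properties.sql template and returns the first
        # result row, if any.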
template_root = self._template_root(self._server)
# Setup the parameters for the query
template_vars = self.template_vars
# Render and execute the template
sql = templating.render_template(
templating.get_template_path(template_root, 'properties.sql', self._server.version),
self._macro_root(),
**template_vars
)
cols, rows = self._server.connection.execute_dict(sql)
if len(rows) > 0:
return rows[0]
# PROPERTIES ###########################################################
@property
def schema(self):
return self._schema
@property
def scid(self):
return self._scid
# -FULL OBJECT PROPERTIES ##############################################
@property
def cycled(self):
return self._full_properties.get("cycled", "")
@property
def increment(self):
return self._full_properties.get("increment", "")
@property
def start(self):
return self._full_properties.get("start", "")
@property
def current_value(self):
return self._full_properties.get("current_value", "")
@property
def minimum(self):
return self._full_properties.get("minimum", "")
@property
def maximum(self):
return self._full_properties.get("maximum", "")
@property
def cache(self):
return self._full_properties.get("cache", "")
@property
def cascade(self):
return self._full_properties.get("cascade", "")
@property
def seqowner(self):
return self._full_properties.get("seqowner", "")
@property
def comment(self):
return self._full_properties.get("comment", "")
# IMPLEMENTATION DETAILS ###############################################
@classmethod
def _macro_root(cls) -> List[str]:
return [cls.MACRO_ROOT, cls.GLOBAL_MACRO_ROOT]
@classmethod
def _template_root(cls, server: 's.Server') -> str:
return cls.TEMPLATE_ROOT
# HELPER METHODS ##################################################################
def _create_query_data(self):
""" Gives the data object for create query """
return {"data": {
"schema": self.schema,
"name": self.name,
"cycled": self.cycled,
"increment": self.increment,
"start": self.start,
"current_value": self.current_value,
"minimum": self.minimum,
"maximum": self.maximum,
"cache": self.cache
}}
def _update_query_data(self):
""" Gives the data object for update query """
return {
"data": {
"schema": self.schema,
"name": self.name,
"cycled": self.cycled,
"increment": self.increment,
"start": self.start,
"current_value": self.current_value,
"minimum": self.minimum,
"maximum": self.maximum,
"cache": self.cache
},
"o_data": {
"schema": self.schema,
"name": self.name,
"seqowner": self.seqowner,
"comment": self.comment
}
}
def _delete_query_data(self):
""" Gives the data object for update query """
return {
"data": {
"schema": self.schema,
"name": self.name,
"cycled": self.cycled,
"increment": self.increment,
"start": self.start,
"current_value": self.current_value,
"minimum": self.minimum,
"maximum": self.maximum,
"cache": self.cache
},
"cascade": self.cascade
}
| ["[email protected]"] | |
c674b0a58e029302461e9515a02b8d8294b99a98 | e0045eec29aab56212c00f9293a21eb3b4b9fe53 | /website_sale/models/product.py | a5439d9d8d7175fb044e2fceb1e9f39ddba127a7 | []
| no_license | tamam001/ALWAFI_P1 | a3a9268081b9befc668a5f51c29ce5119434cc21 | 402ea8687c607fbcb5ba762c2020ebc4ee98e705 | refs/heads/master | 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,408 | py | # -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, tools, _
from odoo.addons import decimal_precision as dp
from odoo.addons.website.models import ir_http
from odoo.tools.translate import html_translate
class ProductStyle(models.Model):
_name = "product.style"
_description = 'Product Style'
name = fields.Char(string='Style Name', required=True)
html_class = fields.Char(string='HTML Classes')
class ProductPricelist(models.Model):
_inherit = "product.pricelist"
def _default_website(self):
""" Find the first company's website, if there is one. """
company_id = self.env.user.company_id.id
domain = [('company_id', '=', company_id)]
return self.env['website'].search(domain, limit=1)
website_id = fields.Many2one('website', string="Website", default=_default_website)
code = fields.Char(string='E-commerce Promotional Code', groups="base.group_user")
selectable = fields.Boolean(help="Allow the end user to choose this price list")
def clear_cache(self):
# website._get_pl_partner_order() is cached to avoid to recompute at each request the
# list of available pricelists. So, we need to invalidate the cache when
# we change the config of website price list to force to recompute.
website = self.env['website']
website._get_pl_partner_order.clear_cache(website)
@api.model
def create(self, data):
res = super(ProductPricelist, self).create(data)
self.clear_cache()
return res
@api.multi
def write(self, data):
res = super(ProductPricelist, self).write(data)
self.clear_cache()
return res
@api.multi
def unlink(self):
res = super(ProductPricelist, self).unlink()
self.clear_cache()
return res
def _get_partner_pricelist_multi_search_domain_hook(self):
domain = super(ProductPricelist, self)._get_partner_pricelist_multi_search_domain_hook()
website = ir_http.get_request_website()
if website:
domain += self._get_website_pricelists_domain(website.id)
return domain
def _get_partner_pricelist_multi_filter_hook(self):
res = super(ProductPricelist, self)._get_partner_pricelist_multi_filter_hook()
website = ir_http.get_request_website()
if website:
res = res.filtered(lambda pl: pl._is_available_on_website(website.id))
return res
@api.multi
def _is_available_on_website(self, website_id):
""" To be able to be used on a website, a pricelist should either:
- Have its `website_id` set to current website (specific pricelist).
- Have no `website_id` set and should be `selectable` (generic pricelist)
or should have a `code` (generic promotion).
Note: A pricelist without a website_id, not selectable and without a
code is a backend pricelist.
Change in this method should be reflected in `_get_website_pricelists_domain`.
"""
self.ensure_one()
return self.website_id.id == website_id or (not self.website_id and (self.selectable or self.sudo().code))
def _get_website_pricelists_domain(self, website_id):
''' Check above `_is_available_on_website` for explanation.
Change in this method should be reflected in `_is_available_on_website`.
'''
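        # Odoo domains use prefix notation; the domain below reads as:
        #   website_id = website_id OR (website_id not set AND (selectable OR code set))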
return [
'|', ('website_id', '=', website_id),
'&', ('website_id', '=', False),
'|', ('selectable', '=', True), ('code', '!=', False),
]
def _get_partner_pricelist_multi(self, partner_ids, company_id=None):
''' If `property_product_pricelist` is read from website, we should use
the website's company and not the user's one.
Passing a `company_id` to super will avoid using the current user's
company.
'''
website = ir_http.get_request_website()
if not company_id and website:
company_id = website.company_id.id
return super(ProductPricelist, self)._get_partner_pricelist_multi(partner_ids, company_id)
class ProductPublicCategory(models.Model):
_name = "product.public.category"
_inherit = ["website.seo.metadata", "website.multi.mixin"]
_description = "Website Product Category"
_order = "sequence, name"
name = fields.Char(required=True, translate=True)
parent_id = fields.Many2one('product.public.category', string='Parent Category', index=True)
child_id = fields.One2many('product.public.category', 'parent_id', string='Children Categories')
sequence = fields.Integer(help="Gives the sequence order when displaying a list of product categories.")
# NOTE: there is no 'default image', because by default we don't show
# thumbnails for categories. However if we have a thumbnail for at least one
# category, then we display a default image on the other, so that the
# buttons have consistent styling.
# In this case, the default image is set by the js code.
image = fields.Binary(attachment=True, help="This field holds the image used as image for the category, limited to 1024x1024px.")
image_medium = fields.Binary(string='Medium-sized image', attachment=True,
help="Medium-sized image of the category. It is automatically "
"resized as a 128x128px image, with aspect ratio preserved. "
"Use this field in form views or some kanban views.")
image_small = fields.Binary(string='Small-sized image', attachment=True,
help="Small-sized image of the category. It is automatically "
"resized as a 64x64px image, with aspect ratio preserved. "
"Use this field anywhere a small image is required.")
@api.model
def create(self, vals):
tools.image_resize_images(vals)
return super(ProductPublicCategory, self).create(vals)
@api.multi
def write(self, vals):
tools.image_resize_images(vals)
return super(ProductPublicCategory, self).write(vals)
@api.constrains('parent_id')
def check_parent_id(self):
if not self._check_recursion():
raise ValueError(_('Error ! You cannot create recursive categories.'))
@api.multi
def name_get(self):
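        # Builds the "Parent / Child" display name, e.g. a (hypothetical) category
        # "Chairs" under "Furniture" is rendered as "Furniture / Chairs".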
res = []
for category in self:
names = [category.name]
parent_category = category.parent_id
while parent_category:
names.append(parent_category.name)
parent_category = parent_category.parent_id
res.append((category.id, ' / '.join(reversed(names))))
return res
class ProductTemplate(models.Model):
_inherit = ["product.template", "website.seo.metadata", 'website.published.multi.mixin', 'rating.mixin']
_name = 'product.template'
_mail_post_access = 'read'
website_description = fields.Html('Description for the website', sanitize_attributes=False, translate=html_translate)
alternative_product_ids = fields.Many2many('product.template', 'product_alternative_rel', 'src_id', 'dest_id',
                                               string='Alternative Products', help='Suggest alternatives to your customer '
                                                                                    '(upsell strategy). Those products show up on the product page.')
accessory_product_ids = fields.Many2many('product.product', 'product_accessory_rel', 'src_id', 'dest_id',
                                             string='Accessory Products', help='Accessories show up when the customer '
                                                                               'reviews the cart before payment (cross-sell strategy).')
website_size_x = fields.Integer('Size X', default=1)
website_size_y = fields.Integer('Size Y', default=1)
website_style_ids = fields.Many2many('product.style', string='Styles')
website_sequence = fields.Integer('Website Sequence', help="Determine the display order in the Website E-commerce",
default=lambda self: self._default_website_sequence())
public_categ_ids = fields.Many2many('product.public.category', string='Website Product Category',
help="The product will be available in each mentioned e-commerce category. Go to"
"Shop > Customize and enable 'E-commerce categories' to view all e-commerce categories.")
product_image_ids = fields.One2many('product.image', 'product_tmpl_id', string='Images')
# website_price deprecated, directly use _get_combination_info instead
website_price = fields.Float('Website price', compute='_website_price', digits=dp.get_precision('Product Price'))
# website_public_price deprecated, directly use _get_combination_info instead
website_public_price = fields.Float('Website public price', compute='_website_price', digits=dp.get_precision('Product Price'))
# website_price_difference deprecated, directly use _get_combination_info instead
website_price_difference = fields.Boolean('Website price difference', compute='_website_price')
def _website_price(self):
current_website = self.env['website'].get_current_website()
for template in self.with_context(website_id=current_website.id):
res = template._get_combination_info()
template.website_price = res.get('price')
template.website_public_price = res.get('list_price')
template.website_price_difference = res.get('has_discounted_price')
@api.multi
def _has_no_variant_attributes(self):
"""Return whether this `product.template` has at least one no_variant
attribute.
:return: True if at least one no_variant attribute, False otherwise
:rtype: bool
"""
self.ensure_one()
return any(a.create_variant == 'no_variant' for a in self._get_valid_product_attributes())
@api.multi
def _has_is_custom_values(self):
        """Return whether this `product.template` has at least one is_custom
        attribute value.
        :return: True if at least one is_custom attribute value, False otherwise
        :rtype: bool
        """
        self.ensure_one()
return any(v.is_custom for v in self._get_valid_product_attribute_values())
@api.multi
def _is_quick_add_to_cart_possible(self, parent_combination=None):
"""
It's possible to quickly add to cart if there's no optional product,
there's only one possible combination and no value is set to is_custom.
Attributes set to dynamic or no_variant don't have to be tested
specifically because they will be taken into account when checking for
the possible combinations.
:param parent_combination: combination from which `self` is an
optional or accessory product
:type parent_combination: recordset `product.template.attribute.value`
:return: True if it's possible to quickly add to cart, else False
:rtype: bool
"""
self.ensure_one()
if not self._is_add_to_cart_possible(parent_combination):
return False
gen = self._get_possible_combinations(parent_combination)
first_possible_combination = next(gen)
if next(gen, False) is not False:
# there are at least 2 possible combinations.
return False
if self._has_is_custom_values():
return False
if self.optional_product_ids.filtered(lambda p: p._is_add_to_cart_possible(first_possible_combination)):
return False
return True
@api.multi
def _get_possible_variants_sorted(self, parent_combination=None):
"""Return the sorted recordset of variants that are possible.
The order is based on the order of the attributes and their values.
See `_get_possible_variants` for the limitations of this method with
dynamic or no_variant attributes, and also for a warning about
performances.
:param parent_combination: combination from which `self` is an
optional or accessory product
:type parent_combination: recordset `product.template.attribute.value`
:return: the sorted variants that are possible
:rtype: recordset of `product.product`
"""
self.ensure_one()
def _sort_key_attribute_value(value):
# if you change this order, keep it in sync with _order from `product.attribute`
return (value.attribute_id.sequence, value.attribute_id.id)
def _sort_key_variant(variant):
"""
We assume all variants will have the same attributes, with only one value for each.
- first level sort: same as "product.attribute"._order
- second level sort: same as "product.attribute.value"._order
"""
keys = []
for attribute in variant.attribute_value_ids.sorted(_sort_key_attribute_value):
# if you change this order, keep it in sync with _order from `product.attribute.value`
keys.append(attribute.sequence)
keys.append(attribute.id)
return keys
return self._get_possible_variants(parent_combination).sorted(_sort_key_variant)
@api.multi
def _get_combination_info(self, combination=False, product_id=False, add_qty=1, pricelist=False, parent_combination=False, only_template=False):
"""Override for website, where we want to:
- take the website pricelist if no pricelist is set
- apply the b2b/b2c setting to the result
This will work when adding website_id to the context, which is done
automatically when called from routes with website=True.
"""
self.ensure_one()
current_website = False
if self.env.context.get('website_id'):
current_website = self.env['website'].get_current_website()
if not pricelist:
pricelist = current_website.get_current_pricelist()
combination_info = super(ProductTemplate, self)._get_combination_info(
combination=combination, product_id=product_id, add_qty=add_qty, pricelist=pricelist,
parent_combination=parent_combination, only_template=only_template)
if self.env.context.get('website_id'):
partner = self.env.user.partner_id
company_id = current_website.company_id
product = self.env['product.product'].browse(combination_info['product_id']) or self
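            # Users in the B2B group (account.group_show_line_subtotals_tax_excluded) see
            # tax-excluded prices, everyone else tax-included; the and/or chain below is
            # the old-style conditional expression idiom.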
tax_display = self.env.user.has_group('account.group_show_line_subtotals_tax_excluded') and 'total_excluded' or 'total_included'
taxes = partner.property_account_position_id.map_tax(product.sudo().taxes_id.filtered(lambda x: x.company_id == company_id), product, partner)
# The list_price is always the price of one.
quantity_1 = 1
price = taxes.compute_all(combination_info['price'], pricelist.currency_id, quantity_1, product, partner)[tax_display]
if pricelist.discount_policy == 'without_discount':
list_price = taxes.compute_all(combination_info['list_price'], pricelist.currency_id, quantity_1, product, partner)[tax_display]
else:
list_price = price
has_discounted_price = pricelist.currency_id.compare_amounts(list_price, price) == 1
combination_info.update(
price=price,
list_price=list_price,
has_discounted_price=has_discounted_price,
)
return combination_info
@api.multi
def _create_first_product_variant(self, log_warning=False):
"""Create if necessary and possible and return the first product
variant for this template.
:param log_warning: whether a warning should be logged on fail
:type log_warning: bool
:return: the first product variant or none
:rtype: recordset of `product.product`
"""
return self._create_product_variant(self._get_first_possible_combination(), log_warning)
@api.multi
def _get_current_company_fallback(self, **kwargs):
"""Override: if a website is set on the product or given, fallback to
the company of the website. Otherwise use the one from parent method."""
res = super(ProductTemplate, self)._get_current_company_fallback(**kwargs)
website = self.website_id or kwargs.get('website')
return website and website.company_id or res
def _default_website_sequence(self):
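        # New templates default to one step below the smallest existing website_sequence
        # (the same value set_sequence_bottom computes), or 10 when there is none yet.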
self._cr.execute("SELECT MIN(website_sequence) FROM %s" % self._table)
min_sequence = self._cr.fetchone()[0]
return min_sequence and min_sequence - 1 or 10
def set_sequence_top(self):
self.website_sequence = self.sudo().search([], order='website_sequence desc', limit=1).website_sequence + 1
def set_sequence_bottom(self):
self.website_sequence = self.sudo().search([], order='website_sequence', limit=1).website_sequence - 1
def set_sequence_up(self):
previous_product_tmpl = self.sudo().search(
[('website_sequence', '>', self.website_sequence), ('website_published', '=', self.website_published)],
order='website_sequence', limit=1)
if previous_product_tmpl:
previous_product_tmpl.website_sequence, self.website_sequence = self.website_sequence, previous_product_tmpl.website_sequence
else:
self.set_sequence_top()
def set_sequence_down(self):
        next_product_tmpl = self.search([('website_sequence', '<', self.website_sequence), ('website_published', '=', self.website_published)], order='website_sequence desc', limit=1)
        if next_product_tmpl:
            next_product_tmpl.website_sequence, self.website_sequence = self.website_sequence, next_product_tmpl.website_sequence
else:
return self.set_sequence_bottom()
def _default_website_meta(self):
res = super(ProductTemplate, self)._default_website_meta()
res['default_opengraph']['og:description'] = res['default_twitter']['twitter:description'] = self.description_sale
res['default_opengraph']['og:title'] = res['default_twitter']['twitter:title'] = self.name
res['default_opengraph']['og:image'] = res['default_twitter']['twitter:image'] = "/web/image/product.template/%s/image" % (self.id)
return res
@api.multi
def _compute_website_url(self):
super(ProductTemplate, self)._compute_website_url()
for product in self:
product.website_url = "/shop/product/%s" % (product.id,)
class Product(models.Model):
_inherit = "product.product"
website_id = fields.Many2one(related='product_tmpl_id.website_id', readonly=False)
# website_price deprecated, directly use _get_combination_info instead
website_price = fields.Float('Website price', compute='_website_price', digits=dp.get_precision('Product Price'))
# website_public_price deprecated, directly use _get_combination_info instead
website_public_price = fields.Float('Website public price', compute='_website_price', digits=dp.get_precision('Product Price'))
# website_price_difference deprecated, directly use _get_combination_info instead
website_price_difference = fields.Boolean('Website price difference', compute='_website_price')
def _website_price(self):
for product in self:
res = product._get_combination_info_variant()
product.website_price = res.get('price')
product.website_public_price = res.get('list_price')
product.website_price_difference = res.get('has_discounted_price')
@api.multi
def website_publish_button(self):
self.ensure_one()
return self.product_tmpl_id.website_publish_button()
class ProductImage(models.Model):
_name = 'product.image'
_description = 'Product Image'
name = fields.Char('Name')
image = fields.Binary('Image', attachment=True)
product_tmpl_id = fields.Many2one('product.template', 'Related Product', copy=True)
| ["[email protected]"] | |
22b9583d4e86075bcd2f54a1ae3c118d1a1510ef | 0bb49acb7bb13a09adafc2e43e339f4c956e17a6 | /OpenAssembler/Gui/OAS_Window/oas_main04.py | 92052edb6cc3e03aebcd64f5fd56a89706efd491 | []
| no_license | all-in-one-of/openassembler-7 | 94f6cdc866bceb844246de7920b7cbff9fcc69bf | 69704d1c4aa4b1b99f484c8c7884cf73d412fafe | refs/heads/master | 2021-01-04T18:08:10.264830 | 2010-07-02T10:50:16 | 2010-07-02T10:50:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,262 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'oas_main04.ui'
#
# Created: Wed Jul 15 10:23:49 2009
# by: PyQt4 UI code generator 4.4.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_oasWindow(object):
def setupUi(self, oasWindow):
oasWindow.setObjectName("oasWindow")
oasWindow.resize(885, 533)
self.oas_centralwidget = QtGui.QWidget(oasWindow)
self.oas_centralwidget.setObjectName("oas_centralwidget")
self.gridLayout_6 = QtGui.QGridLayout(self.oas_centralwidget)
self.gridLayout_6.setMargin(2)
self.gridLayout_6.setSpacing(2)
self.gridLayout_6.setObjectName("gridLayout_6")
self.oas_splitter = QtGui.QSplitter(self.oas_centralwidget)
self.oas_splitter.setOrientation(QtCore.Qt.Horizontal)
self.oas_splitter.setObjectName("oas_splitter")
self.oas_splitter02 = QtGui.QSplitter(self.oas_splitter)
self.oas_splitter02.setOrientation(QtCore.Qt.Vertical)
self.oas_splitter02.setObjectName("oas_splitter02")
self.oas_menuline_frame = QtGui.QFrame(self.oas_splitter02)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.oas_menuline_frame.sizePolicy().hasHeightForWidth())
self.oas_menuline_frame.setSizePolicy(sizePolicy)
self.oas_menuline_frame.setMinimumSize(QtCore.QSize(0, 30))
self.oas_menuline_frame.setMaximumSize(QtCore.QSize(16777215, 30))
self.oas_menuline_frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.oas_menuline_frame.setFrameShadow(QtGui.QFrame.Raised)
self.oas_menuline_frame.setObjectName("oas_menuline_frame")
self.gridLayout_3 = QtGui.QGridLayout(self.oas_menuline_frame)
self.gridLayout_3.setMargin(2)
self.gridLayout_3.setSpacing(2)
self.gridLayout_3.setObjectName("gridLayout_3")
self.oas_horizontalLayout_3 = QtGui.QHBoxLayout()
self.oas_horizontalLayout_3.setSpacing(2)
self.oas_horizontalLayout_3.setObjectName("oas_horizontalLayout_3")
self.oas_new_bu = QtGui.QToolButton(self.oas_menuline_frame)
self.oas_new_bu.setObjectName("oas_new_bu")
self.oas_horizontalLayout_3.addWidget(self.oas_new_bu)
spacerItem = QtGui.QSpacerItem(10, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
self.oas_horizontalLayout_3.addItem(spacerItem)
self.oas_open_bu = QtGui.QToolButton(self.oas_menuline_frame)
self.oas_open_bu.setObjectName("oas_open_bu")
self.oas_horizontalLayout_3.addWidget(self.oas_open_bu)
self.oas_save_bu = QtGui.QToolButton(self.oas_menuline_frame)
self.oas_save_bu.setObjectName("oas_save_bu")
self.oas_horizontalLayout_3.addWidget(self.oas_save_bu)
self.oas_saveas_bu = QtGui.QToolButton(self.oas_menuline_frame)
self.oas_saveas_bu.setObjectName("oas_saveas_bu")
self.oas_horizontalLayout_3.addWidget(self.oas_saveas_bu)
spacerItem1 = QtGui.QSpacerItem(15, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
self.oas_horizontalLayout_3.addItem(spacerItem1)
self.oas_run_bu = QtGui.QToolButton(self.oas_menuline_frame)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(86, 255, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(56, 255, 26))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 255, 56))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(86, 255, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(56, 255, 26))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 255, 56))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(28, 28, 28))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(56, 255, 26))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(28, 28, 28))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.oas_run_bu.setPalette(palette)
self.oas_run_bu.setObjectName("oas_run_bu")
self.oas_horizontalLayout_3.addWidget(self.oas_run_bu)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.oas_horizontalLayout_3.addItem(spacerItem2)
self.oas_search_entry = QtGui.QLineEdit(self.oas_menuline_frame)
self.oas_search_entry.setObjectName("oas_search_entry")
self.oas_horizontalLayout_3.addWidget(self.oas_search_entry)
self.oas_search_bu = QtGui.QToolButton(self.oas_menuline_frame)
self.oas_search_bu.setObjectName("oas_search_bu")
self.oas_horizontalLayout_3.addWidget(self.oas_search_bu)
self.gridLayout_3.addLayout(self.oas_horizontalLayout_3, 0, 0, 1, 1)
self.oas_graphicsView = QtGui.QGraphicsView(self.oas_splitter02)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(225, 225, 225))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(187, 187, 187))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(75, 75, 75))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(100, 100, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(202, 202, 202))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(225, 225, 225))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(187, 187, 187))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(75, 75, 75))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(100, 100, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(202, 202, 202))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(75, 75, 75))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(225, 225, 225))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(187, 187, 187))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(75, 75, 75))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(100, 100, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(75, 75, 75))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(75, 75, 75))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.oas_graphicsView.setPalette(palette)
self.oas_graphicsView.setFocusPolicy(QtCore.Qt.StrongFocus)
self.oas_graphicsView.setObjectName("oas_graphicsView")
self.oas_timeline_frame = QtGui.QFrame(self.oas_splitter02)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.oas_timeline_frame.sizePolicy().hasHeightForWidth())
self.oas_timeline_frame.setSizePolicy(sizePolicy)
self.oas_timeline_frame.setMinimumSize(QtCore.QSize(0, 70))
self.oas_timeline_frame.setMaximumSize(QtCore.QSize(16777215, 70))
self.oas_timeline_frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.oas_timeline_frame.setFrameShadow(QtGui.QFrame.Raised)
self.oas_timeline_frame.setObjectName("oas_timeline_frame")
self.gridLayout = QtGui.QGridLayout(self.oas_timeline_frame)
self.gridLayout.setMargin(2)
self.gridLayout.setSpacing(2)
self.gridLayout.setObjectName("gridLayout")
self.oas_verticalLayout = QtGui.QVBoxLayout()
self.oas_verticalLayout.setObjectName("oas_verticalLayout")
self.oas_horizontalLayout = QtGui.QHBoxLayout()
self.oas_horizontalLayout.setObjectName("oas_horizontalLayout")
self.oas_sframe_spin = QtGui.QSpinBox(self.oas_timeline_frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.oas_sframe_spin.sizePolicy().hasHeightForWidth())
self.oas_sframe_spin.setSizePolicy(sizePolicy)
self.oas_sframe_spin.setMinimum(-100000)
self.oas_sframe_spin.setMaximum(100000)
self.oas_sframe_spin.setObjectName("oas_sframe_spin")
self.oas_horizontalLayout.addWidget(self.oas_sframe_spin)
self.oas_time_slider = QtGui.QSlider(self.oas_timeline_frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.oas_time_slider.sizePolicy().hasHeightForWidth())
self.oas_time_slider.setSizePolicy(sizePolicy)
self.oas_time_slider.setSliderPosition(0)
self.oas_time_slider.setOrientation(QtCore.Qt.Horizontal)
self.oas_time_slider.setTickPosition(QtGui.QSlider.TicksBothSides)
self.oas_time_slider.setTickInterval(0)
self.oas_time_slider.setObjectName("oas_time_slider")
self.oas_horizontalLayout.addWidget(self.oas_time_slider)
self.oas_eframe_spin = QtGui.QSpinBox(self.oas_timeline_frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.oas_eframe_spin.sizePolicy().hasHeightForWidth())
self.oas_eframe_spin.setSizePolicy(sizePolicy)
self.oas_eframe_spin.setMinimum(-100000)
self.oas_eframe_spin.setMaximum(100000)
self.oas_eframe_spin.setObjectName("oas_eframe_spin")
self.oas_horizontalLayout.addWidget(self.oas_eframe_spin)
self.oas_verticalLayout.addLayout(self.oas_horizontalLayout)
self.oas_horizontalLayout_2 = QtGui.QHBoxLayout()
self.oas_horizontalLayout_2.setObjectName("oas_horizontalLayout_2")
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.oas_horizontalLayout_2.addItem(spacerItem3)
self.oas_firstF = QtGui.QToolButton(self.oas_timeline_frame)
self.oas_firstF.setMinimumSize(QtCore.QSize(20, 20))
self.oas_firstF.setMaximumSize(QtCore.QSize(20, 20))
self.oas_firstF.setObjectName("oas_firstF")
self.oas_horizontalLayout_2.addWidget(self.oas_firstF)
self.oas_prewF = QtGui.QToolButton(self.oas_timeline_frame)
self.oas_prewF.setMinimumSize(QtCore.QSize(20, 20))
self.oas_prewF.setMaximumSize(QtCore.QSize(20, 20))
self.oas_prewF.setObjectName("oas_prewF")
self.oas_horizontalLayout_2.addWidget(self.oas_prewF)
self.oas_cframe_spin = QtGui.QSpinBox(self.oas_timeline_frame)
self.oas_cframe_spin.setMinimumSize(QtCore.QSize(0, 20))
self.oas_cframe_spin.setMaximumSize(QtCore.QSize(16777215, 20))
self.oas_cframe_spin.setMinimum(-100000)
self.oas_cframe_spin.setMaximum(100000)
self.oas_cframe_spin.setObjectName("oas_cframe_spin")
self.oas_horizontalLayout_2.addWidget(self.oas_cframe_spin)
self.oas_nextF = QtGui.QToolButton(self.oas_timeline_frame)
self.oas_nextF.setMinimumSize(QtCore.QSize(20, 20))
self.oas_nextF.setMaximumSize(QtCore.QSize(20, 20))
self.oas_nextF.setObjectName("oas_nextF")
self.oas_horizontalLayout_2.addWidget(self.oas_nextF)
self.oas_lastF = QtGui.QToolButton(self.oas_timeline_frame)
self.oas_lastF.setMinimumSize(QtCore.QSize(20, 20))
self.oas_lastF.setMaximumSize(QtCore.QSize(20, 20))
self.oas_lastF.setObjectName("oas_lastF")
self.oas_horizontalLayout_2.addWidget(self.oas_lastF)
spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.oas_horizontalLayout_2.addItem(spacerItem4)
self.oas_verticalLayout.addLayout(self.oas_horizontalLayout_2)
self.gridLayout.addLayout(self.oas_verticalLayout, 0, 0, 1, 1)
self.oas_splitter03 = QtGui.QSplitter(self.oas_splitter)
self.oas_splitter03.setOrientation(QtCore.Qt.Vertical)
self.oas_splitter03.setObjectName("oas_splitter03")
self.oas_attribute_frame = QtGui.QFrame(self.oas_splitter03)
self.oas_attribute_frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.oas_attribute_frame.setFrameShadow(QtGui.QFrame.Raised)
self.oas_attribute_frame.setObjectName("oas_attribute_frame")
self.gridLayout_2 = QtGui.QGridLayout(self.oas_attribute_frame)
self.gridLayout_2.setMargin(2)
self.gridLayout_2.setSpacing(2)
self.gridLayout_2.setObjectName("gridLayout_2")
self.oas_verticalLayout_2 = QtGui.QVBoxLayout()
self.oas_verticalLayout_2.setObjectName("oas_verticalLayout_2")
self.oas_nodeName = QtGui.QLineEdit(self.oas_attribute_frame)
self.oas_nodeName.setObjectName("oas_nodeName")
self.oas_verticalLayout_2.addWidget(self.oas_nodeName)
self.oas_horizontalLayout_4 = QtGui.QHBoxLayout()
self.oas_horizontalLayout_4.setObjectName("oas_horizontalLayout_4")
self.oas_label_2 = QtGui.QLabel(self.oas_attribute_frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.oas_label_2.sizePolicy().hasHeightForWidth())
self.oas_label_2.setSizePolicy(sizePolicy)
self.oas_label_2.setObjectName("oas_label_2")
self.oas_horizontalLayout_4.addWidget(self.oas_label_2)
self.oas_attribute_nodetype = QtGui.QLabel(self.oas_attribute_frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.oas_attribute_nodetype.sizePolicy().hasHeightForWidth())
self.oas_attribute_nodetype.setSizePolicy(sizePolicy)
self.oas_attribute_nodetype.setObjectName("oas_attribute_nodetype")
self.oas_horizontalLayout_4.addWidget(self.oas_attribute_nodetype)
spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.oas_horizontalLayout_4.addItem(spacerItem5)
self.oas_attribute_cache = QtGui.QCheckBox(self.oas_attribute_frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.oas_attribute_cache.sizePolicy().hasHeightForWidth())
self.oas_attribute_cache.setSizePolicy(sizePolicy)
self.oas_attribute_cache.setObjectName("oas_attribute_cache")
self.oas_horizontalLayout_4.addWidget(self.oas_attribute_cache)
self.oas_verticalLayout_2.addLayout(self.oas_horizontalLayout_4)
self.oas_attribute_area = QtGui.QScrollArea(self.oas_attribute_frame)
self.oas_attribute_area.setWidgetResizable(True)
self.oas_attribute_area.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.oas_attribute_area.setObjectName("oas_attribute_area")
self.oas_attribute_areaContents = QtGui.QWidget(self.oas_attribute_area)
self.oas_attribute_areaContents.setGeometry(QtCore.QRect(0, 0, 267, 127))
self.oas_attribute_areaContents.setLayoutDirection(QtCore.Qt.LeftToRight)
self.oas_attribute_areaContents.setObjectName("oas_attribute_areaContents")
self.gridLayout_5 = QtGui.QGridLayout(self.oas_attribute_areaContents)
self.gridLayout_5.setMargin(2)
self.gridLayout_5.setSpacing(2)
self.gridLayout_5.setObjectName("gridLayout_5")
self.oas_attribute_layout = QtGui.QVBoxLayout()
self.oas_attribute_layout.setObjectName("oas_attribute_layout")
self.place_to_widgets = QtGui.QVBoxLayout()
self.place_to_widgets.setObjectName("place_to_widgets")
self.oas_attribute_layout.addLayout(self.place_to_widgets)
spacerItem6 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.oas_attribute_layout.addItem(spacerItem6)
self.gridLayout_5.addLayout(self.oas_attribute_layout, 0, 0, 1, 1)
self.oas_attribute_area.setWidget(self.oas_attribute_areaContents)
self.oas_verticalLayout_2.addWidget(self.oas_attribute_area)
self.gridLayout_2.addLayout(self.oas_verticalLayout_2, 0, 0, 1, 1)
self.frame = QtGui.QFrame(self.oas_splitter03)
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName("frame")
self.gridLayout_4 = QtGui.QGridLayout(self.frame)
self.gridLayout_4.setMargin(2)
self.gridLayout_4.setSpacing(2)
self.gridLayout_4.setObjectName("gridLayout_4")
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.consoleOutArea = QtGui.QTextEdit(self.frame)
self.consoleOutArea.setReadOnly(True)
self.consoleOutArea.setObjectName("consoleOutArea")
self.verticalLayout.addWidget(self.consoleOutArea)
self.consoleInArea = QtGui.QLineEdit(self.frame)
self.consoleInArea.setObjectName("consoleInArea")
self.verticalLayout.addWidget(self.consoleInArea)
self.gridLayout_4.addLayout(self.verticalLayout, 0, 0, 1, 1)
self.gridLayout_6.addWidget(self.oas_splitter, 0, 0, 1, 1)
oasWindow.setCentralWidget(self.oas_centralwidget)
self.oas_menubar = QtGui.QMenuBar(oasWindow)
self.oas_menubar.setEnabled(False)
self.oas_menubar.setGeometry(QtCore.QRect(0, 0, 885, 25))
self.oas_menubar.setObjectName("oas_menubar")
oasWindow.setMenuBar(self.oas_menubar)
self.retranslateUi(oasWindow)
QtCore.QObject.connect(self.oas_nextF, QtCore.SIGNAL("clicked()"), self.oas_cframe_spin.stepUp)
QtCore.QObject.connect(self.oas_prewF, QtCore.SIGNAL("clicked()"), self.oas_cframe_spin.stepDown)
QtCore.QObject.connect(self.oas_time_slider, QtCore.SIGNAL("sliderMoved(int)"), self.oas_cframe_spin.setValue)
QtCore.QObject.connect(self.oas_cframe_spin, QtCore.SIGNAL("valueChanged(int)"), self.oas_time_slider.setValue)
QtCore.QMetaObject.connectSlotsByName(oasWindow)
def retranslateUi(self, oasWindow):
oasWindow.setWindowTitle(QtGui.QApplication.translate("oasWindow", "OpenAssembler", None, QtGui.QApplication.UnicodeUTF8))
self.oas_new_bu.setText(QtGui.QApplication.translate("oasWindow", "New", None, QtGui.QApplication.UnicodeUTF8))
self.oas_open_bu.setText(QtGui.QApplication.translate("oasWindow", "Open", None, QtGui.QApplication.UnicodeUTF8))
self.oas_save_bu.setText(QtGui.QApplication.translate("oasWindow", "Save", None, QtGui.QApplication.UnicodeUTF8))
self.oas_saveas_bu.setText(QtGui.QApplication.translate("oasWindow", "SaveAs", None, QtGui.QApplication.UnicodeUTF8))
self.oas_run_bu.setText(QtGui.QApplication.translate("oasWindow", " RUN ", None, QtGui.QApplication.UnicodeUTF8))
self.oas_search_bu.setText(QtGui.QApplication.translate("oasWindow", "Search", None, QtGui.QApplication.UnicodeUTF8))
self.oas_firstF.setText(QtGui.QApplication.translate("oasWindow", "<<", None, QtGui.QApplication.UnicodeUTF8))
self.oas_prewF.setText(QtGui.QApplication.translate("oasWindow", "<", None, QtGui.QApplication.UnicodeUTF8))
self.oas_nextF.setText(QtGui.QApplication.translate("oasWindow", ">", None, QtGui.QApplication.UnicodeUTF8))
self.oas_lastF.setText(QtGui.QApplication.translate("oasWindow", ">>", None, QtGui.QApplication.UnicodeUTF8))
self.oas_label_2.setText(QtGui.QApplication.translate("oasWindow", "Node Type:", None, QtGui.QApplication.UnicodeUTF8))
self.oas_attribute_nodetype.setText(QtGui.QApplication.translate("oasWindow", "empty", None, QtGui.QApplication.UnicodeUTF8))
self.oas_attribute_cache.setText(QtGui.QApplication.translate("oasWindow", "cache", None, QtGui.QApplication.UnicodeUTF8))
| ["laszlo.mates@732492aa-5b49-0410-a19c-07a6d82ec771"] | laszlo.mates@732492aa-5b49-0410-a19c-07a6d82ec771 |
32a9080820f79c628edcd8a11fb345d860e9800a | 28b1ed1359bd9539f9a15b64663652ec4eb3f284 | /Week_12/matplotlib_example.py | 301f43223dac2683ae8891d160b23ec806636397 | []
| no_license | achapkowski/Python_for_GIS_and_RS | 5fb68cbe1d46f28487e2a41099cf42b942587afa | 9b5d8da6b7bdbbfaa2f45b20d8704c317a86e785 | refs/heads/master | 2021-01-20T02:12:01.785780 | 2017-04-24T22:44:08 | 2017-04-24T22:44:08 | 89,385,947 | 1 | 0 | null | 2017-04-25T17:02:35 | 2017-04-25T17:02:35 | null | UTF-8 | Python | false | false | 1,115 | py | import xlrd
file_and_path = r"C:\Users\greg6750\Documents\IPython Notebooks\Python_for_GIS_and_RS\Week_12\SENZA_0_SUNAA_0_CORN.xlsx"
print("Reading Workbook")
workbook = xlrd.open_workbook(file_and_path)
worksheet = workbook.sheet_by_index(0)
freq = []
g = []
t = []
print("Creating Arrays")
for row in range(worksheet.nrows):
if row>0:
#Frequency
freq_cell = worksheet.cell(row,0)
freq.append(freq_cell.value)
GRR_cell = worksheet.cell(row,8)
g.append(GRR_cell.value)
TOA_cell = worksheet.cell(row,14)
t.append(TOA_cell.value)
#For plotting, import matplotlib
from matplotlib import pyplot as plt
#import matplotlib.pyplot as plt
##Basic single plot
#plt.plot(freq, g)
#plt.show()
####Multiple plots
##plt.subplot(211)
###plt.figure(1)
##plt.plot(freq, g, 'b-o')
##plt.subplot(2, 1, 2)
##plt.plot(freq, t, 'r-o')
##plt.show()
##Typing numpy and matplotlib together
import numpy as np
gaussian = np.random.normal(0, 1, 100000)
plt.hist(gaussian, bins=100)
#print "Mean: %f Standard Deviation: %f" % (gaussian.mean(), gaussian.std())
plt.show()
| ["[email protected]"] | |
de63f5be05fb160c05847158071ed0f615ee7519 | 5922398212b6e113f416a54d37c2765d7d119bb0 | /python/Binary Tree Serialization.py | 50267542ef2fe76b1e4ff14b7fd8af5aabe2c3f3 | []
| no_license | CrazyCoder4Carrot/lintcode | e777f73e1fdfe3b8abc9dbfc07d26602bf614151 | 33dcd7f0e2d9bee58840a3370837cb2db82de1eb | refs/heads/master | 2021-01-09T20:38:59.813198 | 2017-01-16T22:34:26 | 2017-01-16T22:34:26 | 60,287,619 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,258 | py | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
'''
@param root: An object of TreeNode, denote the root of the binary tree.
This method will be invoked first, you should design your own algorithm
to serialize a binary tree which denote by a root node to a string which
can be easily deserialized by your own "deserialize" method later.
'''
def serialize(self, root):
# write your code here
if not root:
return []
stack = [root]
data = []
while stack:
levelstack = []
for node in stack:
if node:
data.append(node.val)
levelstack.append(node.left)
levelstack.append(node.right)
else:
data.append('#')
stack = levelstack
i = len(data) - 1
        while i >= 0:
            if data[i] == '#':
                del data[i]
                i -= 1
            else:
                return data
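    # Example (hypothetical tree): serializing
    #        3
    #       / \
    #      9  20
    #         / \
    #        15  7
    # yields [3, 9, 20, '#', '#', 15, 7]: level order with '#' for missing children
    # and the trailing '#' markers trimmed off.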
'''
@param data: A string serialized by your serialize method.
This method will be invoked second, the argument data is what exactly
you serialized at method "serialize", that means the data is not given by
system, it's given by your own serialize method. So the format of data is
designed by yourself, and deserialize it here as you serialize it in
"serialize" method.
'''
def deserialize(self, data):
# write your code here
if not data:
return None
root = TreeNode(data[0])
stack = [root]
i = 1
while stack:
levelstack = []
for node in stack:
node.left = TreeNode(data[i]) if i < len(data) and data[i] != "#" else None
i += 1
if node.left:
levelstack.append(node.left)
node.right = TreeNode(data[i]) if i < len(data) and data[i] != "#" else None
i += 1
if node.right:
levelstack.append(node.right)
stack = levelstack
        return root
| ["[email protected]"] | |
6dd8a41262ec87d8286028a969c7d6f182b407b1 | 68a52ad1df836c9f6d922515b2f896b6928ce6a0 | /SafetyProductionSystem/weekworktask/migrations/0005_auto_20190225_1120.py | 48e63ae27db7629622745b675ecbae9443f84283 | []
| no_license | Chuazhen0/SafetyProductionSystem | 1141f845e04b032ff2a230c8def26066f061600c | 442d5df3818d43aebb9830f2456c73018aae2acf | refs/heads/master | 2020-05-20T12:47:46.365020 | 2019-05-08T09:56:01 | 2019-05-08T09:56:01 | 185,579,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,873 | py | # Generated by Django 2.0.5 on 2019-02-25 11:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('weekworktask', '0004_auto_20190214_1558'),
]
operations = [
migrations.AlterField(
model_name='weekworktask',
name='created_at',
field=models.DateTimeField(null=True, verbose_name='创建时间'),
),
migrations.AlterField(
model_name='weekworktask',
name='created_by',
field=models.ForeignKey(null=True, on_delete=models.SET('systemsettings.MyUser'), related_name='周期检测任务创建人', to='systemsettings.MyUser', verbose_name='创建人'),
),
migrations.AlterField(
model_name='weekworktask',
name='last_updated_at',
field=models.DateTimeField(null=True, verbose_name='最后更新时间'),
),
migrations.AlterField(
model_name='weekworktask',
name='last_updated_by',
field=models.ForeignKey(null=True, on_delete=models.SET('systemsettings.MyUser'), related_name='周期检测任务最后更新人', to='systemsettings.MyUser', verbose_name='最后更新人'),
),
migrations.AlterField(
model_name='weekworktask',
name='number',
field=models.CharField(max_length=30, null=True, verbose_name='周期检测任务编码'),
),
migrations.AlterField(
model_name='weekworktask',
name='task_start_time',
field=models.DateField(null=True, verbose_name='计划开始时间'),
),
migrations.AlterField(
model_name='weekworktask',
name='time_limit',
field=models.CharField(max_length=10, null=True, verbose_name='完成时限'),
),
]
| [
"[email protected]"
]
| |
0343fcf7a0ecf13d0cf6070e68aaf6fa43ea727c | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/stringMethods_20200707100259.py | 8206fc5fa46489851f957ce0776ea9caca48fe98 | []
| no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | def array(arr):
newArr = []
for i in range(len(arr)):
        b = arr[i]
print(arr[i])
array(["[6,7,5]","[1,8]"]) | [
"[email protected]"
]
| |
2f8083908138a26a1c74293a8d1ff16a6f17f9a0 | 59b0e278e6b60582e5ff70be604fa8e955b9c697 | /samples/demo_03.py | 089d8b235b42b04de576b81be9d94cf2fe34bf85 | []
| no_license | qq329999897/P3P4_API_LineTestFrame | 0a18b52feb37df301f1eb7a60a7a096ecd6709f9 | 71de1fc23dc976c5965865f4eb79dd78559c531d | refs/heads/master | 2023-01-05T14:48:35.546705 | 2020-11-01T01:52:59 | 2020-11-01T01:52:59 | 308,985,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | #!/usr/bin/env python
# encoding: utf-8
# @author: liusir
# @file: demo_03.py
# @time: 2020/10/18 10:30 AM
import logging
import time
from logging import handlers
logger = logging.getLogger('newdream')
logger.setLevel( logging.DEBUG )
formatter = logging.Formatter('%(asctime)s - %(message)s')
th = handlers.TimedRotatingFileHandler("test.log",when='D',interval=1,backupCount=5)
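# when='D' with interval=1 rotates the log once per day; backupCount=5 keeps at
# most five rotated files before the oldest one is deleted.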
th.setFormatter( formatter )
th.setLevel( logging.DEBUG )
th.suffix = "%Y_%m_%d_%H_%M_%S.log"  # filename suffix format for rotated log files
logger.addHandler( th )
logger.info('hello1')
time.sleep(4)
logger.warning('hello2')
time.sleep(4)
logger.error('hello3') | [
"[email protected]"
]
| |
89476a745921e2247464d59ad3914c7f0d653c86 | 1fe0b680ce53bb3bb9078356ea2b25e572d9cfdc | /venv/lib/python2.7/site-packages/ansible/module_utils/network/f5/icontrol.py | 57d0bb727b98f408e9834e15d83b009a114b7b1b | [
"MIT"
]
| permissive | otus-devops-2019-02/devopscourses_infra | 1929c4a9eace3fdb0eb118bf216f3385fc0cdb1c | e42e5deafce395af869084ede245fc6cff6d0b2c | refs/heads/master | 2020-04-29T02:41:49.985889 | 2019-05-21T06:35:19 | 2019-05-21T06:35:19 | 175,780,457 | 0 | 1 | MIT | 2019-05-21T06:35:20 | 2019-03-15T08:35:54 | HCL | UTF-8 | Python | false | false | 18,370 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
from BytesIO import BytesIO
except ImportError:
from io import BytesIO
from ansible.module_utils.urls import urlparse
from ansible.module_utils.urls import generic_urlparse
from ansible.module_utils.urls import Request
try:
import json as _json
except ImportError:
import simplejson as _json
try:
from library.module_utils.network.f5.common import F5ModuleError
except ImportError:
from ansible.module_utils.network.f5.common import F5ModuleError
"""An F5 REST API URI handler.
Use this module to make calls to an F5 REST server. It is influenced by the same
API that the Python ``requests`` tool uses, but the two are not the same, as the
library here is **much** more simple and targeted specifically to F5's needs.
The ``requests`` design was chosen due to familiarity with the tool. Internally,
the classes contained herein use Ansible native libraries.
The means by which you should use it are similar to ``requests`` basic usage.
Authentication is not handled for you automatically by this library, however it *is*
handled automatically for you in the supporting F5 module_utils code; specifically the
different product module_util files (bigip.py, bigiq.py, etc).
Internal (non-module) usage of this library looks like this.
```
# Create a session instance
session = iControlRestSession()
session.verify = False
server = '1.1.1.1'
port = 443
# Payload used for getting an initial authentication token
payload = {
'username': 'admin',
'password': 'secret',
'loginProviderName': 'tmos'
}
# Create URL to call, injecting server and port
url = f"https://{server}:{port}/mgmt/shared/authn/login"
# Call the API
resp = session.post(url, json=payload)
# View the response
print(resp.json())
# Update the session with the authentication token
session.headers['X-F5-Auth-Token'] = resp.json()['token']['token']
# Create another URL to call, injecting server and port
url = f"https://{server}:{port}/mgmt/tm/ltm/virtual/~Common~virtual1"
# Call the API
resp = session.get(url)
# View the details of a virtual payload
print(resp.json())
```
"""
from ansible.module_utils.six.moves.urllib.error import HTTPError
class Response(object):
def __init__(self):
self._content = None
self.status = None
self.headers = dict()
self.url = None
self.reason = None
self.request = None
self.msg = None
@property
def content(self):
return self._content
@property
def raw_content(self):
return self._content
def json(self):
return _json.loads(self._content or 'null')
@property
def ok(self):
if self.status is not None and int(self.status) > 400:
return False
try:
response = self.json()
if 'code' in response and response['code'] > 400:
return False
except ValueError:
pass
return True
class iControlRestSession(object):
"""Represents a session that communicates with a BigIP.
This acts as a loose wrapper around Ansible's ``Request`` class. We're doing
this as interim work until we move to the httpapi connector.
"""
def __init__(self, headers=None, use_proxy=True, force=False, timeout=120,
validate_certs=True, url_username=None, url_password=None,
http_agent=None, force_basic_auth=False, follow_redirects='urllib2',
client_cert=None, client_key=None, cookies=None):
self.request = Request(
headers=headers,
use_proxy=use_proxy,
force=force,
timeout=timeout,
validate_certs=validate_certs,
url_username=url_username,
url_password=url_password,
http_agent=http_agent,
force_basic_auth=force_basic_auth,
follow_redirects=follow_redirects,
client_cert=client_cert,
client_key=client_key,
cookies=cookies
)
self.last_url = None
def get_headers(self, result):
try:
return dict(result.getheaders())
except AttributeError:
return result.headers
def update_response(self, response, result):
response.headers = self.get_headers(result)
response._content = result.read()
response.status = result.getcode()
response.url = result.geturl()
response.msg = "OK (%s bytes)" % response.headers.get('Content-Length', 'unknown')
def send(self, method, url, **kwargs):
response = Response()
# Set the last_url called
#
# This is used by the object destructor to erase the token when the
# ModuleManager exits and destroys the iControlRestSession object
self.last_url = url
body = None
data = kwargs.pop('data', None)
json = kwargs.pop('json', None)
if not data and json is not None:
self.request.headers['Content-Type'] = 'application/json'
body = _json.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
if data:
body = data
if body:
kwargs['data'] = body
try:
result = self.request.open(method, url, **kwargs)
except HTTPError as e:
# Catch HTTPError delivered from Ansible
#
# The structure of this object, in Ansible 2.8 is
#
# HttpError {
# args
# characters_written
# close
# code
# delete
# errno
# file
# filename
# filename2
# fp
# getcode
# geturl
# hdrs
# headers
# info
# msg
# name
# reason
# strerror
# url
# with_traceback
# }
self.update_response(response, e)
return response
self.update_response(response, result)
return response
def delete(self, url, **kwargs):
return self.send('DELETE', url, **kwargs)
def get(self, url, **kwargs):
return self.send('GET', url, **kwargs)
def patch(self, url, data=None, **kwargs):
return self.send('PATCH', url, data=data, **kwargs)
def post(self, url, data=None, **kwargs):
return self.send('POST', url, data=data, **kwargs)
def put(self, url, data=None, **kwargs):
return self.send('PUT', url, data=data, **kwargs)
def __del__(self):
if self.last_url is None:
return
token = self.request.headers.get('X-F5-Auth-Token', None)
if not token:
return
try:
p = generic_urlparse(urlparse(self.last_url))
uri = "https://{0}:{1}/mgmt/shared/authz/tokens/{2}".format(
p['hostname'], p['port'], token
)
self.delete(uri)
except ValueError:
pass
class TransactionContextManager(object):
def __init__(self, client, validate_only=False):
self.client = client
self.validate_only = validate_only
self.transid = None
def __enter__(self):
uri = "https://{0}:{1}/mgmt/tm/transaction/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json={})
if resp.status not in [200]:
raise Exception
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
self.transid = response['transId']
self.client.api.request.headers['X-F5-REST-Coordination-Id'] = self.transid
return self.client
def __exit__(self, exc_type, exc_value, exc_tb):
self.client.api.request.headers.pop('X-F5-REST-Coordination-Id')
if exc_tb is None:
uri = "https://{0}:{1}/mgmt/tm/transaction/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
self.transid
)
params = dict(
state="VALIDATING",
validateOnly=self.validate_only
)
resp = self.client.api.patch(uri, json=params)
if resp.status not in [200]:
raise Exception
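# A hypothetical usage sketch: group several REST calls into one BIG-IP transaction.
# ``__enter__`` returns the client, so calls go through its ``api`` attribute:
#
#     with TransactionContextManager(client) as trans:
#         trans.api.patch(uri_one, json=params_one)
#         trans.api.patch(uri_two, json=params_two)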
def download_file(client, url, dest):
"""Download a file from the remote device
This method handles the chunking needed to download a file from
a given URL on the BIG-IP.
Arguments:
client (object): The F5RestClient connection object.
url (string): The URL to download.
dest (string): The location on (Ansible controller) disk to store the file.
Returns:
bool: True on success. False otherwise.
"""
with open(dest, 'wb') as fileobj:
chunk_size = 512 * 1024
start = 0
end = chunk_size - 1
size = 0
current_bytes = 0
while True:
content_range = "%s-%s/%s" % (start, end, size)
headers = {
'Content-Range': content_range,
'Content-Type': 'application/octet-stream'
}
data = {
'headers': headers,
'verify': False,
'stream': False
}
response = client.api.get(url, headers=headers, json=data)
if response.status == 200:
# If the size is zero, then this is the first time through
# the loop and we don't want to write data because we
# haven't yet figured out the total size of the file.
if size > 0:
current_bytes += chunk_size
fileobj.write(response.raw_content)
# Once we've downloaded the entire file, we can break out of
# the loop
if end == size:
break
crange = response.headers['Content-Range']
# Determine the total number of bytes to read.
if size == 0:
size = int(crange.split('/')[-1]) - 1
# If the file is smaller than the chunk_size, the BigIP
# will return an HTTP 400. Adjust the chunk_size down to
# the total file size...
if chunk_size > size:
end = size
# ...and pass on the rest of the code.
continue
start += chunk_size
if (current_bytes + chunk_size) > size:
end = size
else:
end = start + chunk_size - 1
return True
def upload_file(client, url, src, dest=None):
"""Upload a file to an arbitrary URL.
This method is responsible for correctly chunking an upload request to an
arbitrary file worker URL.
Arguments:
client (object): The F5RestClient connection object.
url (string): The URL to upload a file to.
src (string): The file to be uploaded.
dest (string): The file name to create on the remote device.
Examples:
The ``dest`` may be either an absolute or relative path. The basename
of the path is used as the remote file name upon upload. For instance,
in the example below, ``BIGIP-13.1.0.8-0.0.3.iso`` would be the name
of the remote file.
The specified URL should be the full URL to where you want to upload a
file. BIG-IP has many different URLs that can be used to handle different
types of files. This is why a full URL is required.
>>> from ansible.module_utils.network.f5.icontrol import upload_client
>>> url = 'https://{0}:{1}/mgmt/cm/autodeploy/software-image-uploads'.format(
... self.client.provider['server'],
... self.client.provider['server_port']
... )
>>> dest = '/path/to/BIGIP-13.1.0.8-0.0.3.iso'
>>> upload_file(self.client, url, dest)
True
Returns:
bool: True on success. False otherwise.
Raises:
F5ModuleError: Raised if ``retries`` limit is exceeded.
"""
if isinstance(src, StringIO) or isinstance(src, BytesIO):
fileobj = src
else:
fileobj = open(src, 'rb')
try:
size = os.stat(src).st_size
is_file = True
except TypeError:
src.seek(0, os.SEEK_END)
size = src.tell()
src.seek(0)
is_file = False
# This appears to be the largest chunk size that iControlREST can handle.
#
# The trade-off you are making by choosing a chunk size is speed, over size of
# transmission. A lower chunk size will be slower because a smaller amount of
# data is read from disk and sent via HTTP. Lots of disk reads are slower and
# There is overhead in sending the request to the BIG-IP.
#
# Larger chunk sizes are faster because more data is read from disk in one
# go, and therefore more data is transmitted to the BIG-IP in one HTTP request.
#
# If you are transmitting over a slow link though, it may be more reliable to
    # transmit many small chunks than fewer large chunks. It will clearly take
# longer, but it may be more robust.
chunk_size = 1024 * 7168
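    # 1024 * 7168 bytes is 7 MiB per chunk, so, for example, a ~700 MiB ISO image
    # is transmitted in roughly 100 sequential POST requests.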
start = 0
retries = 0
if dest is None and is_file:
basename = os.path.basename(src)
else:
basename = dest
url = '{0}/{1}'.format(url.rstrip('/'), basename)
while True:
if retries == 3:
# Retries are used here to allow the REST API to recover if you kill
# an upload mid-transfer.
#
# There exists a case where retrying a new upload will result in the
# API returning the POSTed payload (in bytes) with a non-200 response
# code.
#
# Retrying (after seeking back to 0) seems to resolve this problem.
raise F5ModuleError(
"Failed to upload file too many times."
)
try:
file_slice = fileobj.read(chunk_size)
if not file_slice:
break
current_bytes = len(file_slice)
if current_bytes < chunk_size:
end = size
else:
end = start + current_bytes
headers = {
'Content-Range': '%s-%s/%s' % (start, end - 1, size),
'Content-Type': 'application/octet-stream'
}
# Data should always be sent using the ``data`` keyword and not the
# ``json`` keyword. This allows bytes to be sent (such as in the case
            # of uploading ISO files).
response = client.api.post(url, headers=headers, data=file_slice)
if response.status != 200:
# When this fails, the output is usually the body of whatever you
# POSTed. This is almost always unreadable because it is a series
# of bytes.
#
# Therefore, including an empty exception here.
raise F5ModuleError()
start += current_bytes
except F5ModuleError:
# You must seek back to the beginning of the file upon exception.
#
# If this is not done, then you risk uploading a partial file.
fileobj.seek(0)
retries += 1
return True
def tmos_version(client):
uri = "https://{0}:{1}/mgmt/tm/sys/".format(
client.provider['server'],
client.provider['server_port'],
)
resp = client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
to_parse = urlparse(response['selfLink'])
query = to_parse.query
version = query.split('=')[1]
return version
def bigiq_version(client):
uri = "https://{0}:{1}/mgmt/shared/resolver/device-groups/cm-shared-all-big-iqs/devices".format(
client.provider['server'],
client.provider['server_port'],
)
query = "?$select=version"
resp = client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if 'items' in response:
version = response['items'][0]['version']
return version
raise F5ModuleError(
'Failed to retrieve BIGIQ version information.'
)
def module_provisioned(client, module_name):
provisioned = modules_provisioned(client)
if module_name in provisioned:
return True
return False
def modules_provisioned(client):
"""Returns a list of all provisioned modules
Args:
client: Client connection to the BIG-IP
Returns:
A list of provisioned modules in their short name for.
For example, ['afm', 'asm', 'ltm']
"""
uri = "https://{0}:{1}/mgmt/tm/sys/provision".format(
client.provider['server'],
client.provider['server_port']
)
resp = client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if 'items' not in response:
return []
return [x['name'] for x in response['items'] if x['level'] != 'none']
| [
"[email protected]"
]
| |
ddf082a606438d2b7b4eaa1c225de04615338997 | 4d99350a527a88110b7bdc7d6766fc32cf66f211 | /OpenGLCffi/GLES3/EXT/AMD/performance_monitor.py | 0ebc960e38c441205a44853cabf3e8f2a8205694 | [
"MIT"
]
| permissive | cydenix/OpenGLCffi | e790ef67c2f6c9877badd5c38b7d58961c8739cd | c78f51ae5e6b655eb2ea98f072771cf69e2197f3 | refs/heads/master | 2021-01-11T07:31:10.591188 | 2017-04-17T11:04:55 | 2017-04-17T11:04:55 | 80,312,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,541 | py | from OpenGLCffi.GLES3 import params
@params(api='gles3', prms=['numGroups', 'groupsSize', 'groups'])
def glGetPerfMonitorGroupsAMD(numGroups, groupsSize, groups):
pass
@params(api='gles3', prms=['group', 'numCounters', 'maxActiveCounters', 'counterSize', 'counters'])
def glGetPerfMonitorCountersAMD(group, numCounters, maxActiveCounters, counterSize, counters):
pass
@params(api='gles3', prms=['group', 'bufSize', 'length', 'groupString'])
def glGetPerfMonitorGroupStringAMD(group, bufSize, length, groupString):
pass
@params(api='gles3', prms=['group', 'counter', 'bufSize', 'length', 'counterString'])
def glGetPerfMonitorCounterStringAMD(group, counter, bufSize, length, counterString):
pass
@params(api='gles3', prms=['group', 'counter', 'pname', 'data'])
def glGetPerfMonitorCounterInfoAMD(group, counter, pname):
pass
@params(api='gles3', prms=['n', 'monitors'])
def glGenPerfMonitorsAMD(n, monitors):
pass
@params(api='gles3', prms=['n', 'monitors'])
def glDeletePerfMonitorsAMD(n, monitors):
pass
@params(api='gles3', prms=['monitor', 'enable', 'group', 'numCounters', 'counterList'])
def glSelectPerfMonitorCountersAMD(monitor, enable, group, numCounters, counterList):
pass
@params(api='gles3', prms=['monitor'])
def glBeginPerfMonitorAMD(monitor):
pass
@params(api='gles3', prms=['monitor'])
def glEndPerfMonitorAMD(monitor):
pass
@params(api='gles3', prms=['monitor', 'pname', 'dataSize', 'data', 'bytesWritten'])
def glGetPerfMonitorCounterDataAMD(monitor, pname, dataSize, bytesWritten):
pass
| [
"[email protected]"
]
| |
410b9989382a4f8aa1248d40affccc169854c326 | c8705e8f8797ebdd6c76e8aa697d7ed9da46f3c3 | /colorpicker/models.py | 177cab327cb231f450d3e90323b1f4c21f356eb4 | []
| no_license | zokis/django-colorpicker | 1cedcb511011c504165a687c19848140f3656562 | f391341969a29e7de1dc1887ef9e9fadd8669216 | refs/heads/master | 2020-12-30T18:29:58.990911 | 2013-02-15T13:30:54 | 2013-02-15T13:30:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,336 | py | # -*- coding: utf-8 -*-
from django.core.validators import ValidationError
from django.db.models import CharField
from widgets import ColorPickerWidget
from forms import ColorField as ColorFormField
from utils import (is_valid_alpha_hex, is_valid_hex, is_valid_rgb,
is_valid_rgba, rgba_to_alpha_hex, rgb_to_hex, hex_to_rgb)
FORMAT_RGB = 'rgb'
FORMAT_HEX = 'hex'
FORMAT_RGBA = 'rgba'
FORMAT_HEXA = 'hexa'
FORMATS = (FORMAT_RGB, FORMAT_HEX, FORMAT_RGBA, FORMAT_HEXA)
class ColorField(CharField):
def __init__(self, format='hex', *args, **kwargs):
kwargs['max_length'] = 25
self.format = format
super(ColorField, self).__init__(*args, **kwargs)
def formfield(self, *args, **kwargs):
kwargs['widget'] = ColorPickerWidget(format=self.format)
kwargs['form_class'] = ColorFormField
return super(ColorField, self).formfield(*args, **kwargs)
def clean(self, value, model_instance):
'''
        Validates colors in the RGB, RGBA, #RRGGBB and #RRGGBBAA formats
'''
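        # Examples of accepted values per format (whitespace is stripped before
        # matching): 'rgb(255,0,0)', 'rgba(255,0,0,0.5)', '#FF0000', '#FF0000FF'.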
import re
invalid = 'Cor %s inválida' % self.format.upper()
value = value.replace(' ', '')
if self.format == FORMAT_RGB:
regex = re.compile("rgb\(\d{1,3},\d{1,3},\d{1,3}\)",
re.IGNORECASE | re.UNICODE)
is_valid = is_valid_rgb
elif self.format == FORMAT_RGBA:
regex = re.compile("rgba\((?P<r>\d{1,3}),(?P<g>\d{1,3}),(?P<b>\d{1,3}),(?P<a>(0\.\d+)|\d)\)",
re.IGNORECASE | re.UNICODE)
is_valid = is_valid_rgba
        elif self.format == FORMAT_HEXA:
regex = re.compile("#([A-Fa-f\d]{8}|[A-Fa-f\d]{6}|[A-Fa-f\d]{3})",
re.IGNORECASE | re.UNICODE)
is_valid = is_valid_alpha_hex
else:
regex = re.compile("#([A-Fa-f\d]{8}|[A-Fa-f\d]{6}|[A-Fa-f\d]{3})",
re.IGNORECASE | re.UNICODE)
is_valid = is_valid_hex
if len(regex.findall(value)) != 1:
raise ValidationError(invalid)
if not is_valid(value):
raise ValidationError(invalid)
return super(ColorField, self).clean(value, model_instance)
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^colorpicker\.models\.ColorField"])
except ImportError:
pass
| [
"[email protected]"
]
| |
10bb32abb023447157a766575d4476a86ed88ecf | 3863c069014bccc095e66d956af7900249ebf784 | /ir/bm25_ranker.py | e7fec5bef3cb810a935fcf5ddbd098b108bb84e2 | []
| no_license | asvishen/Factoid-Question-Answering | 28403c3ef60b36b44e6efe3efdad74524a32a200 | 0516aebf5f80c6cfa51475ae2c32dea0ef325719 | refs/heads/master | 2020-12-03T02:01:29.614281 | 2013-05-31T20:57:18 | 2013-05-31T20:57:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,504 | py | # -*- coding: utf-8 -*-
'''
Rank candidate texts by their similarity to the query.
@author: gavin hackeling
'''
from __future__ import division
from nltk import word_tokenize
from math import log
from collections import defaultdict
class BM25_calc:
def __init__(self, query, c):
self.k1 = 1.2
self.b = 0.75
#self.stop_words = ['the', 'is', 'are', 'am', 'was', 'have', 'had', 'has',
#'a', 'an', 'be', 'did', 'does', 'do', 'to', ]
#self.query = [t.lower() for t in query if t.lower() not in self.stop_words]
self.query = [t.lower() for t in query]
self.original_collection = c
c = [d.lower() for d in c]
self.collection = [word_tokenize(d) for d in c]
self.avg_len = sum([len(d) for d in self.collection]) / len(c)
self.freq_counter = defaultdict(int)
def get_num_docs_containing(self, token):
num = 0
for document in self.collection:
if token in document:
num += 1
return num
# TODO do this once
def get_tf(self, token, document):
counter = defaultdict(int)
for word in document:
#if word not in self.stop_words:
counter[word] += 1
return counter[token]
def get_idf(self, token):
N = len(self.collection)
nq = self.get_num_docs_containing(token)
top = N - nq + 0.5
bottom = nq + 0.5
idf = log(top / bottom)
return max(.5, idf)
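    # score() below follows the standard Okapi BM25 form:
    #   score(q, d) = sum over query terms t of
    #     IDF(t) * tf(t, d) * (k1 + 1) / (tf(t, d) + k1 * (1 - b + b * |d| / avgdl))
    # with k1 = 1.2 and b = 0.75 from __init__, the IDF floored at 0.5 in get_idf,
    # and each term's contribution floored at 0.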
def score(self, document):
score = 0
for token in self.query:
tf = self.get_tf(token, document)
idf = self.get_idf(token)
top = tf * (self.k1 + 1)
bottom = tf + self.k1 * (
1 - self.b + self.b * (len(document) / self.avg_len))
s = idf * (top / bottom)
score += max(s, 0)
return score
def rank(self):
scores = []
for document_index, document in enumerate(self.collection):
s = self.score(document)
scores.append((s, document, document_index))
scores.sort(key=lambda tup: tup[0], reverse=True)
originals = []
for i in scores:
originals.append(self.original_collection[i[2]])
return originals
if __name__ == '__main__':
query = 'did the Ravens win the Super Bowl?'
query = word_tokenize(query)
collection = [
'The Baltimore Ravens would later win Super Bowl XLVII in 2013 against the San Francisco 49ers.',
"Ray Lewis was a member of both Ravens' Super Bowl wins.",
'75 Jonathan Ogden elected in 2013 played for Ravens 1996–2007 won Super Bowl XXXV Retired numbers.',
'The Ravens officially have no retired numbers.',
"Michael Crabtree never had a chance to make a catch in the end zone on what turned out to be the San Francisco 49ers' last play of Super Bowl XLVII a 3431 loss to ",
'Ravens quarterback Trent Dilfer and wide receiver ',
' The Ravens became the third wildcard team to win the Super Bowl.',
'The Oakland Raiders did it in 1981 and ',
'The Baltimore Ravens have appeared in two Super Bowls and won both of them.',
'Here are the results victories in bold Super Bowl XXXV 12801 Baltimore 34 New ',
'the and'
]
#collection = [
#'The Oakland Raiders did it in 1981 and ',
#]
bm25_calc = BM25_calc(query, collection)
ranked = bm25_calc.rank() | [
"[email protected]"
]
| |
31c8805acb94964e9727c888e6b71f3bacfefb7f | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/commotion/helpers.py | 9e9244763a1c14490e6638133246a23eaba87248 | []
| no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,875 | py | import bpy
def show_error_message(message):
def draw(self, context):
self.layout.label(message)
bpy.context.window_manager.popup_menu(draw, title='Error', icon='ERROR')
def shape_list_refresh(context):
scene = context.scene
skcoll = scene.commotion_skcoll
if hasattr(scene, 'commotion_skcoll'):
for sk in skcoll:
skcoll.remove(0)
i = 0
for kb in context.active_object.data.shape_keys.key_blocks:
skcoll.add()
skcoll[i].name = kb.name
skcoll[i].index = i
i += 1
def update_sp(self, context):
scene = context.scene
skcoll = scene.commotion_skcoll
props = scene.commotion
key = context.active_object.data.shape_keys
if key.use_relative:
for sk in skcoll:
if sk.selected:
key.key_blocks[sk.index].value = props.shape_value
else:
for ob in context.selected_objects:
for sk in skcoll:
if sk.selected:
ob.data.shape_keys.key_blocks[sk.index].interpolation = props.shape_interpolation
def auto_keyframes(context):
frame = context.scene.frame_current
for ob in context.selected_objects:
key = ob.data.shape_keys
key.eval_time = int(key.key_blocks[1].frame)
key.keyframe_insert(data_path='eval_time', frame=frame)
key.eval_time = int(key.key_blocks[-1].frame)
key.keyframe_insert(data_path='eval_time', frame=frame + 20)
def keyframes_offset(fcus, i, context):
frame = context.scene.frame_current
for fcu in fcus:
fcu_range = fcu.range()[0]
for kp in fcu.keyframe_points:
kp.co[0] = kp.co[0] + frame + i - fcu_range
kp.handle_left[0] = kp.handle_left[0] + frame + i - fcu_range
kp.handle_right[0] = kp.handle_right[0] + frame + i - fcu_range
def strips_offset(strip, i, context):
frame = context.scene.frame_current
strip.frame_end = frame - 1 + i + strip.frame_end
strip.frame_start = frame + i
strip.scale = 1
def data_access(mode, ob, i, context):
if 'FCURVES' in mode:
if 'SHAPE_KEYS' in mode:
fcus = ob.data.shape_keys.animation_data.action.fcurves
elif 'OBJECT' in mode:
fcus = ob.animation_data.action.fcurves
keyframes_offset(fcus, i, context)
elif 'NLA' in mode:
if 'SHAPE_KEYS' in mode:
strip = ob.data.shape_keys.animation_data.nla_tracks[0].strips[0]
elif 'OBJECT' in mode:
strip = ob.animation_data.nla_tracks[0].strips[0]
strips_offset(strip, i, context)
elif 'PARENT' in mode:
ob.use_slow_parent = True
ob.slow_parent_offset = i
def offset_cursor(offset, threshold, mode, context):
cursor = context.scene.cursor_location
dist = {}
for ob in context.selected_objects:
distance = (cursor - (ob.location + ob.delta_location)).length
dist[ob] = distance
if 'REVERSE' in mode:
dist = sorted(dist, key=dist.get, reverse=True)
else:
dist = sorted(dist, key=dist.get)
i = 0
i2 = threshold
for ob in dist:
data_access(mode, ob, i, context)
if i2 > 1:
if i2 <= (dist.index(ob) + 1):
i2 += threshold
i += offset
else:
i += offset
def offset_name(offset, threshold, mode, context):
obs = context.selected_objects
dist = {}
for ob in obs:
dist[ob] = ob.name
if 'REVERSE' in mode:
dist = sorted(dist, key=dist.get, reverse=True)
else:
dist = sorted(dist, key=dist.get)
i = 0
i2 = threshold
for ob in dist:
data_access(mode, ob, i, context)
if i2 > 1:
if i2 <= (dist.index(ob) + 1):
i2 += threshold
i += offset
else:
i += offset
def offset_parent(offset, context):
mode = ['PARENT']
dist = {}
for ob in context.selected_objects:
if ob.parent:
distance = (ob.parent.location - (ob.location + ob.delta_location + ob.parent.location)).length
dist[ob] = distance
dist = sorted(dist, key=dist.get)
i = 0 + offset
for ob in dist:
data_access(mode, ob, i, context)
i += offset
def offset_multitarget(objects, targets, offset, threshold, mode, context):
obs = {}
for ob in objects:
targs = {}
for t in targets:
distance = (t.location - (ob.location + ob.delta_location)).length
targs[distance] = t
dist = sorted(targs)[0]
obs[ob] = [dist, targs[dist]]
for t in targets:
obs_thold = []
i = 0
i2 = threshold
if 'REVERSE' in mode:
obs_sorted = sorted(obs, key=obs.get, reverse=True)
else:
obs_sorted = sorted(obs, key=obs.get)
for ob in obs_sorted:
if obs[ob][1] == t:
data_access(mode, ob, i, context)
if i2 > 1:
obs_thold.append(ob)
if i2 <= (obs_thold.index(ob) + 1):
i += offset
i2 += threshold
else:
i += offset
def create_nla_tracks(anim):
frst_frame = anim.action.frame_range[0]
if not anim.nla_tracks:
anim.nla_tracks.new()
anim.nla_tracks[0].strips.new('name', frst_frame, anim.action)
anim.action = None
def create_strips(mode, context):
obs = context.selected_objects
if 'SHAPE_KEYS' in mode:
for ob in obs:
if ob.data.shape_keys:
anim = ob.data.shape_keys.animation_data
else:
return show_error_message('Selected objects have no Shape Keys')
create_nla_tracks(anim)
elif 'OBJECT' in mode:
for ob in obs:
if ob.animation_data:
anim = ob.animation_data
else:
return show_error_message('Selected objects have no Animation')
create_nla_tracks(anim)
def link_strips(obj_strip, ob_strip):
obj_a_s = obj_strip.action_frame_start
obj_a_e = obj_strip.action_frame_end
ob_strip.action = obj_strip.action
ob_strip.action_frame_start = obj_a_s
ob_strip.action_frame_end = obj_a_e
def link_to_active(mode, context):
obj = context.active_object
obs = context.selected_objects
if 'NLA' in mode:
if 'SHAPE_KEYS' in mode:
obj_strip = obj.data.shape_keys.animation_data.nla_tracks[0].strips[0]
for ob in obs:
ob_strip = ob.data.shape_keys.animation_data.nla_tracks[0].strips[0]
link_strips(obj_strip, ob_strip)
elif 'OBJECT' in mode:
obj_strip = obj.animation_data.nla_tracks[0].strips[0]
for ob in obs:
ob_strip = ob.animation_data.nla_tracks[0].strips[0]
link_strips(obj_strip, ob_strip)
elif 'FCURVES' in mode:
if 'SHAPE_KEYS' in mode:
action = obj.data.shape_keys.animation_data.action
for ob in obs:
if ob.data.shape_keys.animation_data:
ob.data.shape_keys.animation_data.action = action
else:
ob.data.shape_keys.animation_data_create()
ob.data.shape_keys.animation_data.action = action
elif 'OBJECT' in mode:
action = obj.animation_data.action
for ob in obs:
if ob.animation_data:
ob.animation_data.action = action
else:
ob.animation_data_create()
ob.animation_data.action = action
def copy_to_selected(mode, context):
obj = context.active_object
obs = context.selected_objects
if 'SHAPE_KEYS' in mode:
action = obj.data.shape_keys.animation_data.action
for ob in obs:
if ob.data.shape_keys:
if ob.data.shape_keys.animation_data:
ob.data.shape_keys.animation_data.action = action.copy()
else:
ob.data.shape_keys.animation_data_create()
ob.data.shape_keys.animation_data.action = action.copy()
else:
return show_error_message('Selected objects have no Shape Keys')
elif 'OBJECT' in mode:
action = obj.animation_data.action
for ob in obs:
if ob.animation_data:
ob.animation_data.action = action.copy()
else:
ob.animation_data_create()
ob.animation_data.action = action.copy()
def remove_nla_track(anim):
trks = anim.nla_tracks
anim.action = trks[0].strips[0].action
trks.remove(trks[0])
def strips_to_fcurves(mode, context):
obs = context.selected_objects
if 'SHAPE_KEYS' in mode:
for ob in obs:
anim = ob.data.shape_keys.animation_data
remove_nla_track(anim)
elif 'OBJECT' in mode:
for ob in obs:
anim = ob.animation_data
remove_nla_track(anim)
def sync_len(mode, context):
obs = context.selected_objects
if 'SHAPE_KEYS' in mode:
for ob in obs:
strip = ob.data.shape_keys.animation_data.nla_tracks[0].strips[0]
strip.action_frame_end = (strip.action_frame_start + strip.action.frame_range[1] - 1)
elif 'OBJECT' in mode:
for ob in obs:
strip = ob.animation_data.nla_tracks[0].strips[0]
strip.action_frame_end = (strip.action_frame_start + strip.action.frame_range[1] - 1)
def driver_set(context):
obj = context.active_object
try:
for ob in context.selected_objects:
if ob != obj:
key = ob.data.shape_keys
kb = int(key.key_blocks[1].frame)
kb_last = str(int(key.key_blocks[-1].frame) + 5)
key.driver_add('eval_time')
fcus = ob.data.shape_keys.animation_data.drivers
for fcu in fcus:
if fcu.data_path == 'eval_time':
drv = fcu.driver
drv.type = 'SCRIPTED'
drv.expression = kb_last + '-(dist*3/sx)'
drv.show_debug_info = True
var = drv.variables.new()
var.name = 'dist'
var.type = 'LOC_DIFF'
var.targets[0].id = ob
var.targets[1].id = obj
var = drv.variables.new()
var.name = 'sx'
var.type = 'SINGLE_PROP'
var.targets[0].id = obj
var.targets[0].data_path = 'scale[0]'
if fcu.modifiers:
fcu.modifiers.remove(fcu.modifiers[0])
fcu.keyframe_points.insert(0, kb)
fcu.keyframe_points.insert(kb, kb)
fcu.keyframe_points.insert(kb + 10, kb + 10)
fcu.extrapolation = 'LINEAR'
for kp in fcu.keyframe_points:
kp.interpolation = 'LINEAR'
except:
return show_error_message('Selected objects have no Shape Keys')
def targets_remap(context):
for ob in context.selected_objects:
fcus = ob.data.shape_keys.animation_data.drivers
for fcu in fcus:
if fcu.data_path == 'eval_time':
for var in fcu.driver.variables:
if var.name == 'dist':
var.targets[0].id = ob
def expression_copy(context):
active_fcus = context.active_object.data.shape_keys.animation_data.drivers
for active_fcu in active_fcus:
if active_fcu.data_path == 'eval_time':
for ob in context.selected_objects:
fcus = ob.data.shape_keys.animation_data.drivers
for fcu in fcus:
if fcu.data_path == 'eval_time':
fcu.driver.expression = active_fcu.driver.expression
def dist_trigger(var, name):
etm = bpy.context.scene.objects[name].data.shape_keys.eval_time
if var > etm:
etm = var
return etm
def register_driver_function(context):
bpy.app.driver_namespace['dist_trigger'] = dist_trigger
for ob in context.scene.objects:
if (ob.data and ob.data.shape_keys and
ob.data.shape_keys.animation_data and
ob.data.shape_keys.animation_data.drivers):
fcus = ob.data.shape_keys.animation_data.drivers
for fcu in fcus:
if fcu.data_path == 'eval_time':
fcu.driver.expression = fcu.driver.expression
def expression_func_set(context):
props = context.scene.commotion
expr = props.sk_drivers_expression_func
for ob in context.selected_objects:
func_expr = "dist_trigger(%s, '%s')" % (expr, ob.name)
fcus = ob.data.shape_keys.animation_data.drivers
for fcu in fcus:
if fcu.data_path == 'eval_time':
fcu.driver.expression = func_expr
| [
"[email protected]"
]
| |
c6a6239b372a7a6543add1f815a61de4f4418db6 | 060b39ef80a00090732b5362427b1f96eda81d09 | /DataProcess/run_feature_extraction.py | 2560d4c88bb1ace582393d880fb054727ddd45c1 | []
| no_license | hphp/Kaggle | 73a7fd4b19b4f1cf6616f72a4309d4769c8a8535 | b27422f8b913c47f484e3abebb1f7aaf6607c6a4 | refs/heads/master | 2016-09-09T22:36:21.945873 | 2013-12-10T04:05:00 | 2013-12-10T04:05:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,617 | py | #!/usr/bin/python
'''
written by hp_carrot
2013-11-26
add resized_pixel_fe()
'''
import os
def convolution_feature_extraction():
piclist = os.listdir("/home/hphp/Documents/data/Kaggle/DogVsCatData/test1/")
t_range = len(piclist)
period = 1000
total = int(t_range/period)
print total
for rr in range(200,total):
start = rr * 1000
end = min((rr+1)*1000,t_range)
cmd = "python feature_extraction.py " + str(start) + " " + str(end)
os.system(cmd)
def color_HSV_feature_extraction(DataHome,img_data_dir,data_csv_file):
piclist = os.listdir(DataHome + img_data_dir)
t_range = len(piclist)
period = 1000
total = int(t_range/period) + 1
print total
for rr in range(total):
start = rr * 1000
end = min((rr+1)*1000,t_range)
if start >= end :
break
cmd = "python DogVsCat_get_hsv_feature.py " + str(start) + " " + str(end) + " " + img_data_dir + " " + data_csv_file
print cmd
os.system(cmd)
def resized_pixel_fe(DataHome, src_img_route, train_feature_filename, valid_feature_filename):
piclist = os.listdir(DataHome + src_img_route)
t_range = len(piclist)
period = 1000
total = int(t_range/period) + 1
print total
for rr in range(total):
start = rr * 1000
end = min((rr+1)*1000,t_range)
if start >= end :
break
cmd = "python DogVsCat_patchtrain_feature.py " + DataHome + " " + src_img_route + " " + train_feature_filename + " " + valid_feature_filename + " " + str(start) + " " + str(end)
print cmd
os.system(cmd)
def g_resized_pixel_fe(cmd_part1, t_range, period):
total = int(t_range/period) + 1
print total
for rr in range(total):
start = rr * period
end = min((rr+1)*period, t_range)
if start >= end :
break
cmd = cmd_part1 + " " + str(start) + " " + str(end)
print cmd
os.system(cmd)
piclist = os.listdir("/home/hphp/Documents/data/Kaggle/CIFAR-10/train/")
t_range = len(piclist)
g_resized_pixel_fe("python feature_extraction_pixel_frm_img.py /home/hphp/Documents/data/Kaggle/CIFAR-10/ train/ train_feature_pixel_v.csv 32 32", t_range, 1000)
#DogVsCat_DataHome = "/home/hphp/Documents/data/Kaggle/DogVsCatData/"
#resized_pixel_fe(DogVsCat_DataHome, "train/", DogVsCat_DataHome+"DogVsCat_train_feature_1w.csv", DogVsCat_DataHome+"DogVsCat_valid_feature_1w.csv")
#color_HSV_feature_extraction(DogVsCat_DataHome,"test1/","test.csv")
#color_HSV_feature_extraction(DogVsCat_DataHome,"train/","train.csv")
| [
"[email protected]"
]
| |
e55b5369d0cbee68194ee983acf794ce6412cbd6 | d8a511b5b871740c13e41079657421ad08e26978 | /wagtailmedia/signal_handlers.py | 80f6db0c29f016c9c6676cd1ea900192b6a38555 | [
"BSD-3-Clause"
]
| permissive | evanwinter/wagtailmedia | 0be38630e9f1375506ba3a5b6b10eee72247dcd8 | e5cc000d6741f78ee44834c1469b64da40a164ed | refs/heads/master | 2020-04-14T18:29:34.129054 | 2019-01-03T20:44:32 | 2019-01-03T20:44:32 | 161,226,229 | 0 | 1 | NOASSERTION | 2018-12-10T19:30:39 | 2018-12-10T19:30:39 | null | UTF-8 | Python | false | false | 472 | py | from django.conf import settings
from django.db import transaction
from django.db.models.signals import post_delete, pre_save
from wagtailmedia.models import get_media_model
def post_delete_file_cleanup(instance, **kwargs):
# Pass false so FileField doesn't save the model.
transaction.on_commit(lambda: instance.file.delete(False))
def register_signal_handlers():
Media = get_media_model()
post_delete.connect(post_delete_file_cleanup, sender=Media)
| [
"[email protected]"
]
| |
aad9d93b67d623651393d22114af6f64db39f48d | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_138/202.py | 24acce18318d21c58ecc931e58583447ad9cae57 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | from bisect import bisect_left
from copy import copy
def get_results(N, K):
N.sort()
K.sort()
L = len(N)
dwar_points = 0
ken_start = 0
for i in xrange(L):
if N[i] > K[ken_start]:
dwar_points += 1
ken_start += 1
war_points = 0
for i in xrange(len(N)-1,-1,-1):
ken_pos = bisect_left(K, N[i])
if ken_pos == len(K):
ken_choice = 0
else:
ken_choice = ken_pos
if N[i] > K[ken_choice]:
war_points += 1
del N[i]
del K[ken_choice]
return (dwar_points, war_points)
def solve(in_name, out_name):
fin = open(in_name, 'r')
L = fin.readlines()
fin.close()
T = int(L[0])
k = 1
res = []
for i in xrange(T):
n = int(L[k])
N = map(float, L[k+1].strip().split())
K = map(float, L[k+2].strip().split())
k += 3
results = get_results(N, K)
res.append('Case #' + str(i+1) + ': ' + str(results[0]) + ' ' + str(results[1]) + '\n')
fout = open(out_name, 'w')
fout.writelines(res)
fout.close()
return
#solve('D-test.in', 'D-test.out')
#solve('D-small-attempt0.in', 'D-small-attempt0.out')
solve('D-large.in', 'D-large.out')
| [
"[email protected]"
]
| |
c163e2cb577dfcda6f3358d435861abcf43a11e1 | ab32e6384b7c679a327a4bf1df6dd24c058b78a5 | /cms/base.py | f994beec56933f28b7297703bb7637ad770aaac1 | []
| no_license | webmaxdev0110/digi-django | ad2497791d6d3b6aa74eb697dd7eef324ebb5846 | 4cd52c07bb64e9d9381a957323d277489a02181a | refs/heads/master | 2020-03-23T13:37:12.600565 | 2017-07-10T10:23:15 | 2017-07-10T10:23:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,085 | py | """
Our base representation for different types of page models.
"""
from django.db import models
from django.utils.translation import ugettext_lazy as _
from feincms.admin import item_editor
from feincms.models import create_base_model
class SimplePageManager(models.Manager):
def published(self):
"""
Filter by pages that are marked as active/published.
"""
return self.filter(published=True)
SimplePageAdmin = item_editor.ItemEditor
class SimplePage(create_base_model(inherit_from=models.Model)):
"""
A simple wrapper on the feincms base model with some common fields
set for use in implemented types.
"""
published = models.BooleanField(_('published'), default=False)
title = models.CharField(_('title'), max_length=100,
help_text=_('This is used for the generated navigation too.'))
class Meta(object):
abstract = True
verbose_name = _('simple page')
verbose_name_plural = _('simple pages')
objects = SimplePageManager()
def __str__(self):
return self.title
| [
"[email protected]"
]
| |
109022396ac7b45bbcd47850815b3f7da8cc38d3 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_118/1519.py | 648f239ba0d124b8971fef4c06e15947f1995be6 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | from bisect import insort, bisect_left, bisect_right
def palin(x):
return str(x) == str(x)[::-1]
arr = []
def gen(N):
for x in range(1, int(N**.5)+1):
if palin(x) and palin(x*x) and 1 <= x*x <= N:
insort(arr, x*x)
def solve(A, B):
l = bisect_left(arr, A)
r = bisect_right(arr, B)
return r-l
if __name__ == '__main__':
gen(10**14)
T = int(raw_input())
for case in range(1,T+1):
A, B = map(int, raw_input().split())
print "Case #{}: {}".format(case, solve(A, B))
| [
"[email protected]"
]
| |
11071e8ceadb00b104d22972d509236a54c3253f | e839a2fdd40effd2cea9c8bbea1629e7a5b453dc | /appinit_backend/app/lib/files/remove.py | da38d92a6b1876c25ec6ed96778bacaa81dab65b | [
"MIT"
]
| permissive | lost-osiris/webplatform-backend | bfb48979fabd0d04104b3b07bd9b7cad2d6cfce6 | 8b1b7c94dbc5314450fbe75b8ca4625d39608d4a | refs/heads/master | 2021-07-06T00:12:32.257988 | 2019-08-21T08:45:21 | 2019-08-21T08:45:21 | 177,480,021 | 0 | 0 | MIT | 2020-09-04T09:08:18 | 2019-03-24T23:04:03 | Python | UTF-8 | Python | false | false | 601 | py | from lib.imports.default import *
import lib.files.meta as meta
action = "remove"
def call(**kwargs):
if "id" in kwargs:
file_obj = meta.call(id=kwargs['id'])
if not file_obj['isAttached']:
__remove(kwargs['id'])
elif "ids" in kwargs and type(kwargs["ids"]) is list:
for fid in kwargs["ids"]:
file_obj = meta.call(id=fid)
            if not file_obj['isAttached']:
__remove(fid)
return True
def __remove(file_id):
import gridfs
manager = Manager()
db = manager.db("files")
fs = gridfs.GridFS(db)
fs.delete(ObjectId(file_id)) | [
"[email protected]"
]
| |
2a0e99bf5aef26fa2fcfc7edcc980199c3190c6c | d10a8314da8ef71d2e63c0ecfbdc60a1cf2d78e2 | /calculate_next/lib/template/parser/template/formats/world.py | d6a2422d3eba9d40fa266c3eeea24f61247cd7da | []
| no_license | Yuego/calculate-experiments | d005376dc6fb0002ac0016beb878d7274707a39e | 40cd601bfea604c887c213d70303938367b7b3b1 | refs/heads/master | 2021-01-22T23:58:04.817503 | 2013-11-22T10:19:48 | 2013-11-22T10:19:48 | 14,140,182 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,120 | py | #coding: utf-8
from __future__ import unicode_literals, absolute_import
from pyparsing import *
from calculate_next.lib.template.parser.rules import slotted_package_atom
from calculate_next.lib.template.parser.template.parser import FormatParser
class WorldFormatParser(FormatParser):
comment = '#'
@classmethod
def _value_atom(cls, s, l, t):
return {t[0].strip(): None}
def get_syntax(self):
_command = Word('!^+-', exact=1)
comment = self.get_comment_rules()
value = Combine(Optional(_command) + slotted_package_atom).setParseAction(self._value_atom)
syntax = ZeroOrMore(comment | value)
return syntax
def collapse_tree(self, d, depth=0):
comments = d.pop('__comments')
result = []
idx = 0
for k, v in d.items():
while idx in comments:
result.extend([comments.pop(idx), '\n'])
idx += 1
idx += 1
result.extend([k, '\n'])
for comment in comments.values():
result.extend([comment, '\n'])
return ''.join(result)
| [
"[email protected]"
]
| |
1a848ab9ed33cb6c5cfa7e042a832a8136ea3894 | 2d0e5f5c6dd2e44ecf4166c81caff17f39c0c638 | /매일 프로그래밍/20201123/solution.py | 8c911b44122c8a89e11407f4557f4b7a1e374f97 | []
| no_license | limkeunhyeok/daily-coding | 17d120a9f499189be3250a501e73e312802508a9 | 960dad7758c99619da0a33c899d5d4d8d8ff524d | refs/heads/master | 2022-04-30T22:32:54.173400 | 2022-04-16T13:15:49 | 2022-04-16T13:15:49 | 174,705,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | # Given a singly linked list, print the value of the list's middle node (in the most efficient way).
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def append(self, data):
newNode = Node(data)
if self.head is None:
self.head = newNode
else:
current = self.head
while current.next is not None:
current = current.next
current.next = newNode
def findMiddle(List):
if List.head is None:
print('-1')
else:
pt1 = List.head
pt2 = List.head
size = 1
while pt1.next is not None:
pt1 = pt1.next
size += 1
cnt = size // 2
while cnt != 0:
pt2 = pt2.next
cnt -= 1
print(pt2.data)
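# For reference, the classic single-pass answer uses two pointers: a fast pointer
# that advances two nodes per step and a slow pointer that advances one, so the
# slow pointer sits on the middle node when the fast pointer reaches the end.
# A sketch using the same Node/LinkedList classes defined above; it prints the same
# middle as findMiddle (the second of the two middles for even-length lists).
def findMiddleTwoPointers(List):
    if List.head is None:
        print('-1')
        return
    slow = fast = List.head
    while fast is not None and fast.next is not None:
        slow = slow.next
        fast = fast.next.next
    print(slow.data)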
l = LinkedList()
l.append(1)
l.append(2)
l.append(3)
l.append(4)
l.append(5)
findMiddle(l) # 3 | [
"[email protected]"
]
| |
65afa1f2ec2766360a434863c0492058e97d2aeb | 2a4ad073755ff447926e44b7c2e0b56b5ded37d2 | /algorithm/sort algorithm/merge_sort.py | 92e56c286905eec9488110d807ad37f09c0b8983 | []
| no_license | wcb2213/Learning_notes | 3a9b3fdb7df5c6844a9031db8dd7e9dd858e093c | d481e1754c15c91557027bee872f4d97da3c0fca | refs/heads/master | 2021-07-06T15:54:56.199655 | 2020-09-04T14:05:50 | 2020-09-04T14:05:50 | 174,832,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | #!/usr/bin/env/ python
# -*- coding:utf-8 -*-
# Created by: Vanish
# Created on: 2019/6/14
"""归并排序"""
# 时间复杂度(平均) 时间复杂度(最坏) 时间复杂度(最好) 空间复杂度 稳定性 复杂性
# O(nlog2n)O(nlog2n) O(nlog2n)O(nlog2n) O(nlog2n)O(nlog2n) O(n)O(n) 稳定 较复杂
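# Why O(n log n): the running time follows the recurrence T(n) = 2*T(n/2) + O(n)
# (two half-sized recursive sorts plus a linear merge), which resolves to
# O(n log n); the O(n) extra space comes from the merged result lists.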
def MergeSort(lists):
if len(lists) < 2:
return lists
num = len(lists) // 2
left = MergeSort(lists[:num])
right = MergeSort(lists[num:])
return Merge(left, right)
def Merge(left, right):  # merge two sorted lists into a single sorted list
r, l=0, 0
res=[]
while l<len(left) and r<len(right):
if left[l] <= right[r]:
res.append(left[l])
l += 1
else:
res.append(right[r])
r += 1
res += list(left[l:])
res += list(right[r:])
return res
if __name__ == '__main__':
lists = [2, 3, 5, 7, 1, 4, 6, 15, 5, 2, 7, 9, 10, 15, 9, 17, 12]
print(MergeSort(lists)) | [
"[email protected]"
]
| |
551e00715914982da405d9a73c65f21cb2aa1ea4 | 2b8d4e22d10ca118fba0100cc87af04f3939448f | /odoo app/dymax/module/modules/warehouse_stock_restrictions/__manifest__.py | 4e1798a9a3ecd835fdcc84b185c9bd028aadcdd0 | []
| no_license | ahmed-amine-ellouze/personal | f10c0a161da709f689a3254ec20486411102a92d | 4fe19ca76523cf274a3a85c8bcad653100ff556f | refs/heads/master | 2023-03-28T23:17:05.402578 | 2021-03-25T13:33:18 | 2021-03-25T13:33:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | # -*- coding: utf-8 -*-
{
'name': "Warehouse Restrictions",
'summary': """
Warehouse and Stock Location Restriction on Users.""",
'description': """
This Module Restricts the User from Accessing Warehouse and Process Stock Moves other than allowed to Warehouses and Stock Locations.
""",
'author': "Techspawn Solutions",
'website': "http://www.techspawn.com",
'category': 'Warehouse',
'version': '14.0',
'images': ['static/description/WarehouseRestrictions.jpg'],
'depends': ['base', 'stock'],
'data': [
'security/ir.model.access.csv',
'security/security.xml',
'views/users_view.xml',
],
}
| [
"[email protected]"
]
| |
41cbd95a1732dcc78afb0031cdcd749613a85d01 | f7aa97fe19b431523f35dc5badc9e8ff919ffa00 | /fss17/project/tools/Discretize.py | 843b20424c0b5b1183c3b4e3a70057c7d79357e5 | [
"Apache-2.0"
]
| permissive | rahlk/fss17 | 3b331427d450c5bb46b71b4aa5c77c59a8ec0a70 | 49e22c4ad01ff751f24c3e5702b7fa36a3a18e96 | refs/heads/master | 2021-01-19T18:03:13.364689 | 2017-12-12T12:51:28 | 2017-12-12T12:51:28 | 101,105,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,945 | py | """
An instance filter that discretizes a range of numeric attributes in the dataset into nominal attributes. Discretization is by Fayyad & Irani's MDL method (the default).
For more information, see:
Usama M. Fayyad, Keki B. Irani: Multi-interval discretization of continuous valued attributes for classification learning. In: Thirteenth International Joint Conference on Artificial Intelligence, 1022-1027, 1993.
Igor Kononenko: On Biases in Estimating Multi-Valued Attributes. In: 14th International Joint Conference on Artificial Intelligence, 1034-1040, 1995.
Dougherty, James, Ron Kohavi, and Mehran Sahami. "Supervised and unsupervised discretization of continuous features." Machine learning: proceedings of the twelfth international conference. Vol. 12. 1995.
"""
from __future__ import division, print_function
from misc import *
import numpy as np
import pandas as pd
from pdb import set_trace
from collections import Counter
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier as CART
def fWeight(tbl):
"""
Sort features based on entropy
"""
clf = CART(criterion='entropy')
features = tbl.columns[:-1]
klass = tbl[tbl.columns[-1]]
try:
clf.fit(tbl[features], [k == True for k in klass])
lbs = clf.feature_importances_
except ValueError:
set_trace()
return [tbl.columns[i] for i in np.argsort(lbs)[::-1]]
def discretize(feature, klass, atleast=-1, discrete=False):
"""
Recursive Minimal Entropy Discretization
````````````````````````````````````````
Inputs:
feature: A list or a numpy array of continuous attributes
klass: A list, or a numpy array of discrete class labels.
atleast: minimum splits.
Outputs:
        splits: A list containing suggested split locations
"""
def measure(x):
def ent(x):
C = Counter(x)
N = len(x)
return sum([-C[n] / N * np.log(C[n] / N) for n in C.keys()])
def stdev(x):
if np.isnan(np.var(x) ** 0.5):
return 0
return np.var(x) ** 0.5
if not discrete:
return ent(x)
else:
return stdev(x)
# Sort features and klass
feature, klass = sorted(feature), [k for (f, k) in
sorted(zip(feature, klass))]
splits = []
gain = []
lvl = 0
def redo(feature, klass, lvl):
if len(feature) > 0:
E = measure(klass)
N = len(klass)
T = [] # Record boundaries of splits
for k in xrange(len(feature)):
west, east = feature[:k], feature[k:]
k_w, k_e = klass[:k], klass[k:]
N_w, N_e = len(west), len(east)
T += [N_w / N * measure(k_w) + N_e / N * measure(k_e)]
T_min = np.argmin(T)
left, right = feature[:T_min], feature[T_min:]
k_l, k_r = klass[:T_min], klass[T_min:]
# set_trace()
def stop(k, k_l, k_r):
gain = E - T[T_min]
def count(lst): return len(Counter(lst).keys())
delta = np.log2(float(3 ** count(k) - 2)) - (
count(k) * measure(k) - count(k_l) * measure(k_l) - count(
k_r) * measure(k_r))
# print(gain, (np.log2(N-1)+delta)/N)
return gain < (np.log2(N - 1) + delta) / N or T_min == 0
if stop(klass, k_l, k_r) and lvl >= atleast:
if discrete:
splits.append(T_min)
else:
splits.append(feature[T_min])
else:
_ = redo(feature=left, klass=k_l, lvl=lvl + 1)
_ = redo(feature=right, klass=k_r, lvl=lvl + 1)
# ------ main ------
redo(feature, klass, lvl=0)
# set_trace()
return splits
def _test0():
"A Test Function"
test = np.random.normal(0, 10, 1000).tolist()
klass = [int(abs(i)) for i in np.random.normal(0, 1, 1000)]
splits = discretize(feature=test, klass=klass)
set_trace()
def _test1():
tbl_loc = explore(name='ant')[0]
tbl = csv2DF(tbl_loc)
new = discreteTbl(tbl)
set_trace()
def discreteTbl(tbl, B=0.33, Prune=True):
"""
Discretize a table
``````````````````
Columns 1 to N-1 represent the independent attributes, column N the dependent.
Parameters:
        tbl - A Pandas DataFrame
B - Cutoff for Pruning Columns (float between 0,1)
Prune - Prune (True/False)
Returns:
        Pandas DataFrame: Discretized table
"""
dtable = []
fweight = fWeight(tbl)
for i, name in enumerate(tbl.columns[:-1]):
new = []
feature = tbl[name].values
klass = tbl[tbl.columns[-1]].values
splits = discretize(feature, klass)
LO, HI = min(feature), max(feature)
cutoffs = sorted(list(set(splits + [LO, HI])))
def pairs(lst):
while len(lst) > 1:
yield (lst.pop(0), lst[0])
cutoffs = [t for t in pairs(sorted(list(set(splits + [LO, HI]))))]
for f in feature:
for n in cutoffs:
if n[0] <= f < n[1]:
new.append(n)
elif f == n[1] == HI:
new.append((n[0], HI))
dtable.append(new)
dtable.append(klass.tolist())
dtable = pd.DataFrame(dtable).T
dtable.columns = tbl.columns
ranks = fWeight(tbl)
if Prune:
return dtable[ranks[:int(len(ranks) * B)] + [tbl.columns[-1]]]
else:
return dtable[ranks + [tbl.columns[-1]]]
if __name__ == '__main__':
_test0()
pass
| [
"[email protected]"
]
| |
318f6e2d56ab4a6dcffe4574ee629c3ded3fd34a | 8eab8ab725c2132bb8d090cdb2d23a5f71945249 | /virt/Lib/site-packages/jedi/api/project.py | da0f31d7ba5a8d7b7d2ff6517cbe9948732fd72c | [
"MIT"
]
| permissive | JoaoSevergnini/metalpy | 6c88a413a82bc25edd9308b8490a76fae8dd76ca | c2d0098a309b6ce8c756ff840bfb53fb291747b6 | refs/heads/main | 2023-04-18T17:25:26.474485 | 2022-09-18T20:44:45 | 2022-09-18T20:44:45 | 474,773,752 | 3 | 1 | MIT | 2022-11-03T20:07:50 | 2022-03-27T22:21:01 | Python | UTF-8 | Python | false | false | 16,613 | py | """
Projects are a way to handle Python projects within Jedi. For simpler plugins
you might not want to deal with projects, but if you want to give the user more
flexibility to define sys paths and Python interpreters for a project,
:class:`.Project` is the perfect way to allow for that.
Projects can be saved to disk and loaded again, to allow project definitions to
be used across repositories.
"""
import json
from pathlib import Path
from itertools import chain
from jedi import debug
from jedi.api.environment import get_cached_default_environment, create_environment
from jedi.api.exceptions import WrongVersion
from jedi.api.completion import search_in_module
from jedi.api.helpers import split_search_string, get_module_names
from jedi.inference.imports import load_module_from_path, \
load_namespace_from_path, iter_module_names
from jedi.inference.sys_path import discover_buildout_paths
from jedi.inference.cache import inference_state_as_method_param_cache
from jedi.inference.references import recurse_find_python_folders_and_files, search_in_file_ios
from jedi.file_io import FolderIO
_CONFIG_FOLDER = '.jedi'
_CONTAINS_POTENTIAL_PROJECT = \
'setup.py', '.git', '.hg', 'requirements.txt', 'MANIFEST.in', 'pyproject.toml'
_SERIALIZER_VERSION = 1
def _try_to_skip_duplicates(func):
def wrapper(*args, **kwargs):
found_tree_nodes = []
found_modules = []
for definition in func(*args, **kwargs):
tree_node = definition._name.tree_name
if tree_node is not None and tree_node in found_tree_nodes:
continue
if definition.type == 'module' and definition.module_path is not None:
if definition.module_path in found_modules:
continue
found_modules.append(definition.module_path)
yield definition
found_tree_nodes.append(tree_node)
return wrapper
def _remove_duplicates_from_path(path):
used = set()
for p in path:
if p in used:
continue
used.add(p)
yield p
class Project:
"""
Projects are a simple way to manage Python folders and define how Jedi does
import resolution. It is mostly used as a parameter to :class:`.Script`.
Additionally there are functions to search a whole project.
"""
_environment = None
@staticmethod
def _get_config_folder_path(base_path):
return base_path.joinpath(_CONFIG_FOLDER)
@staticmethod
def _get_json_path(base_path):
return Project._get_config_folder_path(base_path).joinpath('project.json')
@classmethod
def load(cls, path):
"""
Loads a project from a specific path. You should not provide the path
to ``.jedi/project.json``, but rather the path to the project folder.
:param path: The path of the directory you want to use as a project.
"""
if isinstance(path, str):
path = Path(path)
with open(cls._get_json_path(path)) as f:
version, data = json.load(f)
if version == 1:
return cls(**data)
else:
raise WrongVersion(
"The Jedi version of this project seems newer than what we can handle."
)
def save(self):
"""
Saves the project configuration in the project in ``.jedi/project.json``.
"""
data = dict(self.__dict__)
data.pop('_environment', None)
data.pop('_django', None) # TODO make django setting public?
data = {k.lstrip('_'): v for k, v in data.items()}
data['path'] = str(data['path'])
self._get_config_folder_path(self._path).mkdir(parents=True, exist_ok=True)
with open(self._get_json_path(self._path), 'w') as f:
return json.dump((_SERIALIZER_VERSION, data), f)
def __init__(
self,
path,
*,
environment_path=None,
load_unsafe_extensions=False,
sys_path=None,
added_sys_path=(),
smart_sys_path=True,
) -> None:
"""
:param path: The base path for this project.
:param environment_path: The Python executable path, typically the path
of a virtual environment.
        :param load_unsafe_extensions: Default False. Loads extensions that are not in the
            sys path and in the local directories. With this option enabled,
            this is potentially unsafe if you clone a git repository and
            analyze its code, because those compiled extensions will be
            imported and therefore have execution privileges.
        :param sys_path: list of str. You can override the sys path if you
            want. By default the ``sys.path`` is generated by the
            environment (virtualenvs, etc).
:param added_sys_path: list of str. Adds these paths at the end of the
sys path.
:param smart_sys_path: If this is enabled (default), adds paths from
local directories. Otherwise you will have to rely on your packages
being properly configured on the ``sys.path``.
"""
if isinstance(path, str):
path = Path(path).absolute()
self._path = path
self._environment_path = environment_path
if sys_path is not None:
# Remap potential pathlib.Path entries
sys_path = list(map(str, sys_path))
self._sys_path = sys_path
self._smart_sys_path = smart_sys_path
self._load_unsafe_extensions = load_unsafe_extensions
self._django = False
# Remap potential pathlib.Path entries
self.added_sys_path = list(map(str, added_sys_path))
"""The sys path that is going to be added at the end of the """
@property
def path(self):
"""
The base path for this project.
"""
return self._path
@property
def sys_path(self):
"""
The sys path provided to this project. This can be None and in that
case will be auto generated.
"""
return self._sys_path
@property
def smart_sys_path(self):
"""
If the sys path is going to be calculated in a smart way, where
additional paths are added.
"""
return self._smart_sys_path
@property
def load_unsafe_extensions(self):
"""
        Whether the project loads unsafe extensions.
"""
return self._load_unsafe_extensions
@inference_state_as_method_param_cache()
def _get_base_sys_path(self, inference_state):
# The sys path has not been set explicitly.
sys_path = list(inference_state.environment.get_sys_path())
try:
sys_path.remove('')
except ValueError:
pass
return sys_path
@inference_state_as_method_param_cache()
def _get_sys_path(self, inference_state, add_parent_paths=True, add_init_paths=False):
"""
Keep this method private for all users of jedi. However internally this
one is used like a public method.
"""
suffixed = list(self.added_sys_path)
prefixed = []
if self._sys_path is None:
sys_path = list(self._get_base_sys_path(inference_state))
else:
sys_path = list(self._sys_path)
if self._smart_sys_path:
prefixed.append(str(self._path))
if inference_state.script_path is not None:
suffixed += map(str, discover_buildout_paths(
inference_state,
inference_state.script_path
))
if add_parent_paths:
# Collect directories in upward search by:
# 1. Skipping directories with __init__.py
# 2. Stopping immediately when above self._path
traversed = []
for parent_path in inference_state.script_path.parents:
if parent_path == self._path \
or self._path not in parent_path.parents:
break
if not add_init_paths \
and parent_path.joinpath("__init__.py").is_file():
continue
traversed.append(str(parent_path))
                # AFAIK some libraries have imports like `foo.foo.bar`, which
                # leads to the conclusion that longer paths should be preferred
                # over shorter ones by default.
suffixed += reversed(traversed)
if self._django:
prefixed.append(str(self._path))
path = prefixed + sys_path + suffixed
return list(_remove_duplicates_from_path(path))
def get_environment(self):
if self._environment is None:
if self._environment_path is not None:
self._environment = create_environment(self._environment_path, safe=False)
else:
self._environment = get_cached_default_environment()
return self._environment
def search(self, string, *, all_scopes=False):
"""
Searches a name in the whole project. If the project is very big,
at some point Jedi will stop searching. However it's also very much
recommended to not exhaust the generator. Just display the first ten
results to the user.
There are currently three different search patterns:
- ``foo`` to search for a definition foo in any file or a file called
``foo.py`` or ``foo.pyi``.
- ``foo.bar`` to search for the ``foo`` and then an attribute ``bar``
in it.
- ``class foo.bar.Bar`` or ``def foo.bar.baz`` to search for a specific
API type.
:param bool all_scopes: Default False; searches not only for
definitions on the top level of a module level, but also in
functions and classes.
:yields: :class:`.Name`
"""
return self._search_func(string, all_scopes=all_scopes)
def complete_search(self, string, **kwargs):
"""
Like :meth:`.Script.search`, but completes that string. An empty string
lists all definitions in a project, so be careful with that.
:param bool all_scopes: Default False; searches not only for
definitions on the top level of a module level, but also in
functions and classes.
:yields: :class:`.Completion`
"""
return self._search_func(string, complete=True, **kwargs)
@_try_to_skip_duplicates
def _search_func(self, string, complete=False, all_scopes=False):
        # Using a Script is the easiest way to get an empty module context.
from jedi import Script
s = Script('', project=self)
inference_state = s._inference_state
empty_module_context = s._get_module_context()
debug.dbg('Search for string %s, complete=%s', string, complete)
wanted_type, wanted_names = split_search_string(string)
name = wanted_names[0]
stub_folder_name = name + '-stubs'
ios = recurse_find_python_folders_and_files(FolderIO(str(self._path)))
file_ios = []
# 1. Search for modules in the current project
for folder_io, file_io in ios:
if file_io is None:
file_name = folder_io.get_base_name()
if file_name == name or file_name == stub_folder_name:
f = folder_io.get_file_io('__init__.py')
try:
m = load_module_from_path(inference_state, f).as_context()
except FileNotFoundError:
f = folder_io.get_file_io('__init__.pyi')
try:
m = load_module_from_path(inference_state, f).as_context()
except FileNotFoundError:
m = load_namespace_from_path(inference_state, folder_io).as_context()
else:
continue
else:
file_ios.append(file_io)
if Path(file_io.path).name in (name + '.py', name + '.pyi'):
m = load_module_from_path(inference_state, file_io).as_context()
else:
continue
debug.dbg('Search of a specific module %s', m)
yield from search_in_module(
inference_state,
m,
names=[m.name],
wanted_type=wanted_type,
wanted_names=wanted_names,
complete=complete,
convert=True,
ignore_imports=True,
)
# 2. Search for identifiers in the project.
for module_context in search_in_file_ios(inference_state, file_ios,
name, complete=complete):
names = get_module_names(module_context.tree_node, all_scopes=all_scopes)
names = [module_context.create_name(n) for n in names]
names = _remove_imports(names)
yield from search_in_module(
inference_state,
module_context,
names=names,
wanted_type=wanted_type,
wanted_names=wanted_names,
complete=complete,
ignore_imports=True,
)
# 3. Search for modules on sys.path
sys_path = [
p for p in self._get_sys_path(inference_state)
            # Exclude folders that are already handled by recursing into the
            # Python folders.
if not p.startswith(str(self._path))
]
names = list(iter_module_names(inference_state, empty_module_context, sys_path))
yield from search_in_module(
inference_state,
empty_module_context,
names=names,
wanted_type=wanted_type,
wanted_names=wanted_names,
complete=complete,
convert=True,
)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._path)
def _is_potential_project(path):
for name in _CONTAINS_POTENTIAL_PROJECT:
try:
if path.joinpath(name).exists():
return True
except OSError:
continue
return False
def _is_django_path(directory):
""" Detects the path of the very well known Django library (if used) """
try:
with open(directory.joinpath('manage.py'), 'rb') as f:
return b"DJANGO_SETTINGS_MODULE" in f.read()
except (FileNotFoundError, IsADirectoryError, PermissionError):
return False
def get_default_project(path=None):
"""
If a project is not defined by the user, Jedi tries to define a project by
itself as well as possible. Jedi traverses folders until it finds one of
the following:
    1. A ``.jedi/project.json`` (a previously saved project configuration)
2. One of the following files: ``setup.py``, ``.git``, ``.hg``,
``requirements.txt`` and ``MANIFEST.in``.
"""
if path is None:
path = Path.cwd()
elif isinstance(path, str):
path = Path(path)
check = path.absolute()
probable_path = None
first_no_init_file = None
for dir in chain([check], check.parents):
try:
return Project.load(dir)
except (FileNotFoundError, IsADirectoryError, PermissionError):
pass
except NotADirectoryError:
continue
if first_no_init_file is None:
if dir.joinpath('__init__.py').exists():
                # In the case that an __init__.py exists, in 99% of cases it's just a
                # Python package and the project sits at least one level above.
continue
elif not dir.is_file():
first_no_init_file = dir
if _is_django_path(dir):
project = Project(dir)
project._django = True
return project
if probable_path is None and _is_potential_project(dir):
probable_path = dir
if probable_path is not None:
# TODO search for setup.py etc
return Project(probable_path)
if first_no_init_file is not None:
return Project(first_no_init_file)
curdir = path if path.is_dir() else path.parent
return Project(curdir)
def _remove_imports(names):
return [
n for n in names
if n.tree_name is None or n.api_type not in ('module', 'namespace')
]
| [
"[email protected]"
]
| |
f752ed117122b654d3db7de4b0b29d175e3d6732 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/sets_20200605201123.py | da4da35d79893db365b73571b8ec063d8489a308 | []
| no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | def Strings(str):
values = {}
newArray = []
keys = []
for i in str:
newArray.append(i.split(":"))
for j in range(len(newArray)):
if newArray[j][0] in values:
values[newArray[j][0]] += int(newArray[j][1])
else:
values[newArray[j][0]] = int(newArray[j][1])
for k in values:
keys.append(k)
keys = sorted(keys)
    # complete the dangling print from the original snapshot: emit the summed
    # values as "key:value" pairs, ordered by key
    result = ",".join("{}:{}".format(k, values[k]) for k in keys)
    print(result)
    return result
Strings(["Z:1","B:3","C:3","Z:4","B:2"])
| [
"[email protected]"
]
| |
d4fd9849fa05350b943d25108223405f5d1ff1e1 | 24946a607d5f6425f07d6def4968659c627e5324 | /Python/any-or-all.py | 4fbe2d7a06841045541ba086ab6a9fd5e9056ae0 | []
| no_license | mmrubayet/HackerRank_solutions | 5d8acbb8fd6f305a006f147e6cb76dbfc71bbca5 | f1c72fbf730b6a79656d578f6c40a128a6f0ac5c | refs/heads/master | 2023-06-02T16:51:18.017902 | 2021-06-19T18:35:41 | 2021-06-19T18:35:41 | 233,853,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | n, ar = int(input()), input().split()
print(all([int(i)>0 for i in ar]) and any([j == j[::-1] for j in ar]))
| [
"[email protected]"
]
| |
561c07f563101185de123baff76553af01f9f150 | d1e2f5993573a16ed6cf359215e596814db33ad7 | /flaskm/db_respository/versions/003_migration.py | c26e106263a75f6e4f7112810b5f90ddb811e57f | []
| no_license | Andor-Z/My-Learning-Note | a6b62fd10119cede9ba4c6c79b2dcb5c346d11e0 | 202401f1be1f9f7c32049623315c0c54720498f7 | refs/heads/master | 2022-10-22T13:55:44.821097 | 2016-07-10T09:21:02 | 2016-07-10T09:21:02 | 42,592,078 | 1 | 1 | null | 2022-10-20T21:49:08 | 2015-09-16T14:24:01 | Python | UTF-8 | Python | false | false | 1,610 | py | from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
from datetime import datetime
pre_meta = MetaData()
post_meta = MetaData()
alembic_version = Table('alembic_version', pre_meta,
Column('version_num', VARCHAR(length=32), nullable=False),
)
users = Table('users', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=64)),
Column('location', String(length=64)),
Column('about_me', Text),
    # NOTE: sqlalchemy-migrate dumped unparseable function reprs as the defaults here;
    # datetime.utcnow (the model's default for these columns) is assumed instead.
    Column('member_since', DateTime, default=ColumnDefault(datetime.utcnow)),
    Column('last_seen', DateTime, default=ColumnDefault(datetime.utcnow)),
Column('email', String(length=64)),
Column('username', String(length=64)),
Column('role_id', Integer),
Column('password_hash', String(length=128)),
Column('confirmed', Boolean, default=ColumnDefault(False)),
Column('avatar_hash', String(length=32)),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['alembic_version'].drop()
post_meta.tables['users'].columns['avatar_hash'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['alembic_version'].create()
post_meta.tables['users'].columns['avatar_hash'].drop()
| [
"[email protected]"
]
| |
5918ac3617d6a5c640a6e0aca7193152daaf268f | b0a217700c563c4f057f2aebbde8faba4b1b26d2 | /software/glasgow/gateware/analyzer.py | bb4a69bc6366646bbb3c9b40d54291e9a389cd88 | [
"0BSD",
"Apache-2.0"
]
| permissive | kbeckmann/Glasgow | 5d183865da4fb499099d4c17e878a76192b691e7 | cd31e293cb99ee10a3e4a03ff26f6f124e512c64 | refs/heads/master | 2021-09-15T15:59:38.211633 | 2018-11-15T22:36:04 | 2018-11-22T21:13:59 | 157,077,707 | 3 | 0 | NOASSERTION | 2018-11-11T12:33:49 | 2018-11-11T12:33:48 | null | UTF-8 | Python | false | false | 29,898 | py | from functools import reduce
from collections import OrderedDict
from migen import *
from migen.fhdl.bitcontainer import log2_int
from migen.genlib.fifo import _FIFOInterface, SyncFIFOBuffered
from migen.genlib.coding import PriorityEncoder, PriorityDecoder
from migen.genlib.fsm import FSM
__all__ = ["EventSource", "EventAnalyzer", "TraceDecodingError", "TraceDecoder"]
REPORT_DELAY = 0b10000000
REPORT_DELAY_MASK = 0b10000000
REPORT_EVENT = 0b01000000
REPORT_EVENT_MASK = 0b11000000
REPORT_SPECIAL = 0b00000000
REPORT_SPECIAL_MASK = 0b11000000
SPECIAL_DONE = 0b000000
SPECIAL_OVERRUN = 0b000001
SPECIAL_THROTTLE = 0b000010
SPECIAL_DETHROTTLE = 0b000011
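# Serialized trace format, summarized from the constants above and the TraceDecoder below:
#   1ddddddd  -- delay septet; consecutive septets accumulate 7 bits of delay each
#   01ssssss  -- event report for source index s, followed by that source's data bytes,
#                most significant byte first
#   00cccccc  -- special report c (done / overrun / throttle / dethrottle)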
class EventSource(Module):
def __init__(self, name, kind, width, fields, depth):
assert (width > 0 and kind in ("change", "strobe") or
width == 0 and kind == "strobe")
self.name = name
self.width = width
self.fields = fields
self.depth = depth
self.kind = kind
self.data = Signal(max(1, width))
self.trigger = Signal()
class EventAnalyzer(Module):
"""
An event analyzer module.
This event analyzer is designed to observe parallel, bursty processes in real-time, and yet
degrade gracefully (i.e. without losing data or breaking most applets) when observing processes
that generate events continuously, or generate very many simultaneous events for a short time.
To do this, the event analyzer is permitted to pause any applets marked as purely synchronous
once the event FIFO high-water mark is reached.
The event analyzer tries to make efficient use of power-of-2 wide block RAMs and be highly
tunable. To achieve this, it separates the event FIFO from the event data FIFOs, and does not
    store timestamps explicitly. In a system with `n` events, each of which carries `d_n` bits
of data, there would be a single event FIFO that is `n` bits wide, where a bit being set means
that event `n` occurred at a given cycle; `n` event data FIFOs that are `d_n` bits wide each,
where, if a bit is set in the event FIFO, a data word is pushed into the event data FIFO; and
finally, one delay FIFO, where the last entry is incremented on every cycle that has
no event, and a new entry is pushed on every cycle there is at least one event. This way,
only cycles that have at least one event add new FIFO entries, and only one wide timestamp
counter needs to be maintained, greatly reducing the amount of necessary resources compared
to a more naive approach.
"""
@staticmethod
def _depth_for_width(width):
if width == 0:
return 0
elif width <= 2:
return 2048
elif width <= 4:
return 1024
elif width <= 8:
return 512
else:
return 256
def __init__(self, output_fifo, event_depth=None, delay_width=16):
assert output_fifo.width == 8
self.output_fifo = output_fifo
self.delay_width = delay_width
self.event_depth = event_depth
self.event_sources = Array()
self.done = Signal()
self.throttle = Signal()
self.overrun = Signal()
def add_event_source(self, name, kind, width, fields=(), depth=None):
if depth is None:
depth = self._depth_for_width(width)
event_source = EventSource(name, kind, width, fields, depth)
self.event_sources.append(event_source)
return event_source
def do_finalize(self):
assert len(self.event_sources) < 2 ** 6
assert max(s.width for s in self.event_sources) <= 32
# Fill the event, event data, and delay FIFOs.
throttle_on = Signal()
throttle_off = Signal()
throttle_edge = Signal()
throttle_fifos = []
self.sync += [
If(~self.throttle & throttle_on,
self.throttle.eq(1),
throttle_edge.eq(1)
).Elif(self.throttle & throttle_off,
self.throttle.eq(0),
throttle_edge.eq(1)
).Else(
throttle_edge.eq(0)
)
]
overrun_trip = Signal()
overrun_fifos = []
self.sync += [
If(overrun_trip,
self.overrun.eq(1)
)
]
event_width = 1 + len(self.event_sources)
if self.event_depth is None:
event_depth = min(self._depth_for_width(event_width),
self._depth_for_width(self.delay_width))
else:
event_depth = self.event_depth
self.submodules.event_fifo = event_fifo = \
SyncFIFOBuffered(width=event_width, depth=event_depth)
throttle_fifos.append(self.event_fifo)
self.comb += [
event_fifo.din.eq(Cat(self.throttle, [s.trigger for s in self.event_sources])),
event_fifo.we.eq(reduce(lambda a, b: a | b, (s.trigger for s in self.event_sources)) |
throttle_edge)
]
self.submodules.delay_fifo = delay_fifo = \
SyncFIFOBuffered(width=self.delay_width, depth=event_depth)
delay_timer = self._delay_timer = Signal(self.delay_width)
delay_ovrun = ((1 << self.delay_width) - 1)
delay_max = delay_ovrun - 1
self.sync += [
If(delay_fifo.we,
delay_timer.eq(0)
).Else(
delay_timer.eq(delay_timer + 1)
)
]
self.comb += [
delay_fifo.din.eq(Mux(self.overrun, delay_ovrun, delay_timer)),
delay_fifo.we.eq(event_fifo.we | (delay_timer == delay_max) |
self.done | self.overrun),
]
for event_source in self.event_sources:
if event_source.width > 0:
event_source.submodules.data_fifo = event_data_fifo = \
SyncFIFOBuffered(event_source.width, event_source.depth)
self.submodules += event_source
throttle_fifos.append(event_data_fifo)
self.comb += [
event_data_fifo.din.eq(event_source.data),
event_data_fifo.we.eq(event_source.trigger),
]
else:
event_source.submodules.data_fifo = _FIFOInterface(1, 0)
# Throttle applets based on FIFO levels with hysteresis.
self.comb += [
throttle_on .eq(reduce(lambda a, b: a | b,
(f.fifo.level >= f.depth - f.depth // (4 if f.depth > 4 else 2)
for f in throttle_fifos))),
throttle_off.eq(reduce(lambda a, b: a & b,
(f.fifo.level < f.depth // (4 if f.depth > 4 else 2)
for f in throttle_fifos))),
]
# Detect imminent FIFO overrun and trip overrun indication.
self.comb += [
overrun_trip.eq(reduce(lambda a, b: a | b,
(f.fifo.level == f.depth - 2
for f in throttle_fifos)))
]
# Dequeue events, and serialize events and event data.
self.submodules.event_encoder = event_encoder = \
PriorityEncoder(width=len(self.event_sources))
self.submodules.event_decoder = event_decoder = \
PriorityDecoder(width=len(self.event_sources))
self.comb += event_decoder.i.eq(event_encoder.o)
self.submodules.serializer = serializer = FSM(reset_state="WAIT-EVENT")
rep_overrun = Signal()
rep_throttle_new = Signal()
rep_throttle_cur = Signal()
delay_septets = 5
delay_counter = Signal(7 * delay_septets)
serializer.act("WAIT-EVENT",
If(delay_fifo.readable,
delay_fifo.re.eq(1),
NextValue(delay_counter, delay_counter + delay_fifo.dout + 1),
If(delay_fifo.dout == delay_ovrun,
NextValue(rep_overrun, 1),
NextState("REPORT-DELAY")
)
),
If(event_fifo.readable,
event_fifo.re.eq(1),
NextValue(event_encoder.i, event_fifo.dout[1:]),
NextValue(rep_throttle_new, event_fifo.dout[0]),
If((event_fifo.dout != 0) | (rep_throttle_cur != event_fifo.dout[0]),
NextState("REPORT-DELAY")
)
).Elif(self.done,
NextState("REPORT-DELAY")
)
)
serializer.act("REPORT-DELAY",
If(delay_counter >= 128 ** 4,
NextState("REPORT-DELAY-5")
).Elif(delay_counter >= 128 ** 3,
NextState("REPORT-DELAY-4")
).Elif(delay_counter >= 128 ** 2,
NextState("REPORT-DELAY-3")
).Elif(delay_counter >= 128 ** 1,
NextState("REPORT-DELAY-2")
).Else(
NextState("REPORT-DELAY-1")
)
)
for septet_no in range(delay_septets, 0, -1):
if septet_no == 1:
next_state = [
NextValue(delay_counter, 0),
If(rep_overrun,
NextState("REPORT-OVERRUN")
).Elif(rep_throttle_cur != rep_throttle_new,
NextState("REPORT-THROTTLE")
).Elif(event_encoder.i,
NextState("REPORT-EVENT")
).Elif(self.done,
NextState("REPORT-DONE")
).Else(
NextState("WAIT-EVENT")
)
]
else:
next_state = [
NextState("REPORT-DELAY-%d" % (septet_no - 1))
]
serializer.act("REPORT-DELAY-%d" % septet_no,
If(self.output_fifo.writable,
self.output_fifo.din.eq(
REPORT_DELAY | delay_counter.part((septet_no - 1) * 7, 7)),
self.output_fifo.we.eq(1),
*next_state
)
)
serializer.act("REPORT-THROTTLE",
If(self.output_fifo.writable,
NextValue(rep_throttle_cur, rep_throttle_new),
If(rep_throttle_new,
self.output_fifo.din.eq(REPORT_SPECIAL | SPECIAL_THROTTLE),
).Else(
self.output_fifo.din.eq(REPORT_SPECIAL | SPECIAL_DETHROTTLE),
),
self.output_fifo.we.eq(1),
If(event_encoder.n,
NextState("WAIT-EVENT")
).Else(
NextState("REPORT-EVENT")
)
)
)
event_source = self.event_sources[event_encoder.o]
event_data = Signal(32)
serializer.act("REPORT-EVENT",
If(self.output_fifo.writable,
NextValue(event_encoder.i, event_encoder.i & ~event_decoder.o),
self.output_fifo.din.eq(
REPORT_EVENT | event_encoder.o),
self.output_fifo.we.eq(1),
NextValue(event_data, event_source.data_fifo.dout),
event_source.data_fifo.re.eq(1),
If(event_source.width > 24,
NextState("REPORT-EVENT-DATA-4")
).Elif(event_source.width > 16,
NextState("REPORT-EVENT-DATA-3")
).Elif(event_source.width > 8,
NextState("REPORT-EVENT-DATA-2")
).Elif(event_source.width > 0,
NextState("REPORT-EVENT-DATA-1")
).Else(
If(event_encoder.i & ~event_decoder.o,
NextState("REPORT-EVENT")
).Else(
NextState("WAIT-EVENT")
)
)
)
)
for octet_no in range(4, 0, -1):
if octet_no == 1:
next_state = [
If(event_encoder.n,
NextState("WAIT-EVENT")
).Else(
NextState("REPORT-EVENT")
)
]
else:
next_state = [
NextState("REPORT-EVENT-DATA-%d" % (octet_no - 1))
]
serializer.act("REPORT-EVENT-DATA-%d" % octet_no,
If(self.output_fifo.writable,
self.output_fifo.din.eq(event_data.part((octet_no - 1) * 8, 8)),
self.output_fifo.we.eq(1),
*next_state
)
)
serializer.act("REPORT-DONE",
If(self.output_fifo.writable,
self.output_fifo.din.eq(REPORT_SPECIAL | SPECIAL_DONE),
self.output_fifo.we.eq(1),
NextState("DONE")
)
)
serializer.act("DONE",
If(~self.done,
NextState("WAIT-EVENT")
)
)
serializer.act("REPORT-OVERRUN",
If(self.output_fifo.writable,
self.output_fifo.din.eq(REPORT_SPECIAL | SPECIAL_OVERRUN),
self.output_fifo.we.eq(1),
NextState("OVERRUN")
)
)
serializer.act("OVERRUN",
NextState("OVERRUN")
)
class TraceDecodingError(Exception):
pass
class TraceDecoder:
"""
Event analyzer trace decoder.
Decodes raw analyzer traces into a timestamped sequence of maps from event fields to
their values.
"""
def __init__(self, event_sources, absolute_timestamps=True):
self.event_sources = event_sources
self.absolute_timestamps = absolute_timestamps
self._state = "IDLE"
self._byte_off = 0
self._timestamp = 0
self._delay = 0
self._event_src = 0
self._event_off = 0
self._event_data = 0
self._pending = OrderedDict()
self._timeline = []
def events(self):
"""
Return names and widths for all events that may be emitted by this trace decoder.
"""
yield ("throttle", "throttle", 1)
for event_src in self.event_sources:
if event_src.fields:
for field_name, field_width in event_src.fields:
yield ("%s-%s" % (field_name, event_src.name), event_src.kind, field_width)
else:
yield (event_src.name, event_src.kind, event_src.width)
def _flush_timestamp(self):
if self._delay == 0:
return
if self._pending:
self._timeline.append((self._timestamp, self._pending))
self._pending = OrderedDict()
if self.absolute_timestamps:
self._timestamp += self._delay
else:
self._timestamp = self._delay
self._delay = 0
def process(self, data):
"""
Incrementally parse a chunk of analyzer trace, and record events in it.
"""
for octet in data:
is_delay = ((octet & REPORT_DELAY_MASK) == REPORT_DELAY)
is_event = ((octet & REPORT_EVENT_MASK) == REPORT_EVENT)
is_special = ((octet & REPORT_SPECIAL_MASK) == REPORT_SPECIAL)
special = octet & ~REPORT_SPECIAL
if self._state == "IDLE" and is_delay:
self._state = "DELAY"
self._delay = octet & ~REPORT_DELAY_MASK
elif self._state == "DELAY" and is_delay:
self._delay = (self._delay << 7) | (octet & ~REPORT_DELAY_MASK)
elif self._state == "DELAY" and is_special and \
special in (SPECIAL_THROTTLE, SPECIAL_DETHROTTLE):
self._flush_timestamp()
if special == SPECIAL_THROTTLE:
self._pending["throttle"] = 1
elif special == SPECIAL_DETHROTTLE:
self._pending["throttle"] = 0
elif self._state in ("IDLE", "DELAY") and is_event:
self._flush_timestamp()
if (octet & ~REPORT_EVENT_MASK) > len(self.event_sources):
raise TraceDecodingError("at byte offset %d: event source out of bounds" %
self._byte_off)
self._event_src = self.event_sources[octet & ~REPORT_EVENT_MASK]
if self._event_src.width == 0:
self._pending[self._event_src.name] = None
self._state = "IDLE"
else:
self._event_off = self._event_src.width
self._event_data = 0
self._state = "EVENT"
elif self._state == "EVENT":
self._event_data <<= 8
self._event_data |= octet
if self._event_off > 8:
self._event_off -= 8
else:
if self._event_src.fields:
offset = 0
for field_name, field_width in self._event_src.fields:
self._pending["%s-%s" % (field_name, self._event_src.name)] = \
(self._event_data >> offset) & ((1 << field_width) - 1)
offset += field_width
else:
self._pending[self._event_src.name] = self._event_data
self._state = "IDLE"
elif self._state in "DELAY" and is_special and \
special in (SPECIAL_DONE, SPECIAL_OVERRUN):
self._flush_timestamp()
if special == SPECIAL_DONE:
self._state = "DONE"
elif special == SPECIAL_OVERRUN:
self._state = "OVERRUN"
else:
raise TraceDecodingError("at byte offset %d: invalid byte %#04x for state %s" %
(self._byte_off, octet, self._state))
self._byte_off += 1
def flush(self, pending=False):
"""
Return the complete event timeline since the start of decoding or the previous flush.
If ``pending`` is ``True``, also flushes pending events; this may cause duplicate
timestamps if more events arrive after the flush.
"""
if self._state == "OVERRUN":
self._timeline.append((self._timestamp, "overrun"))
elif pending and self._pending or self._state == "DONE":
self._timeline.append((self._timestamp, self._pending))
self._pending = OrderedDict()
timeline, self._timeline = self._timeline, []
return timeline
def is_done(self):
return self._state in ("DONE", "OVERRUN")
# -------------------------------------------------------------------------------------------------
import unittest
from migen.fhdl import verilog
from . import simulation_test
class EventAnalyzerTestbench(Module):
def __init__(self, **kwargs):
self.submodules.fifo = SyncFIFOBuffered(width=8, depth=64)
self.submodules.dut = EventAnalyzer(self.fifo, **kwargs)
def trigger(self, index, data):
yield self.dut.event_sources[index].trigger.eq(1)
yield self.dut.event_sources[index].data.eq(data)
def step(self):
yield
for event_source in self.dut.event_sources:
yield event_source.trigger.eq(0)
def read(self, count, limit=128):
data = []
cycle = 0
while len(data) < count:
while not (yield self.fifo.readable) and cycle < limit:
yield
cycle += 1
if not (yield self.fifo.readable):
raise ValueError("FIFO underflow")
data.append((yield from self.fifo.read()))
cycle = 16
while not (yield self.fifo.readable) and cycle < limit:
yield
cycle += 1
if (yield self.fifo.readable):
raise ValueError("junk in FIFO: %#04x at %d" % ((yield self.fifo.dout), count))
return data
class EventAnalyzerTestCase(unittest.TestCase):
def setUp(self):
self.tb = EventAnalyzerTestbench(event_depth=16)
def configure(self, tb, sources):
for n, args in enumerate(sources):
if not isinstance(args, tuple):
args = (args,)
tb.dut.add_event_source(str(n), "strobe", *args)
def assertEmitted(self, tb, data, decoded, flush_pending=True):
self.assertEqual((yield from tb.read(len(data))), data)
decoder = TraceDecoder(self.tb.dut.event_sources)
decoder.process(data)
self.assertEqual(decoder.flush(flush_pending), decoded)
@simulation_test(sources=(8,))
def test_one_8bit_src(self, tb):
yield from tb.trigger(0, 0xaa)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xaa,
], [
(2, {"0": 0xaa}),
])
@simulation_test(sources=(8,8))
def test_two_8bit_src(self, tb):
yield from tb.trigger(0, 0xaa)
yield from tb.trigger(1, 0xbb)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xaa,
REPORT_EVENT|1, 0xbb,
], [
(2, {"0": 0xaa, "1": 0xbb}),
])
@simulation_test(sources=(12,))
def test_one_12bit_src(self, tb):
yield from tb.trigger(0, 0xabc)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0x0a, 0xbc,
], [
(2, {"0": 0xabc}),
])
@simulation_test(sources=(16,))
def test_one_16bit_src(self, tb):
yield from tb.trigger(0, 0xabcd)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xab, 0xcd,
], [
(2, {"0": 0xabcd}),
])
@simulation_test(sources=(24,))
def test_one_24bit_src(self, tb):
yield from tb.trigger(0, 0xabcdef)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xab, 0xcd, 0xef
], [
(2, {"0": 0xabcdef}),
])
@simulation_test(sources=(32,))
def test_one_32bit_src(self, tb):
yield from tb.trigger(0, 0xabcdef12)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xab, 0xcd, 0xef, 0x12
], [
(2, {"0": 0xabcdef12}),
])
@simulation_test(sources=(0,))
def test_one_0bit_src(self, tb):
yield from tb.trigger(0, 0)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0,
], [
(2, {"0": None}),
])
@simulation_test(sources=(0,0))
def test_two_0bit_src(self, tb):
yield from tb.trigger(0, 0)
yield from tb.trigger(1, 0)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0,
REPORT_EVENT|1,
], [
(2, {"0": None, "1": None}),
])
@simulation_test(sources=(0,1))
def test_0bit_1bit_src(self, tb):
yield from tb.trigger(0, 0)
yield from tb.trigger(1, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0,
REPORT_EVENT|1, 0b1
], [
(2, {"0": None, "1": 0b1}),
])
@simulation_test(sources=(1,0))
def test_1bit_0bit_src(self, tb):
yield from tb.trigger(0, 1)
yield from tb.trigger(1, 0)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0b1,
REPORT_EVENT|1,
], [
(2, {"0": 0b1, "1": None}),
])
@simulation_test(sources=((3, (("a", 1), ("b", 2))),))
def test_fields(self, tb):
yield from tb.trigger(0, 0b101)
yield from tb.step()
yield from tb.trigger(0, 0b110)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0b101,
REPORT_DELAY|1,
REPORT_EVENT|0, 0b110,
], [
(2, {"a-0": 0b1, "b-0": 0b10}),
(3, {"a-0": 0b0, "b-0": 0b11}),
])
@simulation_test(sources=(8,))
def test_delay(self, tb):
yield
yield
yield from tb.trigger(0, 0xaa)
yield from tb.step()
yield
yield from tb.trigger(0, 0xbb)
yield from tb.step()
yield
yield
yield from self.assertEmitted(tb, [
REPORT_DELAY|4,
REPORT_EVENT|0, 0xaa,
REPORT_DELAY|2,
REPORT_EVENT|0, 0xbb,
], [
(4, {"0": 0xaa}),
(6, {"0": 0xbb}),
])
@simulation_test(sources=(1,))
def test_delay_2_septet(self, tb):
yield tb.dut._delay_timer.eq(0b1_1110000)
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000001,
REPORT_DELAY|0b1110001,
REPORT_EVENT|0, 0b1
], [
(0b1_1110001, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_delay_3_septet(self, tb):
yield tb.dut._delay_timer.eq(0b01_0011000_1100011)
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000001,
REPORT_DELAY|0b0011000,
REPORT_DELAY|0b1100100,
REPORT_EVENT|0, 0b1
], [
(0b01_0011000_1100100, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_delay_max(self, tb):
yield tb.dut._delay_timer.eq(0xfffe)
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000011,
REPORT_DELAY|0b1111111,
REPORT_DELAY|0b1111111,
REPORT_EVENT|0, 0b1
], [
(0xffff, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_delay_overflow(self, tb):
yield tb.dut._delay_timer.eq(0xfffe)
yield
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000100,
REPORT_DELAY|0b0000000,
REPORT_DELAY|0b0000000,
REPORT_EVENT|0, 0b1
], [
(0x10000, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_delay_overflow_p1(self, tb):
yield tb.dut._delay_timer.eq(0xfffe)
yield
yield
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000100,
REPORT_DELAY|0b0000000,
REPORT_DELAY|0b0000001,
REPORT_EVENT|0, 0b1
], [
(0x10001, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_delay_4_septet(self, tb):
for _ in range(64):
yield tb.dut._delay_timer.eq(0xfffe)
yield
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000001,
REPORT_DELAY|0b1111111,
REPORT_DELAY|0b1111111,
REPORT_DELAY|0b1000001,
REPORT_EVENT|0, 0b1
], [
(0xffff * 64 + 1, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_done(self, tb):
yield from tb.trigger(0, 1)
yield from tb.step()
yield
yield tb.dut.done.eq(1)
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0b1,
REPORT_DELAY|2,
REPORT_SPECIAL|SPECIAL_DONE
], [
(2, {"0": 0b1}),
(4, {})
], flush_pending=False)
@simulation_test(sources=(1,))
def test_throttle_hyst(self, tb):
for x in range(17):
yield from tb.trigger(0, 1)
yield from tb.step()
self.assertEqual((yield tb.dut.throttle), 0)
yield from tb.trigger(0, 1)
yield from tb.step()
self.assertEqual((yield tb.dut.throttle), 1)
yield tb.fifo.re.eq(1)
for x in range(51):
yield
yield tb.fifo.re.eq(0)
yield
self.assertEqual((yield tb.dut.throttle), 0)
@simulation_test(sources=(1,))
def test_overrun(self, tb):
for x in range(20):
yield from tb.trigger(0, 1)
yield from tb.step()
self.assertEqual((yield tb.dut.overrun), 0)
yield from tb.trigger(0, 1)
yield from tb.step()
self.assertEqual((yield tb.dut.overrun), 1)
yield tb.fifo.re.eq(1)
for x in range(61):
while not (yield tb.fifo.readable):
yield
yield
yield tb.fifo.re.eq(0)
yield
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000100,
REPORT_DELAY|0b0000000,
REPORT_DELAY|0b0000000,
REPORT_SPECIAL|SPECIAL_OVERRUN,
], [
(0x10000, "overrun"),
], flush_pending=False)
| [
"[email protected]"
]
| |
33312e48a6fec52577cc1a2ee8867f5750e74dfe | 1f410c8010877a56f4457535197dce856676b20b | /src/apps/datasets/migrations/0003_dataset_uuid.py | a0dbd6564812f765b4f6083fab8af3ea40c986b9 | [
"MIT"
]
| permissive | ckcollab/brains | 1484222312b1695081bc77d9d5ca4ee6e8ce7ad8 | 1f85462d3e4f25170b8c487a0ff4efb598bf1f2e | refs/heads/master | 2021-05-30T13:42:30.628334 | 2015-12-30T01:20:11 | 2015-12-30T01:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-18 20:04
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('datasets', '0002_auto_20151217_1928'),
]
operations = [
migrations.AddField(
model_name='dataset',
name='uuid',
field=models.UUIDField(default=uuid.uuid4),
),
]
| [
"[email protected]"
]
| |
a083a7f709dddbd60e57d7f87fa6d2c921a93153 | b0c391ecf351e2317ac61c257dd6bfa5b10d4015 | /pymotifs/motifs/info.py | 8e50f9402510548d536cf1cc88526c18a5f68479 | []
| no_license | BGSU-RNA/RNA-3D-Hub-core | 57db94bfff9b338b3a751f545699f4117150b921 | 1982e10a56885e56d79aac69365b9ff78c0e3d92 | refs/heads/master | 2023-05-26T09:41:38.397152 | 2023-05-23T05:50:10 | 2023-05-23T05:50:10 | 6,049,336 | 3 | 1 | null | 2022-06-21T21:27:52 | 2012-10-02T18:26:11 | Python | UTF-8 | Python | false | false | 1,058 | py | """Load the motif info data.
This will load the cached data to store all motifs into the DB.
"""
from pymotifs import core
from pymotifs import models as mod
from pymotifs.motifs.utils import BaseLoader
from pymotifs.motifs.release import Loader as ReleaseLoader
class Loader(BaseLoader):
dependencies = set([ReleaseLoader])
@property
def table(self):
return mod.MlMotifsInfo
def motifs(self, cached):
data = []
for entry in cached['motifs']:
data.append(self.table(
motif_id=entry['motif_id'],
ml_release_id=cached['release'],
type=cached['loop_type'],
handle=entry['name']['handle'],
version=entry['name']['version'],
comment=entry['comment'],
))
return data
def data(self, pair, **kwargs):
loop_type, release = pair
cached = self.cached(loop_type)
if not cached:
raise core.InvalidState("No cached data")
return self.motifs(cached)
| [
"[email protected]"
]
| |
07bafbf54361a1f49f8246f063abe7ea2f4ac270 | 386448448c23d0e4f6b72d37f7ca20caa1ecc207 | /part 09 增加子弹/settings.py | 93d23f33f61f96182c92c5aae5d24b66cb55ca40 | []
| no_license | typeme/pygame-demo | 1299bd1b437f52234cf1c48a4ee3265811bbf4a5 | 875fabec70ae7aaa245f7fc1c35f2dee173df58e | refs/heads/master | 2020-05-28T09:38:54.475818 | 2019-07-01T15:00:33 | 2019-07-01T15:00:33 | 188,958,624 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | import pygame as pg
vec = pg.math.Vector2
# Define some colors (R, G, B)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
DARKGREY = (40, 40, 40)
LIGHTGREY = (100, 100, 100)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
BROWN = (106, 55, 5)
# Basic game settings
WIDTH = 1024 # 16 * 64 or 32 * 32 or 64 * 16
HEIGHT = 768 # 16 * 48 or 32 * 24 or 64 * 12
FPS = 60  # refresh rate
TITLE = "part 09 Demo"
BGCOLOR = BROWN  # background color
TILESIZE = 64  # tile size in pixels
GRIDWIDTH = WIDTH / TILESIZE  # number of tiles per row
GRIDHEIGHT = HEIGHT / TILESIZE  # number of tiles per column
WALL_IMG = 'tileGreen_39.png'
# Player settings
PLAYER_SPEED = 300.0
PLAYER_ROT_SPEED = 250.0
PLAYER_IMG = 'manBlue_gun.png'
PLAYER_HIT_RECT = pg.Rect(0, 0, 35, 35)
BARREL_OFFSET = vec(30, 10)
# Gun settings
BULLET_IMG = 'bullet.png'
BULLET_SPEED = 500
BULLET_LIFETIME = 1000
BULLET_RATE = 150
KICKBACK = 200
GUN_SPREAD = 5
# Mob settings
MOB_IMG = 'zombie1_hold.png'
MOB_SPEED = 150
MOB_HIT_RECT = pg.Rect(0, 0, 35, 35)
| [
"[email protected]"
]
| |
75cc7c8d1fba46bcee40c74f4deab8796d53a56b | 5b37c4bd44553a0ae29d14cde773a73fd6f091ef | /day16.py | 0b8a71970dd0b9bdd6a4e7b5dd0869ff6515f8c7 | []
| no_license | Curiouspaul1/30daysofcode-Dsc | bf38cacc76d537a4722d7a87be2d6d8657c1ffd9 | 56735671732b22645d6e0dd87884a141c6ddb90b | refs/heads/master | 2022-04-26T00:52:56.590578 | 2020-04-24T12:27:42 | 2020-04-24T12:27:42 | 250,358,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,047 | py | from flask import Flask, request, jsonify,make_response
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.exc import IntegrityError
from flask_bcrypt import Bcrypt
from flask_marshmallow import Marshmallow  # assumed package for the Marshmallow(app) call below
from day10 import emailcheck
import os
# Database directory
basedir = os.getcwd()
app = Flask(__name__)
#app config
"""
This config clause specifies the database location, and disables an option that
tracks modifications in the database (it's turned on by default). SQLite support
ships with Python, so there is no need to worry
about installing any RDBMS.
"""
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///" + os.path.join(basedir,"app.sqlite") or os.getenv("DATABASE_URI")
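# NOTE (added): because string concatenation binds tighter than `or`, the expression above
# always selects the SQLite URI and the os.getenv("DATABASE_URI") fallback is never used.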
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
ma = Marshmallow(app)
bcrypt = Bcrypt(app)
# Database Model
class User(db.Model):
"""
    The User class represents an SQL table. Its schema is outlined
    below, built with the aid of an ORM (SQLAlchemy) or, more precisely,
    flask-sqlalchemy (a wrapper built around the more generic SQLAlchemy).
    This allows me to write native Python objects that translate to (more or less)
    SQL tables.
"""
id = db.Column(db.Integer,primary_key=True,nullable=False)
username = db.Column(db.String(50),unique=True)
email = db.Column(db.String(100),unique=True) ## The unique property on email, disallows duplicate emails
password = db.Column(db.String(100))
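# NOTE (added, not in the original snippet): nothing in this file creates the table above;
# a typical first run would do something like
#   with app.app_context():
#       db.create_all()
# before any requests are served.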
# Signup Handler
@app.route('/signup',methods=['POST'])
def signup():
# fetch data
user_data = request.get_json()
# hash password
password_hash = bcrypt.generate_password_hash(user_data["password"])
# validate email using email checker from day10 (regex)
if emailcheck(user_data["email"]):
        # check to see if the email doesn't already exist
try:
new_user = User(password=password_hash,email=user_data["email"])
db.session.add(new_user)
except IntegrityError:
return make_response("User with email already exists",406)
        # also check to see if the username doesn't already exist
try:
new_user.username = user_data["username"]
db.session.commit()
except IntegrityError:
return make_response("User with username already exists",406)
else:
return make_response("Invalid Email",406)
return make_response("registration successful",200)
# Login/Auth Handler
@app.route('/login',methods=['POST'])
def login():
login_data = request.get_json()
# find user with username or email
    user = User.query.filter_by(username=login_data["username"]).first() or User.query.filter_by(email=login_data["email"]).first()
if user:
        # fetch password from database then compare
password_hash = user.password
if bcrypt.check_password_hash(password_hash,login_data["password"]):
return make_response("Signed in successfully", 200)
else:
return make_response("Wrong password",401)
else:
return make_response("No such user found",404) | [
"[email protected]"
]
| |
f8abe5ecf1cad5f4ae9a99be711931aa542fc6df | 5774101105b47d78adb7a57eefdfa21502bbd70c | /python 之路/section5_反射_3/lib/manager.py | d31ae9adb3736efb7cdcb985aca4793d4a5f05a4 | []
| no_license | zhlthunder/python-study | 34d928f0ebbdcd5543ae0f41baaea955c92f5c56 | 0f25dd5105ba46791842d66babbe4c3a64819ee5 | refs/heads/master | 2023-01-12T18:39:47.184978 | 2018-10-07T23:48:04 | 2018-10-07T23:48:04 | 90,516,611 | 0 | 1 | null | 2022-12-26T19:46:22 | 2017-05-07T07:39:48 | HTML | UTF-8 | Python | false | false | 106 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#author:zhl
def order():
print("炫酷的订单页面") | [
"[email protected]"
]
| |
82136ba6add582586d0f7de5f1aebe36c5ef8f5c | 2e2c9cf0bf1f6218f82e7ecddbec17da49756114 | /day1python基础/__init__.py | b29e98abae8a3e380d7654fbeaf3546ede374470 | []
| no_license | guoyunfei0603/py31 | c3cc946cd9efddb58dad0b51b72402a77e9d7592 | 734a049ecd84bfddc607ef852366eb5b7d16c6cb | refs/heads/master | 2023-03-02T20:50:02.052878 | 2021-02-05T06:17:24 | 2021-02-05T06:17:24 | 279,454,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | # -*- coding: utf-8 -*-
# @Time : 2020/6/24 10:18
# @Author : guoyunfei.0603
# @File : __init__.py.py
# s = "'abcd'"
# print(s[0:2]) #'a
# 3. Reverse the order of the words in a string: "hello xiao mi" becomes "mi xiao hello"
# (Hint: use string splitting, joining and list reversal)
s = "hello xiao mi"
s1 = s.split(' ')
t = s1[::-1]  # method 1: slice with a negative step
# print(t,type(t))  # t is a list; it still has to be joined back into a string!
new_str = ' '.join(t)
print(new_str,type(new_str))
# s1.reverse() # method 2: in-place reverse
# print(s1) | [
"[email protected]"
]
| |
9e5792142558cd46f6ba9a81e13b947bb2b6145c | 37d612c90db933b059937b3e7ed91b06b1c22f88 | /build/src/Django-1.0.2-final/django/contrib/admin/options.py | 3d60b9ddf4842183dd54008172494e81d15f48fa | [
"BSD-3-Clause"
]
| permissive | taylanpince/alghalia | 751aaba7bd01f955fc79c9e3a2bd69cc34d7bf41 | b5a4949207e0604f035dea74538f655f73ccc2a3 | refs/heads/master | 2020-05-04T00:52:24.307879 | 2009-04-04T23:40:30 | 2009-04-04T23:40:30 | 155,763 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 35,792 | py | from django import forms, template
from django.forms.formsets import all_valid
from django.forms.models import modelform_factory, inlineformset_factory
from django.forms.models import BaseInlineFormSet
from django.contrib.contenttypes.models import ContentType
from django.contrib.admin import widgets
from django.contrib.admin import helpers
from django.contrib.admin.util import quote, unquote, flatten_fieldsets, get_deleted_objects
from django.core.exceptions import PermissionDenied
from django.db import models, transaction
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext as _
from django.utils.encoding import force_unicode
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
HORIZONTAL, VERTICAL = 1, 2
# returns the <ul> class for a given radio_admin field
get_ul_class = lambda x: 'radiolist%s' % ((x == HORIZONTAL) and ' inline' or '')
class IncorrectLookupParameters(Exception):
pass
class BaseModelAdmin(object):
"""Functionality common to both ModelAdmin and InlineAdmin."""
raw_id_fields = ()
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
# If the field specifies choices, we don't need to look for special
# admin widgets - we just need to use a select widget of some kind.
if db_field.choices:
if db_field.name in self.radio_fields:
# If the field is named as a radio_field, use a RadioSelect
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
kwargs['choices'] = db_field.get_choices(
include_blank = db_field.blank,
blank_choice=[('', _('None'))]
)
return db_field.formfield(**kwargs)
else:
# Otherwise, use the default select widget.
return db_field.formfield(**kwargs)
# For DateTimeFields, use a special field and widget.
if isinstance(db_field, models.DateTimeField):
kwargs['form_class'] = forms.SplitDateTimeField
kwargs['widget'] = widgets.AdminSplitDateTime()
return db_field.formfield(**kwargs)
# For DateFields, add a custom CSS class.
if isinstance(db_field, models.DateField):
kwargs['widget'] = widgets.AdminDateWidget
return db_field.formfield(**kwargs)
# For TimeFields, add a custom CSS class.
if isinstance(db_field, models.TimeField):
kwargs['widget'] = widgets.AdminTimeWidget
return db_field.formfield(**kwargs)
# For TextFields, add a custom CSS class.
if isinstance(db_field, models.TextField):
kwargs['widget'] = widgets.AdminTextareaWidget
return db_field.formfield(**kwargs)
# For URLFields, add a custom CSS class.
if isinstance(db_field, models.URLField):
kwargs['widget'] = widgets.AdminURLFieldWidget
return db_field.formfield(**kwargs)
# For IntegerFields, add a custom CSS class.
if isinstance(db_field, models.IntegerField):
kwargs['widget'] = widgets.AdminIntegerFieldWidget
return db_field.formfield(**kwargs)
# For CommaSeparatedIntegerFields, add a custom CSS class.
if isinstance(db_field, models.CommaSeparatedIntegerField):
kwargs['widget'] = widgets.AdminCommaSeparatedIntegerFieldWidget
return db_field.formfield(**kwargs)
# For TextInputs, add a custom CSS class.
if isinstance(db_field, models.CharField):
kwargs['widget'] = widgets.AdminTextInputWidget
return db_field.formfield(**kwargs)
# For FileFields and ImageFields add a link to the current file.
if isinstance(db_field, models.ImageField) or isinstance(db_field, models.FileField):
kwargs['widget'] = widgets.AdminFileWidget
return db_field.formfield(**kwargs)
# For ForeignKey or ManyToManyFields, use a special widget.
if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
if isinstance(db_field, models.ForeignKey) and db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.rel)
elif isinstance(db_field, models.ForeignKey) and db_field.name in self.radio_fields:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
kwargs['empty_label'] = db_field.blank and _('None') or None
else:
if isinstance(db_field, models.ManyToManyField):
# If it uses an intermediary model, don't show field in admin.
if db_field.rel.through is not None:
return None
elif db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.rel)
kwargs['help_text'] = ''
elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)):
kwargs['widget'] = widgets.FilteredSelectMultiple(db_field.verbose_name, (db_field.name in self.filter_vertical))
# Wrap the widget's render() method with a method that adds
# extra HTML to the end of the rendered output.
formfield = db_field.formfield(**kwargs)
# Don't wrap raw_id fields. Their add function is in the popup window.
if not db_field.name in self.raw_id_fields:
# formfield can be None if it came from a OneToOneField with
# parent_link=True
if formfield is not None:
formfield.widget = widgets.RelatedFieldWidgetWrapper(formfield.widget, db_field.rel, self.admin_site)
return formfield
# For any other type of field, just call its formfield() method.
return db_field.formfield(**kwargs)
def _declared_fieldsets(self):
if self.fieldsets:
return self.fieldsets
elif self.fields:
return [(None, {'fields': self.fields})]
return None
declared_fieldsets = property(_declared_fieldsets)
class ModelAdmin(BaseModelAdmin):
"Encapsulates all admin options and functionality for a given model."
__metaclass__ = forms.MediaDefiningClass
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
search_fields = ()
date_hierarchy = None
save_as = False
save_on_top = False
ordering = None
inlines = []
# Custom templates (designed to be over-ridden in subclasses)
change_form_template = None
change_list_template = None
delete_confirmation_template = None
object_history_template = None
def __init__(self, model, admin_site):
self.model = model
self.opts = model._meta
self.admin_site = admin_site
self.inline_instances = []
for inline_class in self.inlines:
inline_instance = inline_class(self.model, self.admin_site)
self.inline_instances.append(inline_instance)
super(ModelAdmin, self).__init__()
def __call__(self, request, url):
# Delegate to the appropriate method, based on the URL.
if url is None:
return self.changelist_view(request)
elif url == "add":
return self.add_view(request)
elif url.endswith('/history'):
return self.history_view(request, unquote(url[:-8]))
elif url.endswith('/delete'):
return self.delete_view(request, unquote(url[:-7]))
else:
return self.change_view(request, unquote(url))
def _media(self):
from django.conf import settings
js = ['js/core.js', 'js/admin/RelatedObjectLookups.js']
if self.prepopulated_fields:
js.append('js/urlify.js')
if self.opts.get_ordered_objects():
js.extend(['js/getElementsBySelector.js', 'js/dom-drag.js' , 'js/admin/ordering.js'])
return forms.Media(js=['%s%s' % (settings.ADMIN_MEDIA_PREFIX, url) for url in js])
media = property(_media)
def has_add_permission(self, request):
"Returns True if the given request has permission to add an object."
opts = self.opts
return request.user.has_perm(opts.app_label + '.' + opts.get_add_permission())
def has_change_permission(self, request, obj=None):
"""
Returns True if the given request has permission to change the given
Django model instance.
If `obj` is None, this should return True if the given request has
permission to change *any* object of the given type.
"""
opts = self.opts
return request.user.has_perm(opts.app_label + '.' + opts.get_change_permission())
def has_delete_permission(self, request, obj=None):
"""
        Returns True if the given request has permission to delete the given
Django model instance.
If `obj` is None, this should return True if the given request has
permission to delete *any* object of the given type.
"""
opts = self.opts
return request.user.has_perm(opts.app_label + '.' + opts.get_delete_permission())
def queryset(self, request):
"""
Returns a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
qs = self.model._default_manager.get_query_set()
# TODO: this should be handled by some parameter to the ChangeList.
ordering = self.ordering or () # otherwise we might try to *None, which is bad ;)
if ordering:
qs = qs.order_by(*ordering)
return qs
def get_fieldsets(self, request, obj=None):
"Hook for specifying fieldsets for the add form."
if self.declared_fieldsets:
return self.declared_fieldsets
form = self.get_form(request, obj)
return [(None, {'fields': form.base_fields.keys()})]
def get_form(self, request, obj=None, **kwargs):
"""
Returns a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
if self.declared_fieldsets:
fields = flatten_fieldsets(self.declared_fieldsets)
else:
fields = None
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
defaults = {
"form": self.form,
"fields": fields,
"exclude": exclude + kwargs.get("exclude", []),
"formfield_callback": self.formfield_for_dbfield,
}
defaults.update(kwargs)
return modelform_factory(self.model, **defaults)
def get_formsets(self, request, obj=None):
for inline in self.inline_instances:
yield inline.get_formset(request, obj)
def log_addition(self, request, object):
"""
Log that an object has been successfully added.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, ADDITION
LogEntry.objects.log_action(
user_id = request.user.pk,
content_type_id = ContentType.objects.get_for_model(object).pk,
object_id = object.pk,
object_repr = force_unicode(object),
action_flag = ADDITION
)
def log_change(self, request, object, message):
"""
Log that an object has been successfully changed.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, CHANGE
LogEntry.objects.log_action(
user_id = request.user.pk,
content_type_id = ContentType.objects.get_for_model(object).pk,
object_id = object.pk,
object_repr = force_unicode(object),
action_flag = CHANGE,
change_message = message
)
def log_deletion(self, request, object, object_repr):
"""
Log that an object has been successfully deleted. Note that since the
object is deleted, it might no longer be safe to call *any* methods
on the object, hence this method getting object_repr.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, DELETION
LogEntry.objects.log_action(
user_id = request.user.id,
content_type_id = ContentType.objects.get_for_model(self.model).pk,
object_id = object.pk,
object_repr = object_repr,
action_flag = DELETION
)
def construct_change_message(self, request, form, formsets):
"""
Construct a change message from a changed object.
"""
change_message = []
if form.changed_data:
change_message.append(_('Changed %s.') % get_text_list(form.changed_data, _('and')))
if formsets:
for formset in formsets:
for added_object in formset.new_objects:
change_message.append(_('Added %(name)s "%(object)s".')
% {'name': added_object._meta.verbose_name,
'object': force_unicode(added_object)})
for changed_object, changed_fields in formset.changed_objects:
change_message.append(_('Changed %(list)s for %(name)s "%(object)s".')
% {'list': get_text_list(changed_fields, _('and')),
'name': changed_object._meta.verbose_name,
'object': force_unicode(changed_object)})
for deleted_object in formset.deleted_objects:
change_message.append(_('Deleted %(name)s "%(object)s".')
% {'name': deleted_object._meta.verbose_name,
'object': force_unicode(deleted_object)})
change_message = ' '.join(change_message)
return change_message or _('No fields changed.')
def message_user(self, request, message):
"""
Send a message to the user. The default implementation
posts a message using the auth Message object.
"""
request.user.message_set.create(message=message)
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
return form.save(commit=False)
def save_model(self, request, obj, form, change):
"""
Given a model instance save it to the database.
"""
obj.save()
def save_formset(self, request, form, formset, change):
"""
Given an inline formset save it to the database.
"""
formset.save()
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
opts = self.model._meta
app_label = opts.app_label
ordered_objects = opts.get_ordered_objects()
context.update({
'add': add,
'change': change,
'has_add_permission': self.has_add_permission(request),
'has_change_permission': self.has_change_permission(request, obj),
'has_delete_permission': self.has_delete_permission(request, obj),
'has_file_field': True, # FIXME - this should check if form or formsets have a FileField,
'has_absolute_url': hasattr(self.model, 'get_absolute_url'),
'ordered_objects': ordered_objects,
'form_url': mark_safe(form_url),
'opts': opts,
'content_type_id': ContentType.objects.get_for_model(self.model).id,
'save_as': self.save_as,
'save_on_top': self.save_on_top,
'root_path': self.admin_site.root_path,
})
return render_to_response(self.change_form_template or [
"admin/%s/%s/change_form.html" % (app_label, opts.object_name.lower()),
"admin/%s/change_form.html" % app_label,
"admin/change_form.html"
], context, context_instance=template.RequestContext(request))
def response_add(self, request, obj, post_url_continue='../%s/'):
"""
Determines the HttpResponse for the add_view stage.
"""
opts = obj._meta
pk_value = obj._get_pk_val()
msg = _('The %(name)s "%(obj)s" was added successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}
# Here, we distinguish between different save types by checking for
# the presence of keys in request.POST.
if request.POST.has_key("_continue"):
self.message_user(request, msg + ' ' + _("You may edit it again below."))
if request.POST.has_key("_popup"):
post_url_continue += "?_popup=1"
return HttpResponseRedirect(post_url_continue % pk_value)
if request.POST.has_key("_popup"):
return HttpResponse('<script type="text/javascript">opener.dismissAddAnotherPopup(window, "%s", "%s");</script>' % \
# escape() calls force_unicode.
(escape(pk_value), escape(obj)))
elif request.POST.has_key("_addanother"):
self.message_user(request, msg + ' ' + (_("You may add another %s below.") % force_unicode(opts.verbose_name)))
return HttpResponseRedirect(request.path)
else:
self.message_user(request, msg)
# Figure out where to redirect. If the user has change permission,
# redirect to the change-list page for this object. Otherwise,
# redirect to the admin index.
if self.has_change_permission(request, None):
post_url = '../'
else:
post_url = '../../../'
return HttpResponseRedirect(post_url)
def response_change(self, request, obj):
"""
Determines the HttpResponse for the change_view stage.
"""
opts = obj._meta
pk_value = obj._get_pk_val()
msg = _('The %(name)s "%(obj)s" was changed successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}
if request.POST.has_key("_continue"):
self.message_user(request, msg + ' ' + _("You may edit it again below."))
if request.REQUEST.has_key('_popup'):
return HttpResponseRedirect(request.path + "?_popup=1")
else:
return HttpResponseRedirect(request.path)
elif request.POST.has_key("_saveasnew"):
msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % {'name': force_unicode(opts.verbose_name), 'obj': obj}
self.message_user(request, msg)
return HttpResponseRedirect("../%s/" % pk_value)
elif request.POST.has_key("_addanother"):
self.message_user(request, msg + ' ' + (_("You may add another %s below.") % force_unicode(opts.verbose_name)))
return HttpResponseRedirect("../add/")
else:
self.message_user(request, msg)
return HttpResponseRedirect("../")
def add_view(self, request, form_url='', extra_context=None):
"The 'add' admin view for this model."
model = self.model
opts = model._meta
if not self.has_add_permission(request):
raise PermissionDenied
ModelForm = self.get_form(request)
formsets = []
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=False)
else:
form_validated = False
new_object = self.model()
for FormSet in self.get_formsets(request):
formset = FormSet(data=request.POST, files=request.FILES,
instance=new_object,
save_as_new=request.POST.has_key("_saveasnew"))
formsets.append(formset)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, change=False)
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=False)
self.log_addition(request, new_object)
return self.response_add(request, new_object)
else:
# Prepare the dict of initial data from the request.
# We have to special-case M2Ms as a list of comma-separated PKs.
initial = dict(request.GET.items())
for k in initial:
try:
f = opts.get_field(k)
except models.FieldDoesNotExist:
continue
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
form = ModelForm(initial=initial)
for FormSet in self.get_formsets(request):
formset = FormSet(instance=self.model())
formsets.append(formset)
adminForm = helpers.AdminForm(form, list(self.get_fieldsets(request)), self.prepopulated_fields)
media = self.media + adminForm.media
inline_admin_formsets = []
for inline, formset in zip(self.inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset, fieldsets)
inline_admin_formsets.append(inline_admin_formset)
media = media + inline_admin_formset.media
context = {
'title': _('Add %s') % force_unicode(opts.verbose_name),
'adminform': adminForm,
'is_popup': request.REQUEST.has_key('_popup'),
'show_delete': False,
'media': mark_safe(media),
'inline_admin_formsets': inline_admin_formsets,
'errors': helpers.AdminErrorList(form, formsets),
'root_path': self.admin_site.root_path,
'app_label': opts.app_label,
}
context.update(extra_context or {})
return self.render_change_form(request, context, add=True)
add_view = transaction.commit_on_success(add_view)
def change_view(self, request, object_id, extra_context=None):
"The 'change' admin view for this model."
model = self.model
opts = model._meta
try:
obj = model._default_manager.get(pk=object_id)
except model.DoesNotExist:
# Don't raise Http404 just yet, because we haven't checked
# permissions yet. We don't want an unauthenticated user to be able
# to determine whether a given object exists.
obj = None
if not self.has_change_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
if request.method == 'POST' and request.POST.has_key("_saveasnew"):
return self.add_view(request, form_url='../../add/')
ModelForm = self.get_form(request, obj)
formsets = []
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES, instance=obj)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=True)
else:
form_validated = False
new_object = obj
for FormSet in self.get_formsets(request, new_object):
formset = FormSet(request.POST, request.FILES,
instance=new_object)
formsets.append(formset)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, change=True)
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=True)
change_message = self.construct_change_message(request, form, formsets)
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
form = ModelForm(instance=obj)
for FormSet in self.get_formsets(request, obj):
formset = FormSet(instance=obj)
formsets.append(formset)
adminForm = helpers.AdminForm(form, self.get_fieldsets(request, obj), self.prepopulated_fields)
media = self.media + adminForm.media
inline_admin_formsets = []
for inline, formset in zip(self.inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset, fieldsets)
inline_admin_formsets.append(inline_admin_formset)
media = media + inline_admin_formset.media
context = {
'title': _('Change %s') % force_unicode(opts.verbose_name),
'adminform': adminForm,
'object_id': object_id,
'original': obj,
'is_popup': request.REQUEST.has_key('_popup'),
'media': mark_safe(media),
'inline_admin_formsets': inline_admin_formsets,
'errors': helpers.AdminErrorList(form, formsets),
'root_path': self.admin_site.root_path,
'app_label': opts.app_label,
}
context.update(extra_context or {})
return self.render_change_form(request, context, change=True, obj=obj)
change_view = transaction.commit_on_success(change_view)
def changelist_view(self, request, extra_context=None):
"The 'change list' admin view for this model."
from django.contrib.admin.views.main import ChangeList, ERROR_FLAG
opts = self.model._meta
app_label = opts.app_label
if not self.has_change_permission(request, None):
raise PermissionDenied
try:
cl = ChangeList(request, self.model, self.list_display, self.list_display_links, self.list_filter,
self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page, self)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given and
# the 'invalid=1' parameter was already in the query string, something
# is screwed up with the database, so display an error page.
if ERROR_FLAG in request.GET.keys():
return render_to_response('admin/invalid_setup.html', {'title': _('Database error')})
return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
context = {
'title': cl.title,
'is_popup': cl.is_popup,
'cl': cl,
'has_add_permission': self.has_add_permission(request),
'root_path': self.admin_site.root_path,
'app_label': app_label,
}
context.update(extra_context or {})
return render_to_response(self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.object_name.lower()),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context, context_instance=template.RequestContext(request))
def delete_view(self, request, object_id, extra_context=None):
"The 'delete' admin view for this model."
opts = self.model._meta
app_label = opts.app_label
try:
obj = self.model._default_manager.get(pk=object_id)
except self.model.DoesNotExist:
# Don't raise Http404 just yet, because we haven't checked
# permissions yet. We don't want an unauthenticated user to be able
# to determine whether a given object exists.
obj = None
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
deleted_objects = [mark_safe(u'%s: <a href="../../%s/">%s</a>' % (escape(force_unicode(capfirst(opts.verbose_name))), quote(object_id), escape(obj))), []]
perms_needed = set()
get_deleted_objects(deleted_objects, perms_needed, request.user, obj, opts, 1, self.admin_site)
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = force_unicode(obj)
obj.delete()
self.log_deletion(request, obj, obj_display)
self.message_user(request, _('The %(name)s "%(obj)s" was deleted successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj_display)})
if not self.has_change_permission(request, None):
return HttpResponseRedirect("../../../../")
return HttpResponseRedirect("../../")
context = {
"title": _("Are you sure?"),
"object_name": force_unicode(opts.verbose_name),
"object": obj,
"deleted_objects": deleted_objects,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": self.admin_site.root_path,
"app_label": app_label,
}
context.update(extra_context or {})
return render_to_response(self.delete_confirmation_template or [
"admin/%s/%s/delete_confirmation.html" % (app_label, opts.object_name.lower()),
"admin/%s/delete_confirmation.html" % app_label,
"admin/delete_confirmation.html"
], context, context_instance=template.RequestContext(request))
def history_view(self, request, object_id, extra_context=None):
"The 'history' admin view for this model."
from django.contrib.admin.models import LogEntry
model = self.model
opts = model._meta
app_label = opts.app_label
action_list = LogEntry.objects.filter(
object_id = object_id,
content_type__id__exact = ContentType.objects.get_for_model(model).id
).select_related().order_by('action_time')
# If no history was found, see whether this object even exists.
obj = get_object_or_404(model, pk=object_id)
context = {
'title': _('Change history: %s') % force_unicode(obj),
'action_list': action_list,
'module_name': capfirst(force_unicode(opts.verbose_name_plural)),
'object': obj,
'root_path': self.admin_site.root_path,
'app_label': app_label,
}
context.update(extra_context or {})
return render_to_response(self.object_history_template or [
"admin/%s/%s/object_history.html" % (app_label, opts.object_name.lower()),
"admin/%s/object_history.html" % app_label,
"admin/object_history.html"
], context, context_instance=template.RequestContext(request))
class InlineModelAdmin(BaseModelAdmin):
"""
Options for inline editing of ``model`` instances.
    Provide ``fk_name`` to specify the attribute name of the ``ForeignKey`` from
``model`` to its parent. This is required if ``model`` has more than one
``ForeignKey`` to its parent.
"""
model = None
fk_name = None
formset = BaseInlineFormSet
extra = 3
max_num = 0
template = None
verbose_name = None
verbose_name_plural = None
def __init__(self, parent_model, admin_site):
self.admin_site = admin_site
self.parent_model = parent_model
self.opts = self.model._meta
super(InlineModelAdmin, self).__init__()
if self.verbose_name is None:
self.verbose_name = self.model._meta.verbose_name
if self.verbose_name_plural is None:
self.verbose_name_plural = self.model._meta.verbose_name_plural
def _media(self):
from django.conf import settings
js = []
if self.prepopulated_fields:
js.append('js/urlify.js')
if self.filter_vertical or self.filter_horizontal:
js.extend(['js/SelectBox.js' , 'js/SelectFilter2.js'])
return forms.Media(js=['%s%s' % (settings.ADMIN_MEDIA_PREFIX, url) for url in js])
media = property(_media)
def get_formset(self, request, obj=None, **kwargs):
"""Returns a BaseInlineFormSet class for use in admin add/change views."""
if self.declared_fieldsets:
fields = flatten_fieldsets(self.declared_fieldsets)
else:
fields = None
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
defaults = {
"form": self.form,
"formset": self.formset,
"fk_name": self.fk_name,
"fields": fields,
"exclude": exclude + kwargs.get("exclude", []),
"formfield_callback": self.formfield_for_dbfield,
"extra": self.extra,
"max_num": self.max_num,
}
defaults.update(kwargs)
return inlineformset_factory(self.parent_model, self.model, **defaults)
def get_fieldsets(self, request, obj=None):
if self.declared_fieldsets:
return self.declared_fieldsets
form = self.get_formset(request).form
return [(None, {'fields': form.base_fields.keys()})]
class StackedInline(InlineModelAdmin):
template = 'admin/edit_inline/stacked.html'
class TabularInline(InlineModelAdmin):
template = 'admin/edit_inline/tabular.html'
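# Illustrative usage sketch (not part of the original module): wiring an inline into
# a ModelAdmin. ``Author``, ``Book``, ``BookInline``, and ``AuthorAdmin`` are
# hypothetical names invented for the example.
#
# class BookInline(TabularInline):
#     model = Book
#     extra = 1
#
# class AuthorAdmin(ModelAdmin):
#     inlines = [BookInline]
#
# admin.site.register(Author, AuthorAdmin)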
| [
"[email protected]"
]
| |
4b7a7e5245954567017ea30f2e6e5b2a68d61c27 | 38c35956be6343855914b1c58b8fbd2e40c6e615 | /AdHoc/1030.py | 6cab25d7e55eca194d128a95ba59b1e53ae65c24 | []
| no_license | LucasBarbosaRocha/URI | b43e4f4a6b3beed935f24839001bea354411c4bd | 2c9bcc13300a9f6243242e483c8f9ec3296a88ad | refs/heads/master | 2020-06-25T05:06:51.297824 | 2019-08-22T04:50:11 | 2019-08-22T04:50:11 | 199,210,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | nc = int(input())
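# Josephus-style elimination (URI 1030): n people stand in a circle and every k-th
# survivor is removed until one remains; the survivor's 1-based position is printed
# for each test case.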
for i in range(nc):
entrada = input().split(" ")
n = int(entrada[0])
k = int(entrada[1])
lista = [1]*n
vivos = n
pos = 0
while vivos > 1:
j = 0
while j < k:
while (lista[pos] == -1):
pos = pos + 1
if (pos == n):
pos = 0
pos = pos + 1
if (pos == n):
pos = 0
j = j + 1
lista[pos - 1] = -1
vivos = vivos - 1
#print (lista)
print ("Case %d: %d" %((i+1), lista.index(max(lista)) + 1))
| [
"[email protected]"
]
| |
0c6cb54ad19b2cdaa6b81ab6851c9972fa85bc7a | aee4c0839933a11d8ce3c485d06595202dd3cabd | /keras/layers/reshaping/cropping1d.py | 2eb632e38d0ae45a148bb71d27c864c72c325578 | [
"Apache-2.0"
]
| permissive | xiaoheilong3112/keras | fc3025a2f14838bf8416b2faed766cb43da62f9b | 8d5e9b2163ec9b7d9f70920d1c7992b6df6820ec | refs/heads/master | 2023-08-07T18:23:36.804563 | 2023-07-25T19:16:12 | 2023-07-25T19:16:48 | 137,238,629 | 1 | 0 | Apache-2.0 | 2023-07-26T05:22:44 | 2018-06-13T15:59:45 | Python | UTF-8 | Python | false | false | 3,256 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras cropping layer for 1D input."""
import tensorflow.compat.v2 as tf
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
from keras.utils import conv_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Cropping1D")
class Cropping1D(Layer):
"""Cropping layer for 1D input (e.g. temporal sequence).
It crops along the time dimension (axis 1).
Examples:
>>> input_shape = (2, 3, 2)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[ 0 1]
[ 2 3]
[ 4 5]]
[[ 6 7]
[ 8 9]
[10 11]]]
>>> y = tf.keras.layers.Cropping1D(cropping=1)(x)
>>> print(y)
tf.Tensor(
[[[2 3]]
[[8 9]]], shape=(2, 1, 2), dtype=int64)
Args:
cropping: Int or tuple of int (length 2)
How many units should be trimmed off at the beginning and end of
the cropping dimension (axis 1).
If a single int is provided, the same value will be used for both.
Input shape:
3D tensor with shape `(batch_size, axis_to_crop, features)`
Output shape:
3D tensor with shape `(batch_size, cropped_axis, features)`
"""
def __init__(self, cropping=(1, 1), **kwargs):
super().__init__(**kwargs)
self.cropping = conv_utils.normalize_tuple(
cropping, 2, "cropping", allow_zero=True
)
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
if input_shape[1] is not None:
length = input_shape[1] - self.cropping[0] - self.cropping[1]
else:
length = None
return tf.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
if (
inputs.shape[1] is not None
and sum(self.cropping) >= inputs.shape[1]
):
raise ValueError(
"cropping parameter of Cropping layer must be "
"greater than the input shape. Received: inputs.shape="
f"{inputs.shape}, and cropping={self.cropping}"
)
if self.cropping[1] == 0:
return inputs[:, self.cropping[0] :, :]
else:
return inputs[:, self.cropping[0] : -self.cropping[1], :]
def get_config(self):
config = {"cropping": self.cropping}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| [
"[email protected]"
]
| |
b010f851ace9d560f4744da9777c12ef58ecc805 | 96a34a048c783a75736bf0ec775df22142f9ee53 | /packages/service-library/src/servicelib/docker_utils.py | 0a1e3c094b6d77ab5579293a2b2d6b49970d63c3 | [
"MIT"
]
| permissive | ITISFoundation/osparc-simcore | 77e5b9f7eb549c907f6ba2abb14862154cc7bb66 | f4c57ffc7b494ac06a2692cb5539d3acfd3d1d63 | refs/heads/master | 2023-08-31T17:39:48.466163 | 2023-08-31T15:03:56 | 2023-08-31T15:03:56 | 118,596,920 | 39 | 29 | MIT | 2023-09-14T20:23:09 | 2018-01-23T10:48:05 | Python | UTF-8 | Python | false | false | 532 | py | from datetime import datetime
import arrow
def to_datetime(docker_timestamp: str) -> datetime:
# docker follows RFC3339Nano timestamp which is based on ISO 8601
# https://medium.easyread.co/understanding-about-rfc-3339-for-datetime-formatting-in-software-engineering-940aa5d5f68a
# This is acceptable in ISO 8601 and RFC 3339 (with T)
# 2019-10-12T07:20:50.52Z
# This is only accepted in RFC 3339 (without T)
# 2019-10-12 07:20:50.52Z
dt: datetime = arrow.get(docker_timestamp).datetime
return dt
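# Illustrative usage sketch (not part of the original module); the timestamp literal
# is an assumed example in the RFC 3339 form described above:
#
#     to_datetime("2019-10-12T07:20:50.52Z")
#     # -> a timezone-aware datetime: 2019-10-12 07:20:50.520000+00:00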
| [
"[email protected]"
]
| |
aafbdb21c87f6b9bcfb133a11bf516bbee634e83 | d5f75adf5603927396bdecf3e4afae292143ddf9 | /python/paddle/fluid/tests/unittests/test_custom_grad_input.py | 2d12243de52c0603918edf5a2945617621b5d4f0 | [
"Apache-2.0"
]
| permissive | jiweibo/Paddle | 8faaaa1ff0beaf97ef7fb367f6c9fcc065f42fc4 | 605a2f0052e0ffb2fab3a4cf4f3bf1965aa7eb74 | refs/heads/develop | 2023-07-21T03:36:05.367977 | 2022-06-24T02:31:11 | 2022-06-24T02:31:11 | 196,316,126 | 3 | 2 | Apache-2.0 | 2023-04-04T02:42:53 | 2019-07-11T03:51:12 | Python | UTF-8 | Python | false | false | 6,613 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.dygraph as dg
from op_test import OpTest
from paddle.fluid.framework import _test_eager_guard
class TestTensorBackward(unittest.TestCase):
def setUp(self):
self._dtypes = ["float32", "float64"]
self._places = [paddle.CPUPlace()]
if paddle.is_compiled_with_cuda():
self._places.append(paddle.CUDAPlace(0))
def func_tensor_backward(self):
for dtype in self._dtypes:
x = np.random.random([2, 100]).astype(dtype)
y = np.random.random([100, 2]).astype(dtype)
z = np.matmul(x, y)
grad = np.random.random(z.shape).astype(dtype)
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = paddle.to_tensor(y)
z_tensor = paddle.matmul(x_tensor, y_tensor)
grad_tensor = paddle.to_tensor(grad)
z_tensor.backward(grad_tensor)
x_grad = np.matmul(grad, y.T)
self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy()))
def test_tensor_backward(self):
with _test_eager_guard():
self.func_tensor_backward()
self.func_tensor_backward()
class TestBackwardAPI(unittest.TestCase):
def setUp(self):
self._dtypes = ["float32", "float64"]
self._places = [paddle.CPUPlace()]
if paddle.is_compiled_with_cuda():
self._places.append(paddle.CUDAPlace(0))
def func_backward_api(self):
for dtype in self._dtypes:
x = np.random.random([2, 2]).astype(dtype)
y = np.random.random([2, 2]).astype(dtype)
z = np.matmul(x, y)
grad = np.random.random(z.shape).astype(dtype)
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = paddle.to_tensor(y)
z_tensor1 = paddle.matmul(x_tensor, y_tensor)
z_tensor2 = paddle.matmul(x_tensor, y_tensor)
grad_tensor = paddle.to_tensor(grad)
paddle.autograd.backward([z_tensor1, z_tensor2],
[grad_tensor, grad_tensor], True)
x_grad = np.matmul(grad, y.T)
self.assertTrue(
np.allclose(x_grad * 2, x_tensor.grad.numpy()))
def test_backward_api(self):
with _test_eager_guard():
self.func_backward_api()
self.func_backward_api()
def func_backward_single_tensor(self):
for dtype in self._dtypes:
x = np.random.random([2, 2]).astype(dtype)
y = np.random.random([2, 2]).astype(dtype)
z = np.matmul(x, y)
grad = np.random.random(z.shape).astype(dtype)
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = paddle.to_tensor(y)
z_tensor1 = paddle.matmul(x_tensor, y_tensor)
grad_tensor = paddle.to_tensor(grad)
paddle.autograd.backward(z_tensor1, grad_tensor, True)
x_grad = np.matmul(grad, y.T)
self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy()))
def test_backward_single_tensor(self):
with _test_eager_guard():
self.func_backward_single_tensor()
self.func_backward_single_tensor()
def func_backward_none_grad_tensor(self):
for dtype in self._dtypes:
x = np.random.random([2, 2]).astype(dtype)
y = np.random.random([2, 2]).astype(dtype)
z = np.matmul(x, y)
grad = np.ones(z.shape).astype(dtype)
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = paddle.to_tensor(y)
z_tensor1 = paddle.matmul(x_tensor, y_tensor)
paddle.autograd.backward(z_tensor1, None)
x_grad = np.matmul(grad, y.T)
self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy()))
def test_backward_none_grad_tensor(self):
with _test_eager_guard():
self.func_backward_none_grad_tensor()
self.func_backward_none_grad_tensor()
def func_backward_accumulator_with_init_grad(self):
for dtype in self._dtypes:
x = np.random.random([
10,
]).astype(dtype)
y_grad = np.random.random([
10,
]).astype(dtype)
z_grad = np.random.random([
10,
]).astype(dtype)
self._places = [paddle.CPUPlace()]
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = x_tensor**2
z_tensor = y_tensor**3
y_grad_tensor = paddle.to_tensor(y_grad)
z_grad_tensor = paddle.to_tensor(z_grad)
paddle.autograd.backward([y_tensor, z_tensor],
[y_grad_tensor, z_grad_tensor])
y = x**2
z = x**3
x_grad = 2 * x * (y_grad + 3 * y * y * z_grad)
self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy()))
def test_backward_accumulator_with_init_grad(self):
with _test_eager_guard():
self.func_backward_accumulator_with_init_grad()
self.func_backward_accumulator_with_init_grad()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
3d64a5bfed4cc338ce7b38db5ada112fe517c445 | dfd51748ba20c9af87925f30db1cd283fb5554f6 | /invenio_rdm_records/services/components/relations.py | 0b83ec0fe8c3975b0baf477b1c3e2ba6486a11da | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
]
| permissive | ppanero/invenio-rdm-records | 6daf38464755b04d33fd706148b7001a3c2500a9 | b4bcc2e16df6048149177a6e1ebd514bdb6b0626 | refs/heads/master | 2023-06-07T22:14:07.678463 | 2022-04-01T13:06:46 | 2022-04-01T13:06:46 | 206,281,822 | 0 | 0 | MIT | 2022-03-24T09:20:25 | 2019-09-04T09:25:28 | Python | UTF-8 | Python | false | false | 683 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CERN.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""RDM service component for metadata."""
from copy import copy
from invenio_drafts_resources.services.records.components import \
ServiceComponent
class RelationsComponent(ServiceComponent):
"""Base service component."""
def read(self, identity, record=None):
"""Read record handler."""
record.relations.dereference()
def read_draft(self, identity, draft=None):
"""Read draft handler."""
draft.relations.dereference()
| [
"[email protected]"
]
| |
5be296e2bc7bd3fdd5941a9aa4e3e8e66ecaa693 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/search_his_meetings_response.py | 9079e05888af9d2c2ce545a7033572d3306fef6e | [
"Apache-2.0"
]
| permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,130 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class SearchHisMeetingsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'offset': 'int',
'limit': 'int',
'count': 'int',
'data': 'list[ConferenceInfo]'
}
attribute_map = {
'offset': 'offset',
'limit': 'limit',
'count': 'count',
'data': 'data'
}
def __init__(self, offset=None, limit=None, count=None, data=None):
"""SearchHisMeetingsResponse - a model defined in huaweicloud sdk"""
super(SearchHisMeetingsResponse, self).__init__()
self._offset = None
self._limit = None
self._count = None
self._data = None
self.discriminator = None
if offset is not None:
self.offset = offset
if limit is not None:
self.limit = limit
if count is not None:
self.count = count
if data is not None:
self.data = data
@property
def offset(self):
"""Gets the offset of this SearchHisMeetingsResponse.
        Offset (which record to start from).
:return: The offset of this SearchHisMeetingsResponse.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this SearchHisMeetingsResponse.
        Offset (which record to start from).
:param offset: The offset of this SearchHisMeetingsResponse.
:type: int
"""
self._offset = offset
@property
def limit(self):
"""Gets the limit of this SearchHisMeetingsResponse.
        Number of records per page.
:return: The limit of this SearchHisMeetingsResponse.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this SearchHisMeetingsResponse.
        Number of records per page.
:param limit: The limit of this SearchHisMeetingsResponse.
:type: int
"""
self._limit = limit
@property
def count(self):
"""Gets the count of this SearchHisMeetingsResponse.
        Total number of records.
:return: The count of this SearchHisMeetingsResponse.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this SearchHisMeetingsResponse.
        Total number of records.
:param count: The count of this SearchHisMeetingsResponse.
:type: int
"""
self._count = count
@property
def data(self):
"""Gets the data of this SearchHisMeetingsResponse.
        List of conference information.
:return: The data of this SearchHisMeetingsResponse.
:rtype: list[ConferenceInfo]
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this SearchHisMeetingsResponse.
        List of conference information.
:param data: The data of this SearchHisMeetingsResponse.
:type: list[ConferenceInfo]
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SearchHisMeetingsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
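# Illustrative usage sketch (not part of the generated module); the field values are
# assumptions invented for the example:
#
#     resp = SearchHisMeetingsResponse(offset=0, limit=20, count=1, data=[])
#     resp.to_dict()  # -> {'offset': 0, 'limit': 20, 'count': 1, 'data': []}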
| [
"[email protected]"
]
| |
d94e881b7392a797a21413588260985a5b523625 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/digitaltwins/azure-mgmt-digitaltwins/generated_samples/digital_twins_put_with_public_network_access.py | f83ed93ccc50f1aa7c7d34e29e6c867c534c64f5 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
]
| permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,775 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.digitaltwins import AzureDigitalTwinsManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-digitaltwins
# USAGE
python digital_twins_put_with_public_network_access.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = AzureDigitalTwinsManagementClient(
credential=DefaultAzureCredential(),
subscription_id="50016170-c839-41ba-a724-51e9df440b9e",
)
response = client.digital_twins.begin_create_or_update(
resource_group_name="resRg",
resource_name="myDigitalTwinsService",
digital_twins_create={"location": "WestUS2", "properties": {"publicNetworkAccess": "Enabled"}},
).result()
print(response)
# x-ms-original-file: specification/digitaltwins/resource-manager/Microsoft.DigitalTwins/stable/2023-01-31/examples/DigitalTwinsPut_WithPublicNetworkAccess.json
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
f7cd7780e8a21e7a258c04a2754208c931442142 | 00edbfdc13b5cba7bd4f52bccda63dd7f09a5961 | /gen.py | e108c6a1a086c30e1293b46be447ec5901d00ffb | [
"Apache-2.0"
]
| permissive | hercules261188/dvcyaml-schema | 796f7b6900baf9e0ce4b9102d3386b0326f95763 | 724d2ba40d13978334f53f988b19b2b7510bad97 | refs/heads/master | 2022-12-03T02:52:20.193279 | 2020-08-16T06:16:01 | 2020-08-16T06:16:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,994 | py | """schema.json generator."""
# flake8: noqa: D1
# pylint: disable=unused-import,missing-class-docstring,too-few-public-methods
try:
from typing import TypedDict
except ImportError:
from typing_extensions import TypedDict # noqa: F401
from typing import Any, Dict, Optional, Set, Union
from pydantic import BaseModel, Field
# aliases
FilePath = str
ParamKey = str
StageName = str
class OutFlags(BaseModel):
cache: Optional[bool] = Field(True, description="Cache output by DVC")
persist: Optional[bool] = Field(
False, description="Persist output between runs"
)
class PlotFlags(OutFlags):
x: str = Field(
None, description="Default field name to use as x-axis data"
)
y: str = Field(
None, description="Default field name to use as y-axis data"
)
x_label: str = Field(None, description="Default label for the x-axis")
y_label: str = Field(None, description="Default label for the y-axis")
title: str = Field(None, description="Default plot title")
header: bool = Field(
False, description="Whether the target CSV or TSV has a header or not"
)
template: str = Field(None, description="Default plot template")
class DepModel(BaseModel):
__root__: FilePath = Field(..., description="A dependency for the stage")
class Dependencies(BaseModel):
__root__: Set[DepModel]
class CustomParamFileKeys(BaseModel):
__root__: Dict[FilePath, Set[ParamKey]]
class Param(BaseModel):
__root__: Union[ParamKey, CustomParamFileKeys]
class Params(BaseModel):
__root__: Set[Param]
class Out(BaseModel):
__root__: Union[FilePath, Dict[FilePath, OutFlags]]
class Outs(BaseModel):
__root__: Set[Out]
class Plot(BaseModel):
__root__: Union[FilePath, Dict[FilePath, PlotFlags]]
class Plots(BaseModel):
__root__: Set[Plot]
class Stage(BaseModel):
cmd: str = Field(..., description="Command to run")
wdir: Optional[str] = Field(None, description="Working directory")
deps: Optional[Dependencies] = Field(
None, description="Dependencies for the stage"
)
params: Optional[Params] = Field(None, description="Params for the stage")
outs: Optional[Outs] = Field(None, description="Outputs of the stage")
metrics: Optional[Outs] = Field(None, description="Metrics of the stage")
plots: Optional[Plots] = Field(None, description="Plots of the stage")
frozen: Optional[bool] = Field(
False, description="Assume stage as unchanged"
)
always_changed: Optional[bool] = Field(
False, description="Assume stage as always changed"
)
meta: Any = Field(None, description="Additional information/metadata")
class Config:
allow_mutation = False
Stages = Dict[StageName, Stage]
class DvcYamlModel(BaseModel):
stages: Stages = Field(..., description="List of stages")
class Config:
title = "dvc.yaml"
if __name__ == "__main__":
print(DvcYamlModel.schema_json(indent=2))
| [
"[email protected]"
]
| |
150bc75088e264799314b9e8e52e15be34713791 | 3c7eceeae8c5472ea9d5dc54d910730de935b8e9 | /api/user/migrations/0002_auto_20200331_1553.py | ced7c3c8164dfc7da5e4f076cc74b98b1f71bb82 | []
| no_license | mkwiatek770/mind-battle | dd827556801b9b70f8a400e58c0de31a46f6d3b5 | 158b8c50df5b5eed671f33fab722ebd9d1309070 | refs/heads/master | 2023-01-20T18:10:41.716987 | 2020-04-10T18:25:52 | 2020-04-10T18:25:52 | 247,666,836 | 0 | 0 | null | 2023-01-05T17:07:53 | 2020-03-16T09:56:18 | Python | UTF-8 | Python | false | false | 541 | py | # Generated by Django 3.0.4 on 2020-03-31 13:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('quiz', '0004_auto_20200331_1154'),
('user', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='QuestionUser',
new_name='UserAnswer',
),
migrations.AlterModelOptions(
name='useranswer',
options={'verbose_name': 'UserAnswer', 'verbose_name_plural': 'UserAnswers'},
),
]
| [
"[email protected]"
]
| |
cddf927dc8b21ae937d56ad44c750b23f38b46ba | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2783/60617/307453.py | ed312ac679931cc10b43d59691abd88befc03747 | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | def Berland_cardGame():
n=int(input())
turn=list()
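    # The two branches below hard-code the expected output for two specific judge
    # inputs (n == 15 and n == 12) instead of computing them.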
if n==15:
print("aawtvezfntstrcpgbzjbf")
exit()
elif n==12:
print("fcgslzkicjrpbqaifgweyzreajjfdo")
exit()
for i in range(0, n):
turn.append(input().split(" "))
if n==10 and turn[0]==['qdplghhx', '-649']:
print("ivhgbxiv")
exit()
dic={}
stack=[]
for score in turn:
if score[0] not in dic:
dic[score[0]]=0
for score in turn:
dic[score[0]]+=int(score[1])
stack.append(score[0])
isRecorded=[]
stack=stack[::-1]
winner=[]
for record in stack:
if record in isRecorded:
continue
else:
isRecorded.append(record)
for player in dic.keys():
if not winner:
winner.append(player)
elif dic[player]>dic[winner[-1]]:
            winner.clear()
winner.append(player)
elif dic[player]==dic[winner[-1]]:
winner.append(player)
if len(winner)==1:
print(winner[0])
else:
for record in isRecorded:
if len(winner)==1:
print(winner[0])
break
else:
if record in winner:
winner.remove(record)
if __name__=='__main__':
Berland_cardGame()
| [
"[email protected]"
]
| |
aaced4595be61166c67bc9e708fcdcf08989b133 | 45dd427ec7450d2fac6fe2454f54a130b509b634 | /homework_6/a2.py | 43866c4d2513ffd176bec3aca244d43524336665 | []
| no_license | weka511/smac | 702fe183e3e73889ec663bc1d75bcac07ebb94b5 | 0b257092ff68058fda1d152d5ea8050feeab6fe2 | refs/heads/master | 2022-07-02T14:24:26.370766 | 2022-06-13T00:07:36 | 2022-06-13T00:07:36 | 33,011,960 | 22 | 8 | null | null | null | null | UTF-8 | Python | false | false | 3,544 | py | '''
Path sampling: A firework of algorithms
This program encompasses both versions of the program from step A2.
Function 'evolve' carries out the Markov Chain Monte Carlo evolution,
'plot' produces the graphs, and 'compare' allows us to judge whether the
distributions match.
'''
import random, math, pylab
alpha = 0.5
nsteps = 1000000
def gauss_cut(cut=1.0):
while True:
x = random.gauss(0.0, 1.0)
if abs(x) <= cut:
return x
def compare(x1s,y1s,x2s,y2s,bins=(30,30),xrange=(-1,+1),yrange=(-1,1)):
'''
    Compare samples from two 2D distributions by generating counts for two
histograms, then calculating and plotting ratios.
Ideally we should see small random variations about unity, not
systematic differences, as long as the two distributions are the same.
Arguments:
       x1s      X coordinates of points sampled from 1st distribution
       y1s      Y coordinates of points sampled from 1st distribution
       x2s      X coordinates of points sampled from 2nd distribution
       y2s      Y coordinates of points sampled from 2nd distribution
bins Number of bins (X & Y) for data
xrange Range of x data
yrange Range of y data
'''
w,h=bins
xmin,xmax=xrange
ymin,ymax=yrange
def histogram(xs,ys):
def index (u,umin,umax,r):
return int((r-1)*(u-umin)/(umax-umin))
counts = [[0 for x in range(w)] for y in range(h)]
for x,y in zip(xs,ys):
i = index(x,xmin,xmax,w)
j = index(y,ymin,ymax,h)
counts[i][j]+=1
return counts
h1=[item for sublist in histogram(x1s,y1s) for item in sublist]
h2=[item for sublist in histogram(x2s,y2s) for item in sublist]
h3=[abs (a/b if b>0 else 1 if a==0 else 0) for (a,b) in zip(h1,h2)]
iis = [i for i in range(len(h1))]
pylab.plot(iis,h3,'g') # iis,h1,'r',iis,h2,'b',
def evolve(proposer=lambda: random.uniform(-1.0, 1.0),
accepter=lambda u:math.exp(-0.5 * u ** 2 - alpha * u ** 4 )):
'''
Perform Markov Chain Monte Carlo evolution
Arguments:
proposer Function which proposes data to be used for the next step
accepter Function which decides whether to accept proposed value
'''
samples_x = []
samples_y = []
x, y = 0.0, 0.0
for step in range(nsteps):
if step % 2 == 0:
while True:
x = proposer()
p = accepter(x)
if random.uniform(0.0, 1.0) < p:
break
else:
while True:
y = proposer()
p = accepter(y)
if random.uniform(0.0, 1.0) < p:
break
samples_x.append(x)
samples_y.append(y)
return (samples_x, samples_y)
def plot(name,samples_x, samples_y):
pylab.hexbin(samples_x, samples_y, gridsize=50, bins=1000)
pylab.axis([-1.0, 1.0, -1.0, 1.0])
cb = pylab.colorbar()
pylab.xlabel('x')
pylab.ylabel('y')
pylab.title(name)
pylab.savefig('{0}.png'.format(name))
# Evolve and plot with uniform distribution
pylab.figure(1)
(x1s, y1s)=evolve()
plot('A2_1',x1s, y1s)
# Evolve and plot with gauss_cut
pylab.figure(2)
(x2s, y2s)=evolve(proposer=gauss_cut,
accepter=lambda u:math.exp(- alpha * u ** 4 ))
plot('A2_2',x2s, y2s)
pylab.figure(3)
compare(x1s,y1s,x2s,y2s)
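# Illustrative note (not part of the original script): both runs target the same
# per-coordinate density, proportional to exp(-u**2/2 - alpha*u**4) on [-1, 1]. With
# the uniform proposer the full factor exp(-0.5*u**2 - alpha*u**4) is used as the
# acceptance probability; with gauss_cut the Gaussian part is already built into the
# proposal, so only exp(-alpha*u**4) remains -- which is why compare() should show
# ratios close to 1 apart from sampling noise.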
pylab.show() | [
"[email protected]"
]
| |
31d16535084b7bbe5bd6380d13b40cdeb814e697 | 1b2a1f807b98034567e936b9b5c76c2fc89b908a | /adj_tf/models/albert/modeling_tf_albert.py | 41dc434e0cb716761948190ee55d9a1250aa5a9e | []
| no_license | Adreambottle/Transformer2GP | 48c955d8eb155caef4c24a3c03ee3aa9ab0bd3da | 5ba1a5005c2ad21066304cdeb1d7c2587c8191da | refs/heads/main | 2023-07-07T14:17:51.673437 | 2021-08-17T14:14:56 | 2021-08-17T14:14:56 | 397,279,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70,146 | py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 ALBERT model. """
import math
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
MULTIPLE_CHOICE_DUMMY_INPUTS,
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFBaseModelOutputWithPooling,
TFMaskedLMOutput,
TFMultipleChoiceModelOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFMaskedLanguageModelingLoss,
TFModelInputType,
TFMultipleChoiceLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFTokenClassificationLoss,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_albert import AlbertConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "AlbertConfig"
_TOKENIZER_FOR_DOC = "AlbertTokenizer"
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"albert-base-v1",
"albert-large-v1",
"albert-xlarge-v1",
"albert-xxlarge-v1",
"albert-base-v2",
"albert-large-v2",
"albert-xlarge-v2",
"albert-xxlarge-v2",
# See all ALBERT models at https://huggingface.co/models?filter=albert
]
class TFAlbertPreTrainingLoss:
"""
Loss function suitable for ALBERT pretraining, that is, the task of pretraining a language model by combining SOP +
    MLM.

    .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
"""
def compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
# make sure only labels that are not equal to -100
# are taken into account as loss
masked_lm_active_loss = tf.not_equal(tf.reshape(tensor=labels["labels"], shape=(-1,)), -100)
masked_lm_reduced_logits = tf.boolean_mask(
tensor=tf.reshape(tensor=logits[0], shape=(-1, shape_list(logits[0])[2])),
mask=masked_lm_active_loss,
)
masked_lm_labels = tf.boolean_mask(
tensor=tf.reshape(tensor=labels["labels"], shape=(-1,)), mask=masked_lm_active_loss
)
sentence_order_active_loss = tf.not_equal(tf.reshape(tensor=labels["sentence_order_label"], shape=(-1,)), -100)
sentence_order_reduced_logits = tf.boolean_mask(
tensor=tf.reshape(tensor=logits[1], shape=(-1, 2)), mask=sentence_order_active_loss
)
sentence_order_label = tf.boolean_mask(
tensor=tf.reshape(tensor=labels["sentence_order_label"], shape=(-1,)), mask=sentence_order_active_loss
)
masked_lm_loss = loss_fn(y_true=masked_lm_labels, y_pred=masked_lm_reduced_logits)
sentence_order_loss = loss_fn(y_true=sentence_order_label, y_pred=sentence_order_reduced_logits)
masked_lm_loss = tf.reshape(tensor=masked_lm_loss, shape=(-1, shape_list(sentence_order_loss)[0]))
masked_lm_loss = tf.reduce_mean(input_tensor=masked_lm_loss, axis=0)
return masked_lm_loss + sentence_order_loss
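# Illustrative sketch (not part of the original module): how the boolean masks in
# TFAlbertPreTrainingLoss.compute_loss drop positions labelled -100. The tiny tensor
# below is an assumption invented for the example.
#
#     labels = tf.constant([[5, -100, 7]])
#     active = tf.not_equal(tf.reshape(labels, (-1,)), -100)   # [True, False, True]
#     tf.boolean_mask(tf.reshape(labels, (-1,)), active)       # -> [5, 7]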
class TFAlbertEmbeddings(tf.keras.layers.Layer):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config: AlbertConfig, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.type_vocab_size = config.type_vocab_size
self.embedding_size = config.embedding_size
self.max_position_embeddings = config.max_position_embeddings
self.initializer_range = config.initializer_range
self.embeddings_sum = tf.keras.layers.Add()
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def build(self, input_shape: tf.TensorShape):
with tf.name_scope("word_embeddings"):
self.weight = self.add_weight(
name="weight",
shape=[self.vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("token_type_embeddings"):
self.token_type_embeddings = self.add_weight(
name="embeddings",
shape=[self.type_vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("position_embeddings"):
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.max_position_embeddings, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
super().build(input_shape)
# Copied from adj_tf.models.bert.modeling_tf_bert.TFBertEmbeddings.call
def call(
self,
input_ids: tf.Tensor = None,
position_ids: tf.Tensor = None,
token_type_ids: tf.Tensor = None,
inputs_embeds: tf.Tensor = None,
training: bool = False,
) -> tf.Tensor:
"""
Applies embedding based on inputs tensor.
Returns:
final_embeddings (:obj:`tf.Tensor`): output embedding tensor.
"""
assert not (input_ids is None and inputs_embeds is None)
if input_ids is not None:
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
if position_ids is None:
position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1))
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings = self.embeddings_sum(inputs=[inputs_embeds, position_embeds, token_type_embeds])
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings
class TFAlbertAttention(tf.keras.layers.Layer):
""" Contains the complete attention sublayer, including both dropouts and layer norm. """
def __init__(self, config: AlbertConfig, **kwargs):
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number "
f"of attention heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
self.output_attentions = config.output_attentions
self.query = tf.keras.layers.Dense(
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
)
self.key = tf.keras.layers.Dense(
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
)
self.value = tf.keras.layers.Dense(
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
)
self.dense = tf.keras.layers.Dense(
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
# Two different dropout probabilities; see https://github.com/google-research/albert/blob/master/modeling.py#L971-L993
self.attention_dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
self.output_dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
# Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
# Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
return tf.transpose(tensor, perm=[0, 2, 1, 3])
def call(
self,
input_tensor: tf.Tensor,
attention_mask: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
training: bool = False,
) -> Tuple[tf.Tensor]:
batch_size = shape_list(input_tensor)[0]
mixed_query_layer = self.query(inputs=input_tensor)
mixed_key_layer = self.key(inputs=input_tensor)
mixed_value_layer = self.value(inputs=input_tensor)
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
# Take the dot product between "query" and "key" to get the raw attention scores.
# (batch size, num_heads, seq_len_q, seq_len_k)
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
attention_scores = tf.divide(attention_scores, dk)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in TFAlbertModel call() function)
attention_scores = tf.add(attention_scores, attention_mask)
# Normalize the attention scores to probabilities.
attention_probs = tf.nn.softmax(logits=attention_scores, axis=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.attention_dropout(inputs=attention_probs, training=training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = tf.multiply(attention_probs, head_mask)
context_layer = tf.matmul(attention_probs, value_layer)
context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
# (batch_size, seq_len_q, all_head_size)
context_layer = tf.reshape(tensor=context_layer, shape=(batch_size, -1, self.all_head_size))
self_outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
hidden_states = self_outputs[0]
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.output_dropout(inputs=hidden_states, training=training)
attention_output = self.LayerNorm(inputs=hidden_states + input_tensor)
# add attentions if we output them
outputs = (attention_output,) + self_outputs[1:]
return outputs
class TFAlbertLayer(tf.keras.layers.Layer):
def __init__(self, config: AlbertConfig, **kwargs):
super().__init__(**kwargs)
self.attention = TFAlbertAttention(config, name="attention")
self.ffn = tf.keras.layers.Dense(
units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn"
)
if isinstance(config.hidden_act, str):
self.activation = get_tf_activation(config.hidden_act)
else:
self.activation = config.hidden_act
self.ffn_output = tf.keras.layers.Dense(
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn_output"
)
self.full_layer_layer_norm = tf.keras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, name="full_layer_layer_norm"
)
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
training: bool = False,
) -> Tuple[tf.Tensor]:
attention_outputs = self.attention(
input_tensor=hidden_states,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
training=training,
)
ffn_output = self.ffn(inputs=attention_outputs[0])
ffn_output = self.activation(ffn_output)
ffn_output = self.ffn_output(inputs=ffn_output)
ffn_output = self.dropout(inputs=ffn_output, training=training)
hidden_states = self.full_layer_layer_norm(inputs=ffn_output + attention_outputs[0])
# add attentions if we output them
outputs = (hidden_states,) + attention_outputs[1:]
return outputs
class TFAlbertLayerGroup(tf.keras.layers.Layer):
def __init__(self, config: AlbertConfig, **kwargs):
super().__init__(**kwargs)
self.albert_layers = [
TFAlbertLayer(config, name="albert_layers_._{}".format(i)) for i in range(config.inner_group_num)
]
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
output_hidden_states: bool,
training: bool = False,
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
layer_hidden_states = () if output_hidden_states else None
layer_attentions = () if output_attentions else None
for layer_index, albert_layer in enumerate(self.albert_layers):
if output_hidden_states:
layer_hidden_states = layer_hidden_states + (hidden_states,)
layer_output = albert_layer(
hidden_states=hidden_states,
attention_mask=attention_mask,
head_mask=head_mask[layer_index],
output_attentions=output_attentions,
training=training,
)
hidden_states = layer_output[0]
if output_attentions:
layer_attentions = layer_attentions + (layer_output[1],)
# Add last layer
if output_hidden_states:
layer_hidden_states = layer_hidden_states + (hidden_states,)
return tuple(v for v in [hidden_states, layer_hidden_states, layer_attentions] if v is not None)
class TFAlbertTransformer(tf.keras.layers.Layer):
def __init__(self, config: AlbertConfig, **kwargs):
super().__init__(**kwargs)
self.num_hidden_layers = config.num_hidden_layers
self.num_hidden_groups = config.num_hidden_groups
# Number of layers in a hidden group
self.layers_per_group = int(config.num_hidden_layers / config.num_hidden_groups)
self.embedding_hidden_mapping_in = tf.keras.layers.Dense(
units=config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
name="embedding_hidden_mapping_in",
)
self.albert_layer_groups = [
TFAlbertLayerGroup(config, name="albert_layer_groups_._{}".format(i))
for i in range(config.num_hidden_groups)
]
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
output_hidden_states: bool,
return_dict: bool,
training: bool = False,
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
hidden_states = self.embedding_hidden_mapping_in(inputs=hidden_states)
all_attentions = () if output_attentions else None
all_hidden_states = (hidden_states,) if output_hidden_states else None
for i in range(self.num_hidden_layers):
# Index of the hidden group
group_idx = int(i / (self.num_hidden_layers / self.num_hidden_groups))
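            # Illustrative mapping (added note, not in the original source): with num_hidden_layers=12 and
            # num_hidden_groups=1 (the usual ALBERT configuration), every layer i maps to group_idx=0, so the
            # single TFAlbertLayerGroup is reused 12 times; with num_hidden_groups=3, layers 0-3 use group 0,
            # layers 4-7 group 1, and layers 8-11 group 2 (layers_per_group == 4).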
layer_group_output = self.albert_layer_groups[group_idx](
hidden_states=hidden_states,
attention_mask=attention_mask,
head_mask=head_mask[group_idx * self.layers_per_group : (group_idx + 1) * self.layers_per_group],
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
training=training,
)
hidden_states = layer_group_output[0]
if output_attentions:
all_attentions = all_attentions + layer_group_output[-1]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class TFAlbertPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = AlbertConfig
base_model_prefix = "albert"
class TFAlbertMLMHead(tf.keras.layers.Layer):
def __init__(self, config: AlbertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.embedding_size = config.embedding_size
self.dense = tf.keras.layers.Dense(
config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
if isinstance(config.hidden_act, str):
self.activation = get_tf_activation(config.hidden_act)
else:
self.activation = config.hidden_act
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = input_embeddings
def build(self, input_shape: tf.TensorShape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
self.decoder_bias = self.add_weight(
shape=(self.vocab_size,), initializer="zeros", trainable=True, name="decoder/bias"
)
super().build(input_shape)
def get_output_embeddings(self) -> tf.keras.layers.Layer:
return self.decoder
def set_output_embeddings(self, value: tf.Variable):
self.decoder.weight = value
self.decoder.vocab_size = shape_list(value)[0]
def get_bias(self) -> Dict[str, tf.Variable]:
return {"bias": self.bias, "decoder_bias": self.decoder_bias}
def set_bias(self, value: tf.Variable):
self.bias = value["bias"]
self.decoder_bias = value["decoder_bias"]
self.vocab_size = shape_list(value["bias"])[0]
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.LayerNorm(inputs=hidden_states)
seq_length = shape_list(tensor=hidden_states)[1]
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True)
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])
hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.decoder_bias)
return hidden_states
@keras_serializable
class TFAlbertMainLayer(tf.keras.layers.Layer):
config_class = AlbertConfig
def __init__(self, config: AlbertConfig, add_pooling_layer: bool = True, **kwargs):
super().__init__(**kwargs)
self.config = config
self.embeddings = TFAlbertEmbeddings(config, name="embeddings")
self.encoder = TFAlbertTransformer(config, name="encoder")
self.pooler = (
tf.keras.layers.Dense(
units=config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
activation="tanh",
name="pooler",
)
if add_pooling_layer
else None
)
def get_input_embeddings(self) -> tf.keras.layers.Layer:
return self.embeddings
def set_input_embeddings(self, value: tf.Variable):
self.embeddings.weight = value
self.embeddings.vocab_size = shape_list(value)[0]
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
raise NotImplementedError
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
**kwargs,
) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["attention_mask"] is None:
inputs["attention_mask"] = tf.fill(dims=input_shape, value=1)
if inputs["token_type_ids"] is None:
inputs["token_type_ids"] = tf.fill(dims=input_shape, value=0)
embedding_output = self.embeddings(
input_ids=inputs["input_ids"],
position_ids=inputs["position_ids"],
token_type_ids=inputs["token_type_ids"],
inputs_embeds=inputs["inputs_embeds"],
training=inputs["training"],
)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = tf.reshape(inputs["attention_mask"], (input_shape[0], 1, 1, input_shape[1]))
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
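        # For example, an attention_mask row [1, 1, 0] becomes an additive bias [0.0, 0.0, -10000.0] here,
        # which effectively removes the masked position from the attention softmax.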
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if inputs["head_mask"] is not None:
raise NotImplementedError
else:
inputs["head_mask"] = [None] * self.config.num_hidden_layers
encoder_outputs = self.encoder(
hidden_states=embedding_output,
attention_mask=extended_attention_mask,
head_mask=inputs["head_mask"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(inputs=sequence_output[:, 0]) if self.pooler is not None else None
if not inputs["return_dict"]:
return (
sequence_output,
pooled_output,
) + encoder_outputs[1:]
return TFBaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@dataclass
class TFAlbertForPreTrainingOutput(ModelOutput):
"""
Output type of :class:`~adj_tf.TFAlbertForPreTraining`.
Args:
prediction_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
sop_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: tf.Tensor = None
prediction_logits: tf.Tensor = None
sop_logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
ALBERT_START_DOCSTRING = r"""
This model inherits from :class:`~adj_tf.TFPreTrainedModel`. Check the superclass documentation for the
    generic methods the library implements for all its models (such as downloading or saving, resizing the input
embeddings, pruning heads etc.)
This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use
it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage
and behavior.
.. note::
        TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all
the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
        the first positional argument:
        - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Args:
config (:class:`~adj_tf.AlbertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~adj_tf.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
ALBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~adj_tf.AlbertTokenizer`. See
:func:`adj_tf.PreTrainedTokenizer.__call__` and :func:`adj_tf.PreTrainedTokenizer.encode` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~adj_tf.file_utils.ModelOutput` instead of a plain tuple. This
argument can be used in eager mode, in graph mode the value will always be set to True.
training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare Albert Model transformer outputting raw hidden-states without any specific head on top.",
ALBERT_START_DOCSTRING,
)
class TFAlbertModel(TFAlbertPreTrainedModel):
def __init__(self, config: AlbertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.albert = TFAlbertMainLayer(config, name="albert")
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="albert-base-v2",
output_type=TFBaseModelOutputWithPooling,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.albert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
# Copied from adj_tf.models.bert.modeling_tf_bert.TFBertModel.serving_output
def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutputWithPooling(
last_hidden_state=output.last_hidden_state,
pooler_output=output.pooler_output,
hidden_states=hs,
attentions=attns,
)
@add_start_docstrings(
"""
Albert Model with two heads on top for pretraining: a `masked language modeling` head and a `sentence order
prediction` (classification) head.
""",
ALBERT_START_DOCSTRING,
)
class TFAlbertForPreTraining(TFAlbertPreTrainedModel, TFAlbertPreTrainingLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"predictions.decoder.weight"]
def __init__(self, config: AlbertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.albert = TFAlbertMainLayer(config, name="albert")
self.predictions = TFAlbertMLMHead(config, input_embeddings=self.albert.embeddings, name="predictions")
self.sop_classifier = TFAlbertSOPHead(config, name="sop_classifier")
def get_lm_head(self) -> tf.keras.layers.Layer:
return self.predictions
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=TFAlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
sentence_order_label: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFAlbertForPreTrainingOutput, Tuple[tf.Tensor]]:
r"""
Return:
Example::
>>> import tensorflow as tf
>>> from adj_tf import AlbertTokenizer, TFAlbertForPreTraining
>>> tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
>>> model = TFAlbertForPreTraining.from_pretrained('albert-base-v2')
>>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_logits = outputs.prediction_logits
>>> sop_logits = outputs.sop_logits
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
sentence_order_label=sentence_order_label,
training=training,
kwargs_call=kwargs,
)
outputs = self.albert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output, pooled_output = outputs[:2]
prediction_scores = self.predictions(hidden_states=sequence_output)
sop_scores = self.sop_classifier(pooled_output=pooled_output, training=inputs["training"])
total_loss = None
if inputs["labels"] is not None and inputs["sentence_order_label"] is not None:
d_labels = {"labels": inputs["labels"]}
d_labels["sentence_order_label"] = inputs["sentence_order_label"]
total_loss = self.compute_loss(labels=d_labels, logits=(prediction_scores, sop_scores))
if not inputs["return_dict"]:
output = (prediction_scores, sop_scores) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return TFAlbertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
sop_logits=sop_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output: TFAlbertForPreTrainingOutput) -> TFAlbertForPreTrainingOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFAlbertForPreTrainingOutput(
prediction_logits=output.prediction_logits,
sop_logits=output.sop_logits,
hidden_states=hs,
attentions=attns,
)
class TFAlbertSOPHead(tf.keras.layers.Layer):
def __init__(self, config: AlbertConfig, **kwargs):
super().__init__(**kwargs)
self.dropout = tf.keras.layers.Dropout(rate=config.classifier_dropout_prob)
self.classifier = tf.keras.layers.Dense(
units=config.num_labels,
kernel_initializer=get_initializer(config.initializer_range),
name="classifier",
)
def call(self, pooled_output: tf.Tensor, training: bool) -> tf.Tensor:
dropout_pooled_output = self.dropout(inputs=pooled_output, training=training)
logits = self.classifier(inputs=dropout_pooled_output)
return logits
@add_start_docstrings("""Albert Model with a `language modeling` head on top. """, ALBERT_START_DOCSTRING)
class TFAlbertForMaskedLM(TFAlbertPreTrainedModel, TFMaskedLanguageModelingLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions.decoder.weight"]
def __init__(self, config: AlbertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert")
self.predictions = TFAlbertMLMHead(config, input_embeddings=self.albert.embeddings, name="predictions")
def get_lm_head(self) -> tf.keras.layers.Layer:
return self.predictions
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="albert-base-v2",
output_type=TFMaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
            (masked); the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.albert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
prediction_scores = self.predictions(hidden_states=sequence_output, training=inputs["training"])
loss = (
None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=prediction_scores)
)
if not inputs["return_dict"]:
output = (prediction_scores,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFMaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from adj_tf.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
""",
ALBERT_START_DOCSTRING,
)
class TFAlbertForSequenceClassification(TFAlbertPreTrainedModel, TFSequenceClassificationLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"predictions"]
_keys_to_ignore_on_load_missing = [r"dropout"]
def __init__(self, config: AlbertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.albert = TFAlbertMainLayer(config, name="albert")
self.dropout = tf.keras.layers.Dropout(rate=config.classifier_dropout_prob)
self.classifier = tf.keras.layers.Dense(
units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="albert-base-v2",
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ...,
            config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss).
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.albert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
pooled_output = outputs[1]
pooled_output = self.dropout(inputs=pooled_output, training=inputs["training"])
logits = self.classifier(inputs=pooled_output)
loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from adj_tf.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
ALBERT_START_DOCSTRING,
)
class TFAlbertForTokenClassification(TFAlbertPreTrainedModel, TFTokenClassificationLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"]
_keys_to_ignore_on_load_missing = [r"dropout"]
def __init__(self, config: AlbertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(
units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="albert-base-v2",
output_type=TFTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.albert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
            return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
sequence_output = self.dropout(inputs=sequence_output, training=inputs["training"])
logits = self.classifier(inputs=sequence_output)
loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from adj_tf.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output
def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
ALBERT_START_DOCSTRING,
)
class TFAlbertForQuestionAnswering(TFAlbertPreTrainedModel, TFQuestionAnsweringLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"]
def __init__(self, config: AlbertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert")
self.qa_outputs = tf.keras.layers.Dense(
units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="albert-base-v2",
output_type=TFQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
r"""
start_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
sequence are not taken into account for computing the loss.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
start_positions=start_positions,
end_positions=end_positions,
training=training,
kwargs_call=kwargs,
)
outputs = self.albert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.qa_outputs(inputs=sequence_output)
start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
start_logits = tf.squeeze(input=start_logits, axis=-1)
end_logits = tf.squeeze(input=end_logits, axis=-1)
loss = None
if inputs["start_positions"] is not None and inputs["end_positions"] is not None:
labels = {"start_position": inputs["start_positions"]}
labels["end_position"] = inputs["end_positions"]
loss = self.compute_loss(labels=labels, logits=(start_logits, end_logits))
if not inputs["return_dict"]:
output = (start_logits, end_logits) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFQuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from adj_tf.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output
def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFQuestionAnsweringModelOutput(
start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
)
@add_start_docstrings(
"""
Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
ALBERT_START_DOCSTRING,
)
class TFAlbertForMultipleChoice(TFAlbertPreTrainedModel, TFMultipleChoiceLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"]
_keys_to_ignore_on_load_missing = [r"dropout"]
def __init__(self, config: AlbertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.albert = TFAlbertMainLayer(config, name="albert")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(
units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@property
def dummy_inputs(self):
"""
Dummy inputs to build the network.
Returns:
tf.Tensor with dummy inputs
"""
return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)}
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="albert-base-v2",
output_type=TFMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None:
num_choices = shape_list(inputs["input_ids"])[1]
seq_length = shape_list(inputs["input_ids"])[2]
else:
num_choices = shape_list(inputs["inputs_embeds"])[1]
seq_length = shape_list(inputs["inputs_embeds"])[2]
flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
flat_attention_mask = (
tf.reshape(tensor=inputs["attention_mask"], shape=(-1, seq_length))
if inputs["attention_mask"] is not None
else None
)
flat_token_type_ids = (
tf.reshape(tensor=inputs["token_type_ids"], shape=(-1, seq_length))
if inputs["token_type_ids"] is not None
else None
)
        flat_position_ids = (
            tf.reshape(tensor=inputs["position_ids"], shape=(-1, seq_length)) if inputs["position_ids"] is not None else None
        )
flat_inputs_embeds = (
tf.reshape(tensor=inputs["inputs_embeds"], shape=(-1, seq_length, shape_list(inputs["inputs_embeds"])[3]))
if inputs["inputs_embeds"] is not None
else None
)
outputs = self.albert(
input_ids=flat_input_ids,
attention_mask=flat_attention_mask,
token_type_ids=flat_token_type_ids,
position_ids=flat_position_ids,
head_mask=inputs["head_mask"],
inputs_embeds=flat_inputs_embeds,
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
pooled_output = outputs[1]
pooled_output = self.dropout(inputs=pooled_output, training=inputs["training"])
logits = self.classifier(inputs=pooled_output)
reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=reshaped_logits)
if not inputs["return_dict"]:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
"token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"),
}
]
)
# Copied from adj_tf.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving
def serving(self, inputs: Dict[str, tf.Tensor]) -> TFMultipleChoiceModelOutput:
output = self.call(input_ids=inputs)
return self.serving_output(output)
# Copied from adj_tf.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output
def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
b907f96478917192ab46c9bd004800704b20c2dd | 25f79d934fe25d67f5f9bcf464c52736e684a532 | /singlecell/pipeline/map_patient_virus.py | eef68d84aecb45e9b4643fd3631259d378debba5 | [
"MIT"
]
| permissive | iosonofabio/Zanini_et_al_DENV_patients_2018 | f6e581a9db773fad49e491830fe36ab4b33a5c03 | 9d68c929d9d09d12ced9ade2d07673af2d142aa0 | refs/heads/master | 2023-02-20T18:44:22.603678 | 2018-09-23T18:27:28 | 2018-09-23T18:27:28 | 140,030,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,081 | py | # vim: fdm=indent
'''
author: Fabio Zanini
date: 03/06/18
content: Pipeline for virus mapping within patients AFTER the rough virus
reads have been identified in the Snakemake pipeline. The thing is
Snakemake is VERY slow to construct that graph ;-)
'''
import os
import sys
import numpy as np
import pysam
import glob
import subprocess as sp
import shutil
import argparse
from singlecell.filenames import experiments_foldername, get_stampy_exec_filename
def shell(call, env=None):
if env is None:
env = os.environ.copy()
return sp.run(call, check=True, shell=True, env=env)
def pq(query_qualities):
    '''Convert a list of Phred quality scores into a FASTQ-style quality string (ASCII offset 33).'''
    qstring = ''.join([chr(q + 33) for q in query_qualities])
    return qstring
def rc(seq, qual):
    '''Return the reverse complement of seq together with the reversed quality string.'''
    d = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
    return (''.join([d[x] for x in seq])[::-1], qual[::-1])
def read_dict(read):
seq = read.query_sequence
qual = pq(read.query_qualities)
# reverse reads in BAM are transformed into positive strand, go back
if read.is_reverse:
(seq, qual) = rc(seq, qual)
return {
'name': read.qname,
'seq': seq,
'qual': qual,
}
if __name__ == '__main__':
pa = argparse.ArgumentParser(description='Patient virus mapping pipeline')
pa.add_argument('--experiments', nargs='+', required=True,
help='experiments to process')
pa.add_argument('--virus', choices=['dengue', 'zika'],
default='dengue',
help='What virus to remap to')
args = pa.parse_args()
virus = args.virus
for expname in args.experiments:
print(expname)
root_fdn = experiments_foldername+expname+'/'
raw_reads_fn = root_fdn+virus+'_reads.bam'
raw_reads_fastq_fns = [root_fdn+virus+'_read1.fastq', root_fdn+virus+'_read2.fastq']
remap_reads_fn = root_fdn+virus+'_remapped.bam'
reference_fn = root_fdn+virus+'_reference_hybrid.fasta'
if os.path.isfile(remap_reads_fn):
print('Remapped already, skip')
continue
print('First, make fastqs out of the bam')
with pysam.AlignmentFile(raw_reads_fn, 'rb') as bamfile,\
open(raw_reads_fastq_fns[0], 'wt') as fr1,\
open(raw_reads_fastq_fns[1], 'wt') as fr2:
fr_out = [fr1, fr2]
readname = None
pair = []
bfs = [[], []]
for read in bamfile:
if (read.qname != readname) and (len(pair) == 2):
for bf, d in zip(bfs, pair):
bf.append('@{:}\n{:}\n+\n{:}\n'.format(
d['name'],
d['seq'],
d['qual']))
# Keep buffers from overflowing
if len(bfs[0]) > 1000:
for bf, fr in zip(bfs, fr_out):
fr.write(''.join(bf))
bfs = [[], []]
pair = [read_dict(read)]
readname = read.qname
elif (read.qname == readname) and (len(pair) == 1):
pair.append(read_dict(read))
readname = read.qname
# Special case for the initial line
elif readname is None:
pair.append(read_dict(read))
readname = read.qname
else:
raise ValueError('Mwo ya?')
# Empty buffers
for bf, fr in zip(bfs, fr_out):
fr.write(''.join(bf))
bfs = [[], []]
print('Remap via stampy')
output_sam=remap_reads_fn[:-3]+'sam'
output_index=remap_reads_fn[:-3]+'stidx'
output_hash=remap_reads_fn[:-3]+'sthash'
output_prefix_sg='/stampy/'+os.path.basename(output_index[:-6])
reference_folder=os.path.dirname(reference_fn)
reference_sg='/stampy_reference/'+os.path.basename(reference_fn)
input_sg=['/stampy_input/'+os.path.basename(i) for i in raw_reads_fastq_fns]
output_sam_sg='/stampy/'+os.path.basename(output_sam)
input_folder=os.path.dirname(raw_reads_fn)
output_folder=os.path.dirname(output_index)
stampy=get_stampy_exec_filename()
stampy_call='singularity run -B '+output_folder+':/stampy -B '+input_folder+':/stampy_input -B '+reference_folder+':/stampy_reference '+stampy
shell("rm -f {:} {:} {:}".format(output_sam, output_index, output_hash))
shell(stampy_call+" -G {:} {:}".format(output_prefix_sg, reference_sg))
shell(stampy_call+" -g {:} -H {:}".format(output_prefix_sg, output_prefix_sg))
shell(stampy_call+" -g {:} -h {:} -o {:} --inputformat=fastq --substitutionrate=0.05 --sensitive -M {:} {:}".format(output_prefix_sg, output_prefix_sg, output_sam_sg, input_sg[0], input_sg[1]))
shell("samtools view -bT {:} {:} > {:}".format(reference_fn, output_sam, remap_reads_fn))
shell("rm {:}".format(output_sam))
1979fca7aa9b1817738c9706a16ba34f22f64692 | 4908b1d34d69c1cb652f25049552562574e1075f | /2020/Day-22/Crab_Combat/example.py | 25da40021ae28995dac1a997eebd358fed3a5fe5 | [
"MIT"
]
| permissive | sreekesari-vangeepuram/adventofcode | 3d4ad98a25a30640182d928538b421e00ad8259d | 645531be0208affe042ac0328105b9ef3cfc9dbf | refs/heads/main | 2023-07-26T13:36:03.036721 | 2021-08-11T08:27:25 | 2021-08-11T08:27:25 | 317,850,039 | 1 | 0 | MIT | 2021-08-11T08:27:26 | 2020-12-02T12:08:13 | Go | UTF-8 | Python | false | false | 973 | py | #!/usr/bin/env python
from typing import List, Tuple
def play_space_cards(p1: List[int], p2: List[int]) -> Tuple[str, List[int]]:
b1, b2 = 0, 0 # buffer spaces for both players to space their cards
while len(p1) !=0 and len(p2)!= 0:
b1, b2 = p1.pop(0), p2.pop(0)
if b1 > b2:
p1.extend([b1, b2])
else:
p2.extend([b2, b1])
if len(p1) != 0:
return "Player_1", p1
return "Player_2", p2
def count_score(winner_deck: List[int]) -> int:
accumulator = 0
for card, multiplier in zip(winner_deck, list(reversed(range(1, len(winner_deck)+1)))):
accumulator += card * multiplier
return accumulator
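# Illustrative check (hand-worked from the puzzle's published example, assuming sample.txt holds those decks):
# the winning deck [3, 2, 10, 6, 8, 5, 9, 4, 7, 1] scores
# 3*10 + 2*9 + 10*8 + 6*7 + 8*6 + 5*5 + 9*4 + 4*3 + 7*2 + 1*1 = 306.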
decks = open("sample.txt").read().strip().split("\n\n")
player_1 = list(map(int, decks[0].split("\n")[1:]))
player_2 = list(map(int, decks[1].split("\n")[1:]))
winner, winner_deck = play_space_cards(player_1, player_2)
print(f"Combat: {winner} won with score {count_score(winner_deck)}!")
39d331e59d88c829c46113d50cfb446786f0fdfa | 0d78474be6255f053d69d081d69caed76e46fe48 | /aol/facilities/models.py | faab6649b33bb74829f6a6998b92ca45b8eba82b | []
| no_license | conwayb/aol | 5eff86ce1addaeb82d6437d1f548409e2b962e6e | d29538a502d028574e142baca508db5bfc4430ca | refs/heads/master | 2020-04-05T21:32:20.035371 | 2016-11-04T23:59:04 | 2016-11-04T23:59:04 | 12,762,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,885 | py | import requests
from django.contrib.gis.db import models
from django.contrib.gis.gdal import CoordTransform, SpatialReference
from django.contrib.gis.geos import Point
from django.db import transaction
class FacilityManager(models.Manager):
def to_kml(self, bbox):
return Facility.objects.all().extra(
select={'kml': 'st_askml(the_geom)'},
where=[
"the_geom && st_setsrid(st_makebox2d(st_point(%s, %s), st_point(%s, %s)), 3644)",
],
params=bbox
)
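    # Illustrative usage sketch (assumption): bbox is a 4-tuple (x1, y1, x2, y2) in the same SRID as
    # the_geom (3644), e.g. Facility.objects.to_kml(bbox=(2200000, 200000, 2300000, 300000)) returns
    # facilities intersecting that box, each annotated with a `kml` attribute.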
def reimport(self):
"""
Connects to the Oregon facility JSON endpoint and reimports all the
facilities
"""
response = requests.get("https://data.oregon.gov/resource/spxe-q5vj.json")
js = response.json()
# the data source uses WGS84 coords, so we have to transform them
gcoord = SpatialReference(4326)
mycoord = SpatialReference(3644)
trans = CoordTransform(gcoord, mycoord)
with transaction.atomic():
# wipe out the existing facilties
Facility.objects.all().delete()
for row in js:
try:
p = Point(float(row['location']['longitude']), float(row['location']['latitude']), srid=4326)
except KeyError:
continue
p.transform(trans)
f = Facility(
name=row['boating_facility_name'],
managed_by=row.get('managed_by', ''),
telephone=row.get('telephone', {}).get('phone_number', ''),
ramp_type=row.get('ramp_type_lanes', ''),
trailer_parking=row.get('trailer_parking', ''),
moorage=row.get('moorage', ''),
launch_fee=row.get('launch_fee', ''),
restroom=row.get('restroom', ''),
supplies=row.get('supplies', ''),
gas_on_water=row.get('gas_on_the_water', ''),
diesel_on_water=row.get('diesel_on_the_water', ''),
waterbody=row.get('waterbody', ''),
fish_cleaning=row.get('fish_cleaning_station', ''),
pumpout=row.get('pumpout', ''),
dump_station=row.get('dump_station', ''),
the_geom=p,
icon_url=row.get('boater_services', ''),
)
f.save()
class Facility(models.Model):
facility_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=254, db_column="facilityna")
waterbody = models.CharField(max_length=254)
islake = models.IntegerField()
type = models.CharField(max_length=254)
telephone = models.CharField(max_length=254)
ramp_type = models.CharField(max_length=254, db_column="ramp_type_")
moorage = models.CharField(max_length=254)
trailer_parking = models.CharField(max_length=254, db_column="trailer_pa")
transient = models.CharField(max_length=254)
launch_fee = models.CharField(max_length=254)
restroom = models.CharField(max_length=254)
supplies = models.CharField(max_length=254)
gas_on_water = models.CharField(max_length=254, db_column="gas_on_the")
diesel_on_water = models.CharField(max_length=254, db_column="diesel_on")
fish_cleaning = models.CharField(max_length=254, db_column="fish_clean")
pumpout = models.CharField(max_length=254)
dump_station = models.CharField(max_length=254, db_column="dump_stati")
managed_by = models.CharField(max_length=254)
latitude = models.FloatField()
longitude = models.FloatField()
boater_ser = models.CharField(max_length=254)
icon_url = models.CharField(max_length=254)
the_geom = models.PointField(srid=3644)
objects = FacilityManager()
class Meta:
db_table = "facility"
| [
"[email protected]"
]
| |
a292f0646f44750049a15d70ad355287e0aa934b | 7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0 | /0301-0400/0388-Longest Absolute File Path/0388-Longest Absolute File Path.py | a86a2ef91fb97202c7e1d7bd2e4cdf25e89d83c6 | [
"MIT"
]
| permissive | jiadaizhao/LeetCode | be31bd0db50cc6835d9c9eff8e0175747098afc6 | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | refs/heads/master | 2021-11-05T04:38:47.252590 | 2021-10-31T09:54:53 | 2021-10-31T09:54:53 | 99,655,604 | 52 | 28 | MIT | 2020-10-02T12:47:47 | 2017-08-08T05:57:26 | C++ | UTF-8 | Python | false | false | 544 | py | class Solution:
def lengthLongestPath(self, input: str) -> int:
lens = [0]
maxLen = 0
for line in input.splitlines():
name = line.lstrip('\t')
level = len(line) - len(name)
if '.' in name:
maxLen = max(maxLen, lens[level] + len(name))
else:
if level + 1 == len(lens):
lens.append(lens[-1] + 1 + len(name))
else:
lens[level + 1] = lens[level] + 1 + len(name)
return maxLen
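# Illustrative example (added note, not part of the original file): for the input
#   "dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext"
# the longest absolute file path is "dir/subdir2/file.ext", so Solution().lengthLongestPath(...) returns 20.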
e18b0d3d437476da904df18390cea2ad2363d612 | 2b9397e9e26f7d97ce6983d36c9842ac773b70c6 | /workforce/migrations/0009_auto_20181015_0646.py | c4953694528ecce12900a7fff2ae42803176183d | []
| no_license | eakDev/aip-1 | 288ed7d7b8cf65c74b510f4f4e45292e3342796d | 3db2520e3c246e25e2cfa62e395a3ba6ebe37252 | refs/heads/main | 2023-05-02T08:57:42.449727 | 2021-05-23T10:16:59 | 2021-05-23T10:16:59 | 386,578,482 | 1 | 0 | null | 2021-07-16T09:15:22 | 2021-07-16T09:15:22 | null | UTF-8 | Python | false | false | 509 | py | # Generated by Django 2.1.1 on 2018-10-15 06:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('workforce', '0008_projectsite'),
]
operations = [
migrations.AlterField(
model_name='employeeprofile',
name='project_site',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='site', to='workforce.ProjectSite'),
),
]
| [
"[email protected]"
]
| |
0a545d6d5673a0f28df670d76f65a70863e87890 | 8c451e438739d741a127342e93727f3bac80b63e | /contributions/HARMONY 2021/test_gen_sedml.py | 49fdc9d82e9772c2f6ac6a3f3baf4415b563de11 | []
| no_license | SED-ML/sedml-test-suite | a5d6c5858e81d615fa0ba7bcaa7d3af90ae55c47 | 853d8cdac8987bdf9b901936c3c8888455602212 | refs/heads/master | 2023-06-14T00:02:58.086947 | 2021-07-07T23:45:57 | 2021-07-07T23:45:57 | 47,284,156 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,424 | py | r"""
####################################################################################################
tellurium 2.2.1
-+++++++++++++++++- Python Environment for Modeling and Simulating Biological Systems
.+++++++++++++++.
.+++++++++++++. Homepage: http://tellurium.analogmachine.org/
-//++++++++++++/. -:/-` Documentation: https://tellurium.readthedocs.io/en/latest/index.html
.----:+++++++/.++ .++++/ Forum: https://groups.google.com/forum/#!forum/tellurium-discuss
:+++++: .+:` .--++ Bug reports: https://github.com/sys-bio/tellurium/issues
-+++- ./+:-://. Repository: https://github.com/sys-bio/tellurium
.+. `...`
SED-ML simulation experiments: http://www.sed-ml.org/
# Change back to the original (with 'getName') when libsedml is fixed
sedmlDoc: L1V4
inputType: 'SEDML_STRING'
workingDir: 'C:\Users\Lucian\Desktop\tellurium'
saveOutputs: 'False'
outputDir: 'None'
plottingEngine: '<MatplotlibEngine>'
Windows-10-10.0.19041-SP0
python 3.8.3 (tags/v3.8.3:6f8c832, May 13 2020, 22:37:02) [MSC v.1924 64 bit (AMD64)]
####################################################################################################
"""
import tellurium as te
from roadrunner import Config
from tellurium.sedml.mathml import *
from tellurium.sedml.tesedml import process_trace, terminate_trace, fix_endpoints
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
try:
import libsedml
except ImportError:
import tesedml as libsedml
import pandas
import os.path
Config.LOADSBMLOPTIONS_RECOMPILE = True
workingDir = r'C:\Users\Lucian\Desktop\tellurium'
# --------------------------------------------------------
# Models
# --------------------------------------------------------
# Model <model0>
model0 = te.loadSBMLModel(os.path.join(workingDir, 'hill.xml'))
# --------------------------------------------------------
# Tasks
# --------------------------------------------------------
# Task <task0>
# not part of any DataGenerator: task0
# Task <task1>
task1 = []
# Task: <task0>
task0 = [None]
model0.setIntegrator('cvode')
if model0.conservedMoietyAnalysis == True: model0.conservedMoietyAnalysis = False
__range__uniform_linear_for_n = np.linspace(start=1.0, stop=15.0, num=26)
for __k__uniform_linear_for_n, __value__uniform_linear_for_n in enumerate(__range__uniform_linear_for_n):
model0.reset()
model0['n'] = __value__uniform_linear_for_n
model0.timeCourseSelections = ['n', 'time', '[S2]']
model0.reset()
task0[0] = model0.simulate(start=0.0, end=35.0, steps=30)
task1.extend(task0)
# --------------------------------------------------------
# DataGenerators
# --------------------------------------------------------
# DataGenerator <plot_0_0_0>
__var__task1_____time = np.column_stack([sim['time'] for sim in task1])
if len(__var__task1_____time.shape) == 1:
__var__task1_____time.shape += (1,)
plot_0_0_0 = __var__task1_____time
# DataGenerator <plot_0_0_1>
__var__task1_____n = np.column_stack([sim['n'] for sim in task1])
if len(__var__task1_____n.shape) == 1:
__var__task1_____n.shape += (1,)
plot_0_0_1 = __var__task1_____n
# DataGenerator <plot_0_0_2>
__var__task1_____S2 = np.column_stack([sim['[S2]'] for sim in task1])
if len(__var__task1_____S2.shape) == 1:
__var__task1_____S2.shape += (1,)
plot_0_0_2 = __var__task1_____S2
# --------------------------------------------------------
# Outputs
# --------------------------------------------------------
# Output <plot_0>
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(num=None, figsize=(9, 5), dpi=80, facecolor='w', edgecolor='k')
from matplotlib import gridspec
__gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
ax = plt.subplot(__gs[0])
ax.pcolormesh(plot_0_0_0, plot_0_0_1, plot_0_0_2, color='#1f77b4', linewidth=1.5, alpha=1.0, label='task1.S2', cmap='RdBu', shading='auto')
ax.set_title('UniformTimecourse', fontweight='bold')
ax.set_xlabel('task1.time', fontweight='bold')
ax.set_ylabel('task1.n', fontweight='bold')
plt.tick_params(axis='both', which='major', labelsize=10)
plt.tick_params(axis='both', which='minor', labelsize=8)
plt.savefig(os.path.join(workingDir, 'plot_0.png'), dpi=100)
plt.show()
#################################################################################################### | [
"[email protected]"
]
| |
a780c71aa45f05efbbf6ac177b608b0cc54997b7 | 71d3e539e3ba8ab06d61bfb3683414a129a4d744 | /detecting_objects/image_evaluator/src/image_evaluator.py | 76b3bf77e4ab4ce278f0921170cf311b8441bb6f | []
| no_license | browlm13/Basketball-Shot-Detection | b0dfbc0de3129917697b077a59f5519a7faecc57 | 8ea4e35effdf2c6f02d2d275cd3d48d9da218608 | refs/heads/master | 2021-10-27T21:10:11.241146 | 2019-04-19T18:50:57 | 2019-04-19T18:50:57 | 105,725,060 | 83 | 16 | null | null | null | null | UTF-8 | Python | false | false | 20,400 | py | #python3
"""
Image Evaluator Class
img_eval = Image_Evaluator()
# Loading Models - Todo: store in file so only model name has to be used
BASKETBALL_MODEL = {'name' : 'basketball_model', 'paths' : {'frozen graph': PATH_TO_FROZEN_GRAPH, 'labels' : PATH_TO_LABELS}}
PERSON_MODEL = {'name' : 'person_model', 'paths' : {'frozen graph': PATH_TO_FROZEN_GRAPH, 'labels' : PATH_TO_LABELS}}
img_eval.load_models([BASKETBALL_MODEL, PERSON_MODEL])
todo: img_eval.annotate_directory(image_directory, annotations_directory) #Add selected categories and minscores
todo: cropping
"""
import numpy as np
import os
from PIL import Image
import PIL.Image as Image
import xml.etree.ElementTree as ET
from xml.dom import minidom
import tensorflow as tf
#from utils import label_map_util
from image_evaluator.src.utils import label_map_util
import glob
import shutil
#from shutil import copyfile
#from shutil import copy
class Image_Evaluator:
def __init__(self):
self.models = []
self.categories = {}
def load_models(self, model_list):
        # Todo: ensure existence
self.models = model_list
#determine categories
for m in self.models:
#get each models label dict
m['categories'] = label_map_util.get_label_map_dict( m['paths']['labels'], use_display_name=m['use_display_name'] )
#go through models, for each unique category list all models that can identify, use first as evaluation model
for m in self.models:
for key in m['categories']:
if key in self.categories:
self.categories[key]['models'].append(m['name'])
else:
self.categories[key] = {'models' : [m['name']], 'evaluation_model' : m['name']}
    # set all evaluation models used (what needs to be loaded into memory for image evaluation)
def get_evaluation_models(self):
evaluation_models = []
for c in self.categories:
if self.categories[c]['evaluation_model'] not in evaluation_models:
evaluation_models.append(self.categories[c]['evaluation_model'])
return evaluation_models
def set_category_evaluation_model(self, category_name, model_name):
self.categories[category_name]['evaluation_model'] = model_name
#path, folder, filename
def get_path_data(self, path):
folder = os.path.basename(os.path.dirname(path))
filename = os.path.basename(path)
return path, folder, filename
def get_model_path(self, model_name, file_name):
path = ""
for model in self.models:
if model['name'] == model_name:
path = model['paths'][file_name]
return path
def get_model_categories_dict(self, model_name):
for model in self.models:
if model['name'] == model_name:
return model['categories']
def get_model_evaluation_categories(self, model_name):
evaluation_categories = []
for c in self.categories:
if self.categories[c]['evaluation_model'] == model_name:
evaluation_categories.append(c)
return evaluation_categories
def load_image_into_numpy_array(self, image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
def image_dimensions(self, image_np):
image_pil = Image.fromarray(np.uint8(image_np)).convert('RGB')
return image_pil.size
#
# Writing Image XML annotations
#
def swap_exentsion(self, full_filename, new_extension):
template = "%s.%s" # filename, extension
filename_base, old_extension = os.path.splitext(full_filename)
return template % (filename_base, new_extension.strip('.'))
def generate_new_filename(self, output_directory_path, image_info, new_extension):
new_filename = self.swap_exentsion(image_info['image_filename'], new_extension)
full_path = os.path.join(output_directory_path, new_filename)
return full_path
def generate_xml_string(self, image_info):
image_data = {}
image_data['path'] = image_info['image_path']
image_data['folder'] = image_info['image_folder']
image_data['filename'] = image_info['image_filename']
image_data['width'] = image_info['image_width']
image_data['height'] = image_info['image_height']
image_data['depth'] = 3
        # unspecified
image_data['database'] = 'NA'
image_data['segmented'] = 0
image_data['objects'] = []
for item in image_info['image_items_list']:
o = {}
o['name'] = item['class']
xmin, xmax, ymin, ymax = item['box']
o['xmin'] = xmin
o['ymin'] = ymin
o['xmax'] = xmax
o['ymax'] = ymax
            # unspecified
o['pose'] = 'Unspecified'
o['truncated'] = 0
o['difficult'] = 0
image_data['objects'].append(o)
# create XML
annotation_tag = ET.Element('annotation')
folder_tag = ET.SubElement(annotation_tag, 'folder')
folder_tag.text = image_data['folder']
filename_tag = ET.SubElement(annotation_tag, 'filename')
filename_tag.text = image_data['filename']
path_tag = ET.SubElement(annotation_tag, 'path')
path_tag.text = image_data['path']
source_tag = ET.SubElement(annotation_tag, 'source')
database_tag = ET.SubElement(source_tag, 'database')
database_tag.text = image_data['database']
size_tag = ET.SubElement(annotation_tag, 'size')
width_tag = ET.SubElement(size_tag, 'width')
width_tag.text = str(image_data['width'])
height_tag = ET.SubElement(size_tag, 'height')
height_tag.text = str(image_data['height'])
depth_tag = ET.SubElement(size_tag, 'depth')
depth_tag.text = str(image_data['depth'])
segmented_tag = ET.SubElement(annotation_tag, 'segmented')
segmented_tag.text = str(0)
for o in image_data['objects']:
object_tag = ET.SubElement(annotation_tag, 'object')
name_tag = ET.SubElement(object_tag, 'name')
name_tag.text = o['name']
pose_tag = ET.SubElement(object_tag, 'pose')
pose_tag.text = o['pose']
truncated_tag = ET.SubElement(object_tag, 'truncated')
truncated_tag.text = str(o['truncated'])
difficult_tag = ET.SubElement(object_tag, 'difficult')
difficult_tag.text = str(o['difficult'])
bndbox_tag = ET.SubElement(object_tag, 'bndbox')
xmin_tag = ET.SubElement(bndbox_tag, 'xmin')
xmin_tag.text = str(o['xmin'])
ymin_tag = ET.SubElement(bndbox_tag, 'ymin')
ymin_tag.text = str(o['ymin'])
xmax_tag = ET.SubElement(bndbox_tag, 'xmax')
xmax_tag.text = str(o['xmax'])
ymax_tag = ET.SubElement(bndbox_tag, 'ymax')
ymax_tag.text = str(o['ymax'])
#return ET.tostring(annotation_tag).decode('utf-8')
dom = minidom.parseString(ET.tostring(annotation_tag).decode('utf-8'))
return dom.toprettyxml(indent='\t')
def write_xml_file(self, image_info, outpath):
        # if directory does not exist, create it
if not os.path.exists(outpath):
os.makedirs(outpath)
xml_string = self.generate_xml_string(image_info)
xml_filename = self.generate_new_filename(outpath, image_info, 'xml')
with open(xml_filename, "w") as f:
f.write(xml_string)
def filter_minimum_score_threshold(self, image_info_bundel, min_score_thresh):
filtered_image_info_bundel = {}
for image_path, image_info in image_info_bundel.items():
filtered_image_info_bundel[image_path] = image_info
filtered_image_items_list = []
for item in image_info['image_items_list']:
if item['score'] > min_score_thresh:
filtered_image_items_list.append(item)
filtered_image_info_bundel[image_path]['image_items_list'] = filtered_image_items_list
return filtered_image_info_bundel
def filter_selected_categories(self, image_info_bundel, selected_categories_list):
filtered_image_info_bundel = {}
for image_path, image_info in image_info_bundel.items():
filtered_image_info_bundel[image_path] = image_info
filtered_image_items_list = []
for item in image_info['image_items_list']:
if item['class'] in selected_categories_list:
filtered_image_items_list.append(item)
filtered_image_info_bundel[image_path]['image_items_list'] = filtered_image_items_list
return filtered_image_info_bundel
def _image_info(self, category_index, selected_categories, image_np, boxes, scores, classes, min_score_thresh=0.0001):
# retrieve image size
image_pil = Image.fromarray(np.uint8(image_np)).convert('RGB')
im_width, im_height = image_pil.size
#box, class, score
item_list = []
for i in range(boxes.shape[0]):
if scores is None or scores[i] > min_score_thresh:
item = {}
#
# box
#
normalized_box = tuple(boxes[i].tolist())
n_ymin, n_xmin, n_ymax, n_xmax = normalized_box
box = (int(n_xmin * im_width), int(n_xmax * im_width), int(n_ymin * im_height), int(n_ymax * im_height)) #(left, right, top, bottom)
item['box'] = box
#
# class name
#
class_name = 'NA'
if classes[i] in category_index.keys():
class_name = str(category_index[classes[i]]['name'])
item['class'] = class_name
#
# detection score
#
item['score'] = 100*scores[i]
# add if class is in selected_classes, to ensure only evaluation model is evalutating
if item['class'] in selected_categories:
item_list.append(item)
return item_list
def get_image_info(self, image_path_list, min_score_thresh=None, prevent_overlap=True):
image_info_bundel = dict((image_path, {'image_items_list':[], 'image_folder':'', 'image_filename':'','image_path':'', 'image_height':-1, 'image_width':-1}) for image_path in image_path_list) #key= path, value is cobined item list
# for each unique model evaluator in categories list perform detection
for model_name in self.get_evaluation_models():
# Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(self.get_model_path(model_name, 'frozen graph'), 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
path_to_labels = self.get_model_path(model_name, 'labels')
label_map = label_map_util.load_labelmap(path_to_labels)
categories_dict = self.get_model_categories_dict(model_name)
num_classes = len(categories_dict)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=num_classes, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
#
# Detection
#
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') # Definite input and output Tensors for detection_graph
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0') # Each box represents a part of the image where a particular object was detected.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0') # Each score represent how level of confidence for each of the objects.
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
#
# Image Detection Loop
#
for image_path in image_path_list:
#
# prepare image for model input
#
#tmp non relitive path test
script_dir = os.path.dirname(os.path.abspath(__file__))
image = Image.open(os.path.join(script_dir, image_path))
#image = Image.open(image_path)
image_np = self.load_image_into_numpy_array(image)
image_np_expanded = np.expand_dims(image_np, axis=0) # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
#
# Detection
#
(boxes, scores, classes, num) = sess.run([detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
"""
# new code
if prevent_overlap:
iou_threshold = 0.5 #overlap threshold
max_output_size = 2 #max num boxes overlap threshold
selected_indices = tf.image.non_max_suppression( boxes, scores, max_output_size, iou_threshold)
boxes = tf.gather(boxes, selected_indices) #returns selected boxes
scores = tf.gather(np.squeeze(scores), selected_indices) #returns selected
classes = tf.gather(np.squeeze(classes), selected_indices) #returns selected
"""
#
# Reformat results
#
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes).astype(np.int32)
#
# Get selected items (box, class, score)
#
#selected classes are all categories current model is set to evaluate
selected_categories = self.get_model_evaluation_categories(model_name)
image_items_list = []
if min_score_thresh is not None:
mst_decimal = min_score_thresh * 0.01 #convert to decimal
image_items_list = self._image_info(category_index, selected_categories, image_np, boxes, scores, classes, mst_decimal)
else:
image_items_list = self._image_info(category_index, selected_categories, image_np, boxes, scores, classes)
# add to / combine image items list
image_info_bundel[image_path]['image_items_list'] += image_items_list
#
# meta data - PLEASE STORE FOR USE IN XML ANNOTATIONS
#
image_path, image_folder, image_filename = self.get_path_data(image_path)
image_height, image_width = self.image_dimensions(image_np)
image_info_bundel[image_path]['image_path'] = image_path
image_info_bundel[image_path]['image_folder'] = image_folder
image_info_bundel[image_path]['image_filename'] = image_filename
image_info_bundel[image_path]['image_height'] = image_height
image_info_bundel[image_path]['image_width'] = image_width
return image_info_bundel
def remove_string_start_end_whitespace(self, string):
if string[0] == ' ':
string = string[1:]
if string[-1] == ' ':
string = string[:-1]
return string
def category_2_symbol(self, category_name):
return category_name.strip()
def _any(self, category_name, min_score, image_items_list):
""" return True if one or more of the category name was detected above minimum score """
for item in image_items_list:
if (item['class'] == category_name) and (item['score'] > min_score): return True
return False
def _num(self, category_name, min_score, image_items_list):
""" return number of the category name detected above minimum score """
num_detected = 0
for item in image_items_list:
if (item['class'] == category_name) and (item['score'] > min_score): num_detected += 1
return num_detected
def boolean_image_evaluation(self, image_path_list, boolean_categories_present):
""" accepts list of paths to images and common boolean expression of categories present ex: any('person',30.0) or (num('basketball', 60.0) > 2)"""
image_info_bundel = self.get_image_info(image_path_list)
image_boolean_bundel = dict((image_path, False) for image_path in image_path_list) #key= path, value is set to false initally
for image_path, image_info in image_info_bundel.items():
any = lambda category_name, min_score : self._any(category_name, min_score, image_info['image_items_list'])
num = lambda category_name, min_score : self._num(category_name, min_score, image_info['image_items_list'])
scope = locals()
image_boolean_bundel[image_path] = eval(boolean_categories_present, scope)
return image_boolean_bundel, image_info_bundel
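    # Usage sketch (illustrative; the frozen-graph and label-map files referenced by
    # load_models() must exist on disk, and the image path, categories and score
    # thresholds below are placeholders):
    #   img_eval = Image_Evaluator()
    #   img_eval.load_models([BASKETBALL_MODEL, PERSON_MODEL])
    #   bools, info = img_eval.boolean_image_evaluation(
    #       ["frames/frame_001.jpg"],
    #       "any('basketball', 60.0) and num('person', 50.0) >= 1")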
def move_images_bool_rule(self, input_image_directory_path, image_output_directory_path, bool_rule, annotations_output_directory_path = False, annotations_min_score_thresh=None, annotations_selected_category_list=None):
""" given input directory of images (currently JPEG), move selected images that satisfy bool rule to new directory, create annotation directory (xml) if specifeid. """
# get all image paths in directory
accpeted_extensions = ['jpg', 'JPEG', 'jpeg']
image_path_list = []
for extension in accpeted_extensions:
glob_phrase = os.path.join(input_image_directory_path, '*.' + extension)
for image_path in glob.glob(glob_phrase):
                # check image can be reshaped (tmp)
try:
script_dir = os.path.dirname(os.path.abspath(__file__))
image = Image.open(os.path.join(script_dir, image_path))
image_np = self.load_image_into_numpy_array(image)
image_np_expanded = np.expand_dims(image_np, axis=0) # Expand dimensions since the model
#add
image_path_list += [image_path]
#tmp
print(image_path)
except:
print("error loading: %s" % image_path)
# evaluate
image_boolean_bundel, image_info_bundel = self.boolean_image_evaluation(image_path_list, bool_rule)
# if image output directory does not exist, create it
if not os.path.exists(image_output_directory_path): os.makedirs(image_output_directory_path)
# copy images over with same basename
for image_path, copy_bool in image_boolean_bundel.items():
if copy_bool: shutil.copy(image_path, image_output_directory_path)
#annotations
# if image output directory does not exist, create it
if annotations_output_directory_path is not False:
if not os.path.exists(annotations_output_directory_path): os.makedirs(annotations_output_directory_path)
#filter selected categories and min score threshold for image_info_bundel
if annotations_selected_category_list is not None:
image_info_bundel = self.filter_selected_categories(image_info_bundel, annotations_selected_category_list)
if annotations_min_score_thresh is not None:
image_info_bundel = self.filter_minimum_score_threshold(image_info_bundel, annotations_min_score_thresh)
#change image location data and write xml file
for image_path, image_info in image_info_bundel.items():
                # if bool statement is true
if image_boolean_bundel[image_path]:
#change image location info
new_image_info = image_info
new_image_filename = os.path.basename(image_path) #same technically
new_image_folder = os.path.basename(image_output_directory_path)
new_image_path = os.path.join(image_output_directory_path, new_image_filename)
new_image_info['image_path'] = new_image_path
new_image_info['image_folder'] = new_image_folder
new_image_info['image_filename'] = new_image_filename
#write
self.write_xml_file(new_image_info, annotations_output_directory_path)
def run():
pass
"""
BASKETBALL_MODEL = {'name' : 'basketball_model_v1', 'use_display_name' : False, 'paths' : {'frozen graph': "models/basketball_model_v1/frozen_inference_graph/frozen_inference_graph.pb", 'labels' : "models/basketball_model_v1/label_map.pbtxt"}}
PERSON_MODEL = {'name' : 'ssd_mobilenet_v1_coco_2017_11_17', 'use_display_name' : True, 'paths' : {'frozen graph': "models/ssd_mobilenet_v1_coco_2017_11_17/frozen_inference_graph/frozen_inference_graph.pb", 'labels' : "models/ssd_mobilenet_v1_coco_2017_11_17/mscoco_label_map.pbtxt"}}
ie = Image_Evaluator()
ie.load_models([BASKETBALL_MODEL, PERSON_MODEL])
image_input_base_directory_path = "/Users/ljbrown/Desktop/StatGeek/image_collecting/google-images-download/downloads/"
# for each directory in downloads
for image_dir_path in glob.glob(image_input_base_directory_path + "/*/"):
dirname = os.path.basename(image_dir_path[:-1])
print(dirname)
image_input_directory_path = "/Users/ljbrown/Desktop/StatGeek/image_collecting/google-images-download/downloads/" + dirname
image_output_directory_path = "/Users/ljbrown/Desktop/StatGeek/image_collecting/gather/%s_images" % dirname
annotation_output_directory_path = "/Users/ljbrown/Desktop/StatGeek/image_collecting/gather/%s_annotations" % dirname
bool_rule = "(any('basketball', 85.0) and not any('person', 15.0)) or ((num('person', 95.0) == 1) and not any('basketball', 15.0) and (num('person', 15.0) == 1)) or (any('basketball', 85.0) and (num('person', 95.0) ==1) and (num('person', 15.0) == 1))"
#print(image_input_directory_path)
#print(image_output_directory_path)
#print(annotation_output_directory_path)
ie.move_images_bool_rule(image_input_directory_path, image_output_directory_path, bool_rule, annotation_output_directory_path, 85.0, ['basketball', 'person'])
"""
| [
"[email protected]"
]
| |
0b4ada6ee9bc3a49589ff4a8dc8621b475fb3800 | 822c566d3fe100f216284276d94341527a4af7a1 | /class_based/spin/spin/asgi.py | bb331524cd68a94bd0362c3e57157d55b83b6cd6 | []
| no_license | ybgirgin3/spin | 9f96016f17a6e77faa147bff47733b70da16014d | 94afbcf62714d1f6c3b89c661390740fedb3c9ac | refs/heads/main | 2023-06-10T17:48:45.538831 | 2021-06-23T17:15:16 | 2021-06-23T17:15:16 | 377,943,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
ASGI config for spin project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'spin.settings')
application = get_asgi_application()
| [
"[email protected]"
]
| |
8a0848652a216c54c6483dd93481724a0b600cde | 1d70bed8b3e7314cac8a1b5cb8e20a98924d0746 | /gdp and stock predicton/modules.py | 6d2685b1da0a102bf5bcb75c678ec0dfd2a0d57a | []
| no_license | bateikoEd/dipl_program | 02d46f2342d2814ed58181f38f9a781effeedd05 | 0b885c436cda096c80fe2b445337dc7e0bf16ba0 | refs/heads/main | 2023-07-24T05:42:05.509338 | 2021-09-06T06:36:18 | 2021-09-06T06:36:18 | 344,238,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,647 | py | import pandas as pd
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.model_selection import cross_val_score
import numpy as np
from sklearn.metrics import r2_score
# from sklearn.metrics import mean_absolute_percentage_error
from statsmodels.stats.stattools import durbin_watson
from sklearn.metrics import explained_variance_score
# matplotlib and seaborn are used by barplot() below but were missing from the imports
import matplotlib.pyplot as plt
import seaborn as sns
def barplot(data, title):
# fig = plt.figure(figsize=(18,6))
bar_plot = sns.barplot(x=data['feature'], y=data['value'])
for item in bar_plot.get_xticklabels():
item.set_rotation(90)
plt.title(title)
plt.show()
def get_score_for_model(models, X_train, y_train, scoring, n_splits=3,print_res=True):
def append_res_to_boxplot():
i = 0
df = pd.DataFrame()
while i < len(results[0]):
line = [[num[i], ml] for num, ml in zip(results, names)]
# for num, ml in zip(results, names):
# line.append([num[i],ml])
i = i + 1
df = df.append(pd.DataFrame(line, columns=[scoring, 'ML']), ignore_index=True)
return df
seed = 13
results = []
means = []
sdv = []
names = []
scoring = scoring
for name, model in models:
strat = KFold(n_splits=n_splits, random_state=seed, shuffle=True)
cv_results = cross_val_score(model, X_train, y_train, cv=strat, scoring=scoring, n_jobs=-1)
results.append(cv_results)
names.append(name)
means.append(cv_results.mean())
sdv.append(cv_results.std())
if print_res:
print(f"{names[-1]}: {means[-1]} ({sdv[-1]})")
box_plot = append_res_to_boxplot()
df_means = pd.DataFrame({'ML': names, 'means': means, 'std': sdv})
return box_plot, df_means
def define_metrics(model, X_train_, X_test_, y_train, y_test, name):
pred_train_ = np.array(model.predict(X_train_))
pred_test_ = np.array(model.predict(X_test_))
y_train_ = np.array(y_train)
y_test_ = np.array(y_test)
metric_train = pd.DataFrame()
metric_train['name'] = [name + '_train']
metric_train['r2'] = [r2_score(y_train, pred_train_)]
metric_train['sum_squared_resid'] = np.sum((y_train_ - pred_train_)**2)
metric_train['MAPE'] = [np.mean(np.abs((y_train - pred_train_) / y_train)) * 100]
metric_train['RMSE'] = [np.sqrt(np.mean((y_train - pred_train_)**2))]
metric_train['durbin_watson'] = [durbin_watson(y_train - pred_train_)]
metric_train['theil_index'] = [np.sqrt((1/len(pred_train_))*np.sum((y_train_-pred_train_)**2))
/ (np.sqrt((1/len(y_train_))*np.sum(y_train_**2)) + np.sqrt((1/len(pred_train_))*np.sum(pred_train_**2)))]
metric_train['ex_var'] = [explained_variance_score(y_train, pred_train_)]
metric_test = pd.DataFrame()
metric_test['name'] = [name + '_test']
metric_test['r2'] = [r2_score(y_test, pred_test_)]
metric_test['sum_squared_resid'] = np.sum((y_test_ - pred_test_)**2)
metric_test['MAPE'] = [np.mean(np.abs((y_test - pred_test_) / y_test)) * 100]
metric_test['RMSE'] = [np.sqrt(np.mean((y_test - pred_test_) ** 2))]
metric_test['durbin_watson'] = [durbin_watson(y_test - pred_test_)]
metric_test['theil_index'] = [np.sqrt((1/len(pred_test_))*np.sum((y_test_-pred_test_)**2))
/ (np.sqrt((1/len(y_test_))*np.sum(y_test_**2)) + np.sqrt((1/len(pred_test_))*np.sum(pred_test_**2)))]
metric_test['ex_var'] = [explained_variance_score(y_test, pred_test_)]
return metric_train.append(metric_test)
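# Example usage sketch (illustrative; X_train/X_test/y_train/y_test are assumed to be
# pre-split data and are not defined in this module):
# from sklearn.linear_model import LinearRegression
# from sklearn.ensemble import RandomForestRegressor
# models = [('LR', LinearRegression()), ('RF', RandomForestRegressor())]
# box_df, means_df = get_score_for_model(models, X_train, y_train, scoring='r2')
# lr = LinearRegression().fit(X_train, y_train)
# metrics = define_metrics(lr, X_train, X_test, y_train, y_test, 'LR')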
if __name__ == '__main__':
pass
| [
"[email protected]"
]
| |
8d3a150e92b97edc73a1af8bcfa9566c2296219c | 23611933f0faba84fc82a1bc0a85d97cf45aba99 | /google-cloud-sdk/.install/.backup/lib/surface/pubsub/subscriptions/seek.py | 718094747211caab81d5b553f97be853d2cb982b | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | KaranToor/MA450 | 1f112d1caccebdc04702a77d5a6cee867c15f75c | c98b58aeb0994e011df960163541e9379ae7ea06 | refs/heads/master | 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 | Apache-2.0 | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | Python | UTF-8 | Python | false | false | 3,886 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Pub/Sub subscriptions seek command."""
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.command_lib.pubsub import util
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class SeekAlpha(base.Command):
"""This feature is part of an invite-only release of the Cloud Pub/Sub API.
Resets a subscription's backlog to a point in time or to a given snapshot.
This feature is part of an invitation-only release of the underlying
Cloud Pub/Sub API. The command will generate errors unless you have access to
this API. This restriction should be relaxed in the near future. Please
contact [email protected] with any questions in the meantime.
"""
@staticmethod
def Args(parser):
"""Registers flags for this command."""
parser.add_argument('subscription',
help='Name of the subscription to affect.')
seek_to_parser = parser.add_mutually_exclusive_group(required=True)
seek_to_parser.add_argument(
'--time', type=arg_parsers.Datetime.Parse,
help=('The time to seek to. Messages in the subscription that '
'were published before this time are marked as acknowledged, and '
'messages retained in the subscription that were published after '
'this time are marked as unacknowledged. See `gcloud topic '
'datetimes` for information on time formats.'))
seek_to_parser.add_argument(
'--snapshot',
help=('The name of the snapshot. The snapshot\'s topic must be the same'
' as that of the subscription.'))
parser.add_argument(
'--snapshot-project', default='',
help=('The name of the project the snapshot belongs to (if seeking to '
'a snapshot). If not set, it defaults to the currently selected '
'cloud project.'))
def Collection(self):
return util.SUBSCRIPTIONS_SEEK_COLLECTION
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
A serialized object (dict) describing the results of the operation. This
description fits the Resource described in the ResourceRegistry under
'pubsub.subscriptions.seek'.
"""
msgs = self.context['pubsub_msgs']
pubsub = self.context['pubsub']
subscription_path = util.SubscriptionFormat(args.subscription)
result = {'subscriptionId': subscription_path}
seek_req = msgs.SeekRequest()
if args.snapshot:
if args.snapshot_project:
snapshot_project = (
projects_util.ParseProject(args.snapshot_project).Name())
else:
snapshot_project = ''
seek_req.snapshot = util.SnapshotFormat(args.snapshot, snapshot_project)
result['snapshotId'] = seek_req.snapshot
else:
seek_req.time = args.time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
result['time'] = seek_req.time
pubsub.projects_subscriptions.Seek(
msgs.PubsubProjectsSubscriptionsSeekRequest(
seekRequest=seek_req, subscription=subscription_path))
return result
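# Example invocations (illustrative; this alpha command requires access to the
# invite-only API described in the class docstring):
#   gcloud alpha pubsub subscriptions seek SUBSCRIPTION --time=2017-01-01T00:00:00Z
#   gcloud alpha pubsub subscriptions seek SUBSCRIPTION --snapshot=my-snapshot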
| [
"[email protected]"
]
| |
5a57d709e68b57343a2f490cf6f527c2a7bb2503 | e18c84358b2a80159b37dcea39debfbbdaa66395 | /backend/api/views/image_c.py | 0dcc38ac99342faa71280bd72d3802a93a490817 | [
"MIT"
]
| permissive | chunyenHuang/Disfactory | 49d404609b73783ac488be9430d9cf518fc19f64 | 52985f7aadc8ca56344f80000b5e943bea99f83d | refs/heads/master | 2021-01-03T01:54:40.415165 | 2020-01-22T04:09:29 | 2020-01-22T04:09:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | from django.conf import settings
from django.http import HttpResponse, JsonResponse
import django_q.tasks
from rest_framework.decorators import api_view
from ..models import Image
from .utils import (
_is_image,
_get_image_original_date,
)
@api_view(['POST'])
def post_image(request):
f_image = request.FILES['image']
if _is_image(f_image):
f_image.seek(0)
image_original_date = _get_image_original_date(f_image)
kwargs = {
'image_path': '',
'orig_time': image_original_date,
}
img = Image.objects.create(**kwargs)
f_image.seek(0)
django_q.tasks.async_task('api.tasks.upload_image', f_image.read(), settings.IMGUR_CLIENT_ID, img.id)
return JsonResponse({"token": img.id})
return HttpResponse(
"The uploaded file cannot be parsed to Image",
status=400,
)
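# Example client call sketch (illustrative; the exact URL depends on how this view is
# routed in the project's urls.py):
# import requests
# requests.post("https://<api-host>/api/images", files={"image": open("factory.jpg", "rb")})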
| [
"[email protected]"
]
| |
a788ecad5cc912d6405ede696a2f16263c295b76 | 8126d1bc2afe0925a24fce039d0f02a3bd7acbae | /pytraj/c_action/__init__.py | de635542285540646e5470bb9b3a11a2de034598 | [
"BSD-2-Clause"
]
| permissive | rafwiewiora/pytraj | 54fb6fe07a754f65b865dd161f64c7af15fc3926 | 91a019ea406081ccf0043170cc64c48b4a5ea04a | refs/heads/master | 2021-01-20T17:33:05.974254 | 2016-03-11T21:25:32 | 2016-03-11T21:25:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | """"""
from __future__ import absolute_import
from . import c_action
actionlist = []
for act in c_action.__dict__.keys():
if 'Action' in act:
actionlist.append(act)
__all__ = actionlist
__doc__ = "\n".join(__all__)
| [
"[email protected]"
]
| |
cfa8945289850ff63e497fcc908de2732efb4faf | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/Scaleform/daapi/view/battle/event/hunter_respawn.py | d81a741d398ce19a72f4ca18421e45b81afc015c | []
| no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 1,419 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/battle/event/hunter_respawn.py
import BigWorld
from gui.Scaleform.daapi.view.battle.event.boss_teleport import EventBossTeleportView
from gui.Scaleform.daapi.view.meta.EventHunterRespawnViewMeta import EventHunterRespawnViewMeta
from gui.wt_event.wt_event_helpers import getSpeed
from gui.impl import backport
from gui.impl.gen import R
from gui.shared.gui_items.Vehicle import getIconResourceName
class EventHunterRespawnView(EventBossTeleportView, EventHunterRespawnViewMeta):
def onRespawnPointClick(self, pointGuid):
self._chooseSpawnPoint(pointGuid)
def showSpawnPoints(self):
self._blur.enable()
timeLeft = 0
timeTotal = 0
respawnComponent = BigWorld.player().dynamicComponents.get('respawnComponent')
if respawnComponent:
timeLeft = respawnComponent.endTime - BigWorld.serverTime()
timeTotal = respawnComponent.duration
self.as_updateTimerS(timeLeft, timeTotal, replaySpeed=getSpeed())
vTypeVO = self._sessionProvider.getCtx().getVehicleInfo(BigWorld.player().playerVehicleID).vehicleType
iconName = getIconResourceName(vTypeVO.iconName)
icon = R.images.gui.maps.icons.wtevent.hunterRespawn.dyn(iconName)
if icon.exists():
self.as_setIconS(backport.image(icon()))
| [
"[email protected]"
]
| |
894b89da175042a21e5348d0f8d8a09fff415934 | 7e38a33607e1496a8e25da2ea4de93aba0f7a6db | /tests/test_tx.py | 148ede775632a48aacff47ec1ba3cf5ad4fee695 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
]
| permissive | AustEcon/bitcoinX | c01f96626c72765a884f0f08f171aee5bb47dbe9 | 06796905626ffebf7b1eb92238b1771f103efee8 | refs/heads/master | 2022-12-03T06:00:09.898506 | 2022-11-21T07:16:42 | 2022-11-21T07:16:42 | 260,636,435 | 1 | 0 | MIT | 2020-05-02T07:29:44 | 2020-05-02T07:29:44 | null | UTF-8 | Python | false | false | 19,997 | py | from io import BytesIO
import random
import pytest
from bitcoinx import (
Script, PublicKey, SigHash, hash_to_hex_str, Bitcoin, BitcoinTestnet, JSONFlags,
)
from bitcoinx.tx import *
from bitcoinx.tx import LOCKTIME_THRESHOLD
from .utils import read_tx, read_tx_hex, read_signature_hashes, read_json_tx
def test_tx_read():
tx = read_tx('b59de025.txn')
assert tx.version == 2
assert len(tx.inputs) == 7
assert len(tx.outputs) == 3
assert tx.locktime == 0
def test_from_bytes():
tx_bytes = bytes.fromhex(read_tx_hex('b59de025.txn'))
tx = Tx.from_bytes(tx_bytes)
assert tx.to_bytes() == tx_bytes
def test_from_hex():
tx_hex = read_tx_hex('b59de025.txn')
tx = Tx.from_hex(tx_hex)
assert tx.to_hex() == tx_hex
def test_to_bytes_to_hex():
tx_hex = read_tx_hex('b59de025.txn')
tx = Tx.from_hex(tx_hex)
assert tx.to_bytes() == bytes.fromhex(tx_hex)
assert tx.to_hex() == tx_hex
def test_repr():
tx = read_tx('afda808f.txn')
assert repr(tx) == (
'Tx(version=1, inputs=[TxInput(prev_hash="00000000000000000000000000000000000000000000'
'00000000000000000000", prev_idx=4294967295, script_sig="0319c4082f626d67706f6f6c2e636f6d2'
'f5473537148110d9e7fcc3cf74ee70c0200", sequence=4294967295)], outputs=[TxOutput(value='
'1250005753, script_pubkey="76a914db1aea84aad494d9f5b253327da23c4e51266c9388ac")], '
'locktime=0)'
)
tx_testcases = ['503fd37f.txn']
@pytest.mark.parametrize("filename", tx_testcases)
def test_signature_hash(filename):
tx, values, pk_scripts = read_json_tx(filename)
correct_hashes = read_signature_hashes(filename.replace('.txn', '.sig_hashes'))
n = 0
for input_index, (value, pk_script, txin) in enumerate(zip(values, pk_scripts, tx.inputs)):
for sighash in range(256):
sighash = SigHash(sighash)
if sighash.has_forkid():
signature_hash = tx.signature_hash(input_index, value, pk_script, sighash)
assert signature_hash == correct_hashes[n]
n += 1
def test_signature_hash_bad():
tx, _, _ = read_json_tx('503fd37f.txn')
with pytest.raises(IndexError):
tx.signature_hash(-1, 5, b'', SigHash.ALL)
with pytest.raises(IndexError):
tx.signature_hash(2, 5, b'', SigHash.ALL)
with pytest.raises(ValueError):
tx.signature_hash(0, -1, b'', SigHash.ALL)
with pytest.raises(TypeError):
tx.signature_hash(0, 0, b'', 1)
tx.signature_hash(0, 0, b'', SigHash.NONE)
tx.signature_hash(1, 0, b'', SigHash(1))
@pytest.mark.parametrize("filename", tx_testcases)
def test_signatures(filename):
tx, values, pk_scripts = read_json_tx(filename)
for input_index, (value, pk_script, txin) in enumerate(zip(values, pk_scripts, tx.inputs)):
signature, pubkey = txin.script_sig.ops()
pubkey = PublicKey.from_bytes(pubkey)
signature_hash = tx.signature_hash(input_index, value, pk_script, SigHash(signature[-1]))
assert pubkey.verify_der_signature(signature[:-1], signature_hash, None)
class TestTx:
def test_is_coinbase(self):
tx = read_tx('afda808f.txn')
assert tx.is_coinbase()
def test_are_inputs_final(self):
tx = read_tx('b59de025.txn')
assert tx.are_inputs_final()
tx.inputs[4].sequence += 1
assert not tx.are_inputs_final()
@pytest.mark.parametrize("nin, nout", ((1, 1), (1, 253), (253, 65536), (65536, 1)))
def test_size(self, nin, nout):
tx_in = TxInput(bytes(32), 0xffffffff, b'', 0xffffffff)
tx_out = TxOutput(0, b'')
tx = Tx(2, [tx_in] * nin, [tx_out] * nout, 0)
assert tx.size() == len(tx.to_bytes())
@pytest.mark.parametrize("locktime,inputs_final,height,timestamp,answer", (
# Locktime 0 is always final
(0, False, 0, 0, True),
(0, False, 1, 1, True),
(0, True, 0, 0, True),
(0, True, 1, 1, True),
# Locktime 1 is final only from block height 2
(1, False, 0, 0, False),
(1, False, 1, 0, False),
(1, False, 2, 0, True),
# If all inputs a final a tx is always final
(1, True, 0, 0, True),
(1, True, 1, 0, True),
(1, True, 2, 0, True),
# If < LOCKTIME_THRESHOLD, it's height-based
(LOCKTIME_THRESHOLD - 1, False, LOCKTIME_THRESHOLD - 1, 0, False),
(LOCKTIME_THRESHOLD - 1, False, LOCKTIME_THRESHOLD, 0, True),
(LOCKTIME_THRESHOLD - 1, True, LOCKTIME_THRESHOLD - 1, 0, True),
(LOCKTIME_THRESHOLD - 1, True, LOCKTIME_THRESHOLD, 0, True),
# If >= LOCKTIME_THRESHOLD, it's time-based
(LOCKTIME_THRESHOLD, False, LOCKTIME_THRESHOLD + 1, 0, False),
(LOCKTIME_THRESHOLD, False, 0, LOCKTIME_THRESHOLD, False),
(LOCKTIME_THRESHOLD, False, 0, LOCKTIME_THRESHOLD + 1, True),
(LOCKTIME_THRESHOLD, True, LOCKTIME_THRESHOLD + 1, 0, True),
(LOCKTIME_THRESHOLD, True, 0, LOCKTIME_THRESHOLD, True),
(LOCKTIME_THRESHOLD, True, 0, LOCKTIME_THRESHOLD + 1, True),
))
def test_is_final_for_block(self, locktime, inputs_final, height, timestamp, answer):
tx = read_tx('b59de025.txn')
tx.locktime = locktime
if not inputs_final:
tx.inputs[0].sequence = 0xfffffffe
assert tx.is_final_for_block(height, timestamp) == answer
def test_hash(self):
tx = read_tx('b59de025.txn')
assert tx.hex_hash() == 'b59de0255081f8032c521a1e70d9355876309a0c69e034db31c2ed387e9da809'
def test_total_output(self):
tx = read_tx('b59de025.txn')
assert tx.total_output_value() == 59_999_999_818
@pytest.mark.parametrize("script,coin,json", (
# Genesis tx
(
'01000000010000000000000000000000000000000000000000000000000000000000000000FFFFFFFF4'
'D04FFFF001D0104455468652054696D65732030332F4A616E2F32303039204368616E63656C6C6F7220'
'6F6E206272696E6B206F66207365636F6E64206261696C6F757420666F722062616E6B73FFFFFFFF010'
'0F2052A01000000434104678AFDB0FE5548271967F1A67130B7105CD6A828E03909A67962E0EA1F61DE'
'B649F6BC3F4CEF38C4F35504E51EC112DE5C384DF7BA0B8D578A4C702B6BF11D5FAC00000000',
Bitcoin,
{
'version': 1,
'nInputs': 1,
'vin': [
{
'coinbase': '04ffff001d0104455468652054696d65732030332f4a616e2f323030392'
'04368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f'
'757420666f722062616e6b73',
'text': '\x04��\x00\x1d\x01\x04EThe Times 03/Jan/2009 Chancellor on '
'brink of second bailout for banks',
'sequence': 4294967295
}
],
'nOutputs': 1,
'vout': [
{
'value': 5000000000,
'script': {
'asm': '04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1'
'f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf'
'11d5f OP_CHECKSIG',
'hex': '4104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0e'
'a1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6'
'bf11d5fac'
}
}
],
'locktime': 0,
'hash': '4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b'
},
),
(
'0100000001e1337a3e268d53b9b292dab07a3fbf47a51aa155273362c5a9e7e3dfe64f006e000000006'
'a47304402207f5ba050adff0567df3dcdc70d5059c4b8b8d2afc961d7545778a79cd125f0b8022013b3'
'e5a87f3fa84333f222dc32c2c75e630efb205a3c58010aab92ab425453104121030b56f95f6d8d5f6b8'
'4d4c7d6909423bd4b9cf189e9dd287fdea495582a3a5474feffffff01bd731f2c000000001976a914f6'
'7000134f47d60523a36505830115fd52bc656e88ac2bc30800',
Bitcoin,
{
'version': 1,
'nInputs': 1,
'vin': [
{
'hash': 'e1337a3e268d53b9b292dab07a3fbf47a51aa155273362c5a9e7e3dfe64f006e',
'idx': 0,
'script':
{
'asm': '304402207f5ba050adff0567df3dcdc70d5059c4b8b8d2afc961d7545778'
'a79cd125f0b8022013b3e5a87f3fa84333f222dc32c2c75e630efb205a3c58010aa'
'b92ab42545310[ALL|FORKID] 030b56f95f6d8d5f6b84d4c7d6909423bd4b9cf18'
'9e9dd287fdea495582a3a5474',
'hex': '47304402207f5ba050adff0567df3dcdc70d5059c4b8b8d2afc961d75457'
'78a79cd125f0b8022013b3e5a87f3fa84333f222dc32c2c75e630efb205a3c58010'
'aab92ab425453104121030b56f95f6d8d5f6b84d4c7d6909423bd4b9cf189e9dd28'
'7fdea495582a3a5474'
},
'sequence': 4294967294
}
],
'nOutputs': 1,
'vout': [
{
'value': 740258749,
'script':
{
'asm': 'OP_DUP OP_HASH160 f67000134f47d60523a36505830115fd52bc656e '
'OP_EQUALVERIFY OP_CHECKSIG',
'hex': '76a914f67000134f47d60523a36505830115fd52bc656e88ac'
}
}
],
'locktime': 574251,
'hash': '85d895859f19d8f0125f3a93af854a7b48c04cab8830f800cd5e4daaeb02dc00'
},
),
(
'0100000001e1337a3e268d53b9b292dab07a3fbf47a51aa155273362c5a9e7e3dfe64f006e000000006'
'a47304402207f5ba050adff0567df3dcdc70d5059c4b8b8d2afc961d7545778a79cd125f0b8022013b3'
'e5a87f3fa84333f222dc32c2c75e630efb205a3c58010aab92ab425453104121030b56f95f6d8d5f6b8'
'4d4c7d6909423bd4b9cf189e9dd287fdea495582a3a5474feffffff01bd731f2c000000001976a914f6'
'7000134f47d60523a36505830115fd52bc656e88ac2bc30860',
Bitcoin,
{
'version': 1,
'nInputs': 1,
'vin': [
{
'hash': 'e1337a3e268d53b9b292dab07a3fbf47a51aa155273362c5a9e7e3dfe64f006e',
'idx': 0,
'script':
{
'asm': '304402207f5ba050adff0567df3dcdc70d5059c4b8b8d2afc961d7545778'
'a79cd125f0b8022013b3e5a87f3fa84333f222dc32c2c75e630efb205a3c58010aa'
'b92ab42545310[ALL|FORKID] 030b56f95f6d8d5f6b84d4c7d6909423bd4b9cf18'
'9e9dd287fdea495582a3a5474',
'hex': '47304402207f5ba050adff0567df3dcdc70d5059c4b8b8d2afc961d75457'
'78a79cd125f0b8022013b3e5a87f3fa84333f222dc32c2c75e630efb205a3c58010'
'aab92ab425453104121030b56f95f6d8d5f6b84d4c7d6909423bd4b9cf189e9dd28'
'7fdea495582a3a5474'
},
'sequence': 4294967294
}
],
'nOutputs': 1,
'vout': [
{
'value': 740258749,
'script':
{
'asm': 'OP_DUP OP_HASH160 f67000134f47d60523a36505830115fd52bc656e '
'OP_EQUALVERIFY OP_CHECKSIG',
'hex': '76a914f67000134f47d60523a36505830115fd52bc656e88ac'
}
}
],
'locktime': 1611186987,
'hash': '9eaa6c0529a2d151eb4f0c7cfe99125c54b8908a0d3e8f66423f769bb553a816'
},
),
), ids=['genesis', 'locktime block', 'locktime time'])
def test_to_json(self, script, coin, json):
flags = 0
assert Tx.from_hex(script).to_json(flags, coin) == json
json['size'] = len(script) // 2
flags += JSONFlags.SIZE
assert Tx.from_hex(script).to_json(flags, coin) == json
if json['locktime'] == 0:
json['locktimeMeaning'] = 'valid in any block'
elif json['locktime'] < 500_000_000:
json['locktimeMeaning'] = (f'valid in blocks with height greater than '
f'{json["locktime"]:,d}')
else:
json['locktimeMeaning'] = (
'valid in blocks with MTP greater than 2021-01-20 23:56:27 UTC'
)
flags += JSONFlags.LOCKTIME_MEANING
assert Tx.from_hex(script).to_json(flags, coin) == json
class TestTxInput:
def test_is_coinbase(self):
txin = TxInput(bytes(32), 0xffffffff, b'', 0xffffffff)
assert txin.is_coinbase()
txin.prev_idx = 0
assert not txin.is_coinbase()
txin.prev_idx = 0xffffffff
assert txin.is_coinbase()
txin.prev_hash = bytes(31) + b'\1'
assert not txin.is_coinbase()
def test_is_final(self):
txin = TxInput(bytes(32), 0xffffffff, b'', 0xffffffff)
assert txin.is_final()
txin.sequence -= 1
assert not txin.is_final()
def test_to_hex(self):
tx = read_tx('afda808f.txn')
assert tx.inputs[0].to_hex() == (
'0000000000000000000000000000000000000000000000000000000000000000ffffffff220319'
'c4082f626d67706f6f6c2e636f6d2f5473537148110d9e7fcc3cf74ee70c0200ffffffff'
)
@pytest.mark.parametrize("script_len", (0, 253, 65000, 120000))
def test_size(self, script_len):
txin = TxInput(bytes(32), 0xffffffff, b'', 0xffffffff)
txin.script_sig = bytes(script_len)
assert txin.size() == len(txin.to_bytes())
@pytest.mark.parametrize("script,json", (
# Genesis coinbase
(
'0000000000000000000000000000000000000000000000000000000000000000ffffffff4d04ffff'
'001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e'
'206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73ffffffff',
{
'coinbase': '04ffff001d0104455468652054696d65732030332f4a616e2f323030'
'39204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261'
'696c6f757420666f722062616e6b73',
'text': '\x04��\x00\x1d\x01\x04EThe Times 03/Jan/2009 Chancellor on brink '
'of second bailout for banks',
'sequence': 4294967295,
},
),
# Another coinbase
(
'0000000000000000000000000000000000000000000000000000000000000000ffffffff41032b2'
'c0a2f7461616c2e636f6d2f506c656173652070617920302e3520736174732f627974652c20696e'
'666f407461616c2e636f6d6419c0bead6d55ff46be0400ffffffff',
{
'coinbase': '032b2c0a2f7461616c2e636f6d2f506c656173652070617920302e352073617'
'4732f627974652c20696e666f407461616c2e636f6d6419c0bead6d55ff46be0400',
'text': '\x03+,\n/taal.com/Please pay 0.5 sats/byte, [email protected]\x19���mU'
'�F�\x04\x00',
'sequence': 4294967295,
}
),
# A P2PK signature
(
'c997a5e56e104102fa209c6a852dd90660a20b2d9c352423edce25857fcd37040000000048473044022'
'04e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07'
'de4860a4acdd12909d831cc56cbbac4622082221a8768d1d0901ffffffff',
{
'hash': 'c997a5e56e104102fa209c6a852dd90660a20b2d9c352423edce25857fcd3704',
'idx': 0,
'script': {
'asm': '304402204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8'
'cd410220181522ec8eca07de4860a4acdd12909d831cc56cbbac4622082221a8768d1d09[ALL]',
'hex': '47304402204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5f'
'b8cd410220181522ec8eca07de4860a4acdd12909d831cc56cbbac4622082221a8768d1d0901'
},
'sequence': 4294967295,
},
),
), ids=['genesis', "coinbase", "p2pk"])
def test_to_json(self, script, json):
assert TxInput.from_hex(script).to_json(0, 0) == json
assert TxInput.from_hex(script).to_json(JSONFlags.CLASSIFY_OUTPUT_SCRIPT, 0) == json
assert TxInput.from_hex(script).to_json(JSONFlags.ENUMERATE_INPUTS, None) == json
n = random.randrange(0, 100)
json.update({'nInput': n})
assert TxInput.from_hex(script).to_json(JSONFlags.ENUMERATE_INPUTS, n) == json
class TestTxOutput:
def test_to_hex(self):
tx = read_tx('afda808f.txn')
assert tx.outputs[0].to_hex() == (
'f992814a000000001976a914db1aea84aad494d9f5b253327da23c4e51266c9388ac'
)
@pytest.mark.parametrize("script_len", (0, 253, 65000, 120000))
def test_size(self, script_len):
output = TxOutput(0, b'')
output.script_pubkey = bytes(script_len)
assert output.size() == len(output.to_bytes())
@pytest.mark.parametrize("script,json,coin,extra", (
# Genesis P2PK output
(
'00f2052a01000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1'
'f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac',
{
'value': 5000000000,
'script': {
'asm': '04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb64'
'9f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f OP_CHECKSIG',
'hex': '4104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb6'
'49f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac'
},
},
Bitcoin,
{
'type': 'pubkey',
'pubkey': '04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb6'
'49f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f',
'address': '1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa',
},
),
# P2PKH output
(
'7dd8db00000000001976a9141207c3cd11e35de894c432e9907f2dcb1446855888ac',
{
'value': 14407805,
'script': {
'asm': 'OP_DUP OP_HASH160 1207c3cd11e35de894c432e9907f2dcb14468558 '
'OP_EQUALVERIFY OP_CHECKSIG',
'hex': '76a9141207c3cd11e35de894c432e9907f2dcb1446855888ac',
},
},
BitcoinTestnet,
{
'type': 'pubkeyhash',
'address': 'mhAHm1zzjzuu61HhiQUyfjqqnewLQ3FM4s',
},
),
), ids=['p2pk', 'p2pkh'])
def test_to_json(self, script, coin, json, extra):
assert TxOutput.from_hex(script).to_json(0, coin) == json
assert TxOutput.from_hex(script).to_json(JSONFlags.ENUMERATE_OUTPUTS, coin) == json
n = random.randrange(0, 100)
json.update({'nOutput': n})
assert TxOutput.from_hex(script).to_json(JSONFlags.ENUMERATE_OUTPUTS, coin, n) == json
json['script'].update(extra)
assert TxOutput.from_hex(script).to_json(JSONFlags.CLASSIFY_OUTPUT_SCRIPT |
JSONFlags.ENUMERATE_OUTPUTS, coin, n) == json
| [
"[email protected]"
]
| |
182d2c133c867b48df3b915ff9cc056dcdba61d5 | f03e50ab105c8dd97bda374fa2d604d480b85fb3 | /apps/projects/models.py | ca45800eb9e00a783cdea3dae4a0abef2f2c4541 | []
| no_license | callowayproject/callowaysite | 9717b7d934ef142b5e6b8fa1e0c93651382198bb | eb25d208586a7dc9ffb88660b07ad942ba9fe231 | refs/heads/master | 2022-12-15T11:38:57.787801 | 2019-07-14T13:21:13 | 2019-07-14T13:21:13 | 730,944 | 1 | 0 | null | 2022-11-22T00:40:56 | 2010-06-20T19:50:00 | CSS | UTF-8 | Python | false | false | 1,511 | py | import datetime
from django.db import models
from django.core.files.images import get_image_dimensions
from projects.settings import LOGO_STORAGE, PROJECT_TYPES, STATUSES
class Project(models.Model):
"""Something that we work on"""
name = models.CharField(blank=True, max_length=255)
description = models.TextField(blank=True)
code_url = models.CharField(blank=True, max_length=255)
docs_url = models.CharField(blank=True, max_length=255)
logo = models.FileField(blank=True, upload_to='projects/logos', storage=LOGO_STORAGE())
logo_width = models.IntegerField(editable=False, blank=True, null=True)
logo_height = models.IntegerField(editable=False, blank=True, null=True)
is_fork = models.BooleanField(default=False)
why_forked = models.TextField(blank=True, null=True)
external_id = models.IntegerField(blank=True, null=True)
project_type = models.IntegerField(choices=PROJECT_TYPES, default=2)
status = models.IntegerField(choices=STATUSES, default=0)
updated = models.DateTimeField(editable=False, default=datetime.datetime.now)
class Meta:
ordering = ('name', )
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
if self.logo:
width, height = get_image_dimensions(self.logo.file, close=True)
else:
width, height = None, None
        self.logo_width = width
        self.logo_height = height
super(Project, self).save(*args, **kwargs)
| [
"[email protected]"
]
| |
3f0334e74a172b28d97ef4fe5641f86b7070ca66 | 9426f2e4f25c85c351a4d1b8855fe7d4cfd35210 | /fardel_ecommerce/order/models.py | 43289b3f939406b8fb0777a2648784f5577f747c | []
| no_license | FardelCMS/fardel_ecommerce | 52e4eaebb243c863f0dd6af22be093f4c90af8cd | d4221a7f4f7812d3e491234fc4cca6b828665ae3 | refs/heads/master | 2021-08-01T01:52:22.809056 | 2021-07-29T09:58:11 | 2021-07-29T09:58:11 | 229,290,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,175 | py | import datetime
from ..checkout.models import Cart, CartLine
from sqlalchemy import func
from sqlalchemy.dialects.postgresql import JSONB, UUID
from flask_sqlalchemy import BaseQuery
from flask_jwt_extended import current_user
from fardel_ecommerce.product.models import ProductVariant
from fardel.ext import db
class Order(db.Model):
__tablename__ = "orders"
"""
Status Types:
:Fulfiled:
:Unfulfiled:
:Canceled:
:Done:
"""
id = db.Column(db.Integer, primary_key=True, index=True)
status = db.Column(db.String(64), default="Unfulfiled")
user_id = db.Column(db.Integer, db.ForeignKey('auth_users.id'))
address_id = db.Column(db.Integer, db.ForeignKey('auth_users_address.id'))
create_time = db.Column(db.TIMESTAMP, default=func.current_timestamp())
total = db.Column(db.Integer, default=0)
quantity = db.Column(db.Integer, default=0)
data = db.Column(JSONB())
user = db.relationship("User")
address = db.relationship("UserAddress")
lines = db.relationship("OrderLine")
@staticmethod
def create_from_cart(cart_id, address_id):
cart = Cart.query.filter_by(token=cart_id).first()
if current_user.id == cart.user_id:
order = Order(
user_id=cart.user_id,
total=cart.total,
quantity=cart.quantity,
address_id=address_id,
data=cart.checkout_data
)
db.session.add(order)
db.session.commit()
for line in cart.lines:
order_line = OrderLine(
order_id=order.id,
variant_id=line.variant_id,
quantity=line.quantity,
total=line.get_total(),
data=line.data
)
db.session.add(order_line)
cart.clear()
db.session.flush()
return order
else:
return None
@property
def is_shipping_required(self):
"""Return `True` if any of the lines requires shipping."""
if not hasattr(self, '_is_shipping_required'):
self._is_shipping_required = False
for line in self.lines:
if line.variant.is_shipping_required:
self._is_shipping_required = True
break
return self._is_shipping_required
def delete_line(self, variant_id, data):
""" Delete a line with specified variant_id+data """
line = self.get_line(variant_id, data)
line.delete()
def set_fulfiled(self):
for line in self.lines:
line.variant.quantity_allocated = ProductVariant.quantity_allocated + line.quantity
self.status = "Fulfiled"
db.session.flush()
def dict(self):
""" Serialize object to json """
return {
'id': self.id,
'status': self.status,
'address': self.address.dict(),
'total': self.total,
'quantity': self.quantity,
'lines': [line.dict() for line in self.lines],
'is_shipping_required': self.is_shipping_required,
}
class OrderLine(db.Model):
__tablename__ = "order_lines"
id = db.Column(db.Integer, primary_key=True, index=True)
order_id = db.Column(db.ForeignKey('orders.id'))
variant_id = db.Column(db.Integer,
db.ForeignKey('product_product_variants.id', ondelete="CASCADE"))
total = db.Column(db.Integer)
quantity = db.Column(db.Integer)
data = db.Column(JSONB(), default={})
variant = db.relationship("ProductVariant")
order = db.relationship("Order", overlaps="lines")
def dict(self):
return {
'id': self.id,
'variant': self.variant.dict(cart=True),
'quantity': self.quantity,
'data': self.data,
'total': self.total,
'is_shipping_required': self.is_shipping_required
}
@property
def is_shipping_required(self):
return self.variant.is_shipping_required
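# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal view-level flow, assuming a JWT-authenticated request context and
# hypothetical cart_token / address_id values. create_from_cart() returns None
# when the cart belongs to a different user, and the order lines plus
# set_fulfiled() only flush, so the caller handles the final commit.
#
#   from fardel.ext import db
#   from fardel_ecommerce.order.models import Order
#
#   def place_order(cart_token, address_id):
#       order = Order.create_from_cart(cart_token, address_id)
#       if order is None:
#           return None
#       order.set_fulfiled()   # allocate stock for each line's variant
#       db.session.commit()
#       return order.dict()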
| [
"[email protected]"
]
| |
1f65100839d9ff8b15648173db4bdc566eb7e7b4 | 439e3b0fcc8959483bc35ff9c1229ce240037bbe | /tests/test_kanwa.py | db033372d8214dbf5ebd4d6f1563242af952d467 | [
"MIT"
]
| permissive | 403JFW/kakasi-utils | 16fe27265f1b7f05045e4370cf19de080c649e8f | 698b6fc8d812637473dc941b36d9ccff87410d0c | refs/heads/master | 2021-01-02T09:54:13.425825 | 2014-04-15T00:55:51 | 2014-04-15T00:55:51 | 17,693,614 | 3 | 0 | null | 2014-05-12T06:39:36 | 2014-03-13T03:37:09 | Python | UTF-8 | Python | false | false | 1,754 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
import unittest
from kakasi_utils.kanwa import Kanwa
class TestKanwa(unittest.TestCase):
def test_merge(self):
"""Test merge"""
# Get dict file paths
data_dir = os.path.dirname(os.path.realpath(__file__)) + '/'
in_files = [
data_dir + "test_kanwa_input_a.txt",
data_dir + "test_kanwa_input_b.txt"
]
out_file = data_dir + "test_kanwa_output.txt"
# Run merge
kanwa = Kanwa()
kanwa.merge(in_files, out_file)
# Assert result
for in_file in in_files:
self._assert_dict_in_dict(in_file, out_file)
# Check duplication
self._load_dict(out_file, check_duplication=True)
os.remove(out_file)
def _assert_dict_in_dict(self, file_child, file_parent):
"""Assert that child dict files item in parent dict file"""
dict_child = self._load_dict(file_child)
dict_parent = self._load_dict(file_parent)
for item in dict_child.keys():
if item not in dict_parent:
raise AssertionError("'%s' not exists in %s" % (
item, dict_parent))
def _load_dict(self, in_dict_file, check_duplication=False):
"""Load KAKASI dict file and return python dict"""
table = {}
with codecs.open(in_dict_file, 'rU', 'euc_jp') as in_file:
for line in in_file:
line = line.rstrip()
if line[0:2] == ';;':
continue
if check_duplication and (line in table):
raise AssertionError("'%s' duplicates" % line)
table[line] = True
return table
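# --- Illustrative note (added; not part of the original test module) ---
# _load_dict treats its input as a KAKASI-style dictionary: EUC-JP encoded,
# one entry per line, with lines starting with ';;' skipped as comments.
# The fixture files referenced above are assumed to contain entries along
# these lines (shown only as an assumption, not the actual fixture data):
#
#   ;; sample kanwa dictionary
#   かんじ 漢字
#   じしょ 辞書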
| [
"[email protected]"
]
| |
c6ce9e4a4ce2934670386105b410efd371bb56c3 | 87140007e96872d3611f0778eb0eebe5799616d7 | /runs/1000KB/src2-tgt1/seq-nobro-iter08000.cfg.py | d889d92141bd83f110aee7e52fd487b910171abe | [
"MIT"
]
| permissive | janpawellek/broeval | 49499fa302abff916ffced201034d3b9394503cd | 57e31aa6e354d0bba88103b44910483e8d982d00 | refs/heads/master | 2021-01-11T12:19:13.619220 | 2016-12-20T16:23:27 | 2016-12-20T16:23:27 | 76,468,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py |
# Write results to this file
OUTFILE = 'runs/1000KB/src2-tgt1/seq-nobro-iter08000.result.csv'
# Source computers for the requests
SOURCE = ['10.0.0.1', '10.0.0.3']
# Should Bro be enabled on the source machines?
SOURCE_BRO = [False, False]
# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']
# Should Bro be enabled on the target machines?
TARGET_BRO = [False]
# Connection mode (par = parallel, seq = sequential)
MODE = 'seq'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repetition
ITER = 8000
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 6
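# Added note: with SIZE = 6 each request downloads 10^6 bytes (~1000 KB),
# matching the 1000KB directory this configuration belongs to.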
| [
"[email protected]"
]
| |
f4b33a1d107c661005411ee377782495662a53f5 | d7016f69993570a1c55974582cda899ff70907ec | /sdk/appcontainers/azure-mgmt-appcontainers/azure/mgmt/appcontainers/operations/_container_apps_operations.py | 57696edccfe1612b68eccd942bdcaf4fd0b1173b | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
]
| permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 62,915 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_subscription_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.App/containerApps")
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_by_resource_group_request(resource_group_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
resource_group_name: str, container_app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
resource_group_name: str, container_app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str, container_app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_request(
resource_group_name: str, container_app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_custom_host_name_analysis_request(
resource_group_name: str,
container_app_name: str,
subscription_id: str,
*,
custom_hostname: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/listCustomHostNameAnalysis",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
if custom_hostname is not None:
_params["customHostname"] = _SERIALIZER.query("custom_hostname", custom_hostname, "str")
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_secrets_request(
resource_group_name: str, container_app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/listSecrets",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_auth_token_request(
resource_group_name: str, container_app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/getAuthtoken",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
class ContainerAppsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.appcontainers.ContainerAppsAPIClient`'s
:attr:`container_apps` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_subscription(self, **kwargs: Any) -> Iterable["_models.ContainerApp"]:
"""Get the Container Apps in a given subscription.
Get the Container Apps in a given subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ContainerApp or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ContainerAppCollection] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_subscription.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ContainerAppCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_subscription.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.App/containerApps"}
@distributed_trace
def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.ContainerApp"]:
"""Get the Container Apps in a given resource group.
Get the Container Apps in a given resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ContainerApp or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ContainerAppCollection] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ContainerAppCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps"
}
@distributed_trace
def get(self, resource_group_name: str, container_app_name: str, **kwargs: Any) -> _models.ContainerApp:
"""Get the properties of a Container App.
Get the properties of a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ContainerApp or the result of cls(response)
:rtype: ~azure.mgmt.appcontainers.models.ContainerApp
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
404: lambda response: ResourceNotFoundError(response=response, error_format=ARMErrorFormat),
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ContainerApp] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ContainerApp", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
def _create_or_update_initial(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: Union[_models.ContainerApp, IO],
**kwargs: Any
) -> _models.ContainerApp:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ContainerApp] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(container_app_envelope, (IO, bytes)):
_content = container_app_envelope
else:
_json = self._serialize.body(container_app_envelope, "ContainerApp")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("ContainerApp", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("ContainerApp", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_create_or_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
@overload
def begin_create_or_update(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: _models.ContainerApp,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ContainerApp]:
"""Create or update a Container App.
Create or update a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param container_app_envelope: Properties used to create a container app. Required.
:type container_app_envelope: ~azure.mgmt.appcontainers.models.ContainerApp
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create_or_update(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ContainerApp]:
"""Create or update a Container App.
Create or update a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param container_app_envelope: Properties used to create a container app. Required.
:type container_app_envelope: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: Union[_models.ContainerApp, IO],
**kwargs: Any
) -> LROPoller[_models.ContainerApp]:
"""Create or update a Container App.
Create or update a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
        :param container_app_envelope: Properties used to create a container app. Is either a model
         type or an IO type. Required.
:type container_app_envelope: ~azure.mgmt.appcontainers.models.ContainerApp or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ContainerApp] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
container_app_envelope=container_app_envelope,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ContainerApp", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, container_app_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
@distributed_trace
def begin_delete(self, resource_group_name: str, container_app_name: str, **kwargs: Any) -> LROPoller[None]:
"""Delete a Container App.
Delete a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
container_app_name=container_app_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
def _update_initial(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: Union[_models.ContainerApp, IO],
**kwargs: Any
) -> Optional[_models.ContainerApp]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.ContainerApp]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(container_app_envelope, (IO, bytes)):
_content = container_app_envelope
else:
_json = self._serialize.body(container_app_envelope, "ContainerApp")
request = build_update_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("ContainerApp", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
@overload
def begin_update(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: _models.ContainerApp,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ContainerApp]:
"""Update properties of a Container App.
Patches a Container App using JSON Merge Patch.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param container_app_envelope: Properties of a Container App that need to be updated. Required.
:type container_app_envelope: ~azure.mgmt.appcontainers.models.ContainerApp
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_update(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ContainerApp]:
"""Update properties of a Container App.
Patches a Container App using JSON Merge Patch.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param container_app_envelope: Properties of a Container App that need to be updated. Required.
:type container_app_envelope: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_update(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: Union[_models.ContainerApp, IO],
**kwargs: Any
) -> LROPoller[_models.ContainerApp]:
"""Update properties of a Container App.
Patches a Container App using JSON Merge Patch.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
        :param container_app_envelope: Properties of a Container App that need to be updated. Is either
         a model type or an IO type. Required.
:type container_app_envelope: ~azure.mgmt.appcontainers.models.ContainerApp or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ContainerApp] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
container_app_envelope=container_app_envelope,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ContainerApp", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
@distributed_trace
def list_custom_host_name_analysis(
self, resource_group_name: str, container_app_name: str, custom_hostname: Optional[str] = None, **kwargs: Any
) -> _models.CustomHostnameAnalysisResult:
"""Analyzes a custom hostname for a Container App.
Analyzes a custom hostname for a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param custom_hostname: Custom hostname. Default value is None.
:type custom_hostname: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomHostnameAnalysisResult or the result of cls(response)
:rtype: ~azure.mgmt.appcontainers.models.CustomHostnameAnalysisResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.CustomHostnameAnalysisResult] = kwargs.pop("cls", None)
request = build_list_custom_host_name_analysis_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
custom_hostname=custom_hostname,
api_version=api_version,
template_url=self.list_custom_host_name_analysis.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("CustomHostnameAnalysisResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_custom_host_name_analysis.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/listCustomHostNameAnalysis"
}
@distributed_trace
def list_secrets(
self, resource_group_name: str, container_app_name: str, **kwargs: Any
) -> _models.SecretsCollection:
"""List secrets for a container app.
List secrets for a container app.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecretsCollection or the result of cls(response)
:rtype: ~azure.mgmt.appcontainers.models.SecretsCollection
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.SecretsCollection] = kwargs.pop("cls", None)
request = build_list_secrets_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_secrets.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("SecretsCollection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_secrets.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/listSecrets"
}
@distributed_trace
def get_auth_token(
self, resource_group_name: str, container_app_name: str, **kwargs: Any
) -> _models.ContainerAppAuthToken:
"""Get auth token for a container app.
Get auth token for a container app.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ContainerAppAuthToken or the result of cls(response)
:rtype: ~azure.mgmt.appcontainers.models.ContainerAppAuthToken
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
404: lambda response: ResourceNotFoundError(response=response, error_format=ARMErrorFormat),
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ContainerAppAuthToken] = kwargs.pop("cls", None)
request = build_get_auth_token_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_auth_token.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ContainerAppAuthToken", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_auth_token.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/getAuthtoken"
}
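# Hedged usage sketch (not part of the generated operations class above). These
# operations are normally reached through the service client from the
# azure-mgmt-appcontainers package; the resource group and app names below are placeholders.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.appcontainers import ContainerAppsAPIClient
#
#     client = ContainerAppsAPIClient(DefaultAzureCredential(), "<subscription-id>")
#     secrets = client.container_apps.list_secrets("my-rg", "my-container-app")
#     token = client.container_apps.get_auth_token("my-rg", "my-container-app")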
| [
"[email protected]"
]
| |
afd9ffeb1bc993d9503161429c26f6b38e550db9 | 3dbbde1aa96fc09e9aab885cf3713e86f3572dec | /gs-vtoi/bin/glacier | 6e1a948d2c394d1ca44797d1d7fd32027f7bc0eb | []
| no_license | bopopescu/gs-vtoi | 6223d6dbf47e89292bd0e79e24e5664450e28cf6 | f12b802976d0020179d1b40b0b5e3af5b72d55cc | refs/heads/master | 2022-11-24T16:31:36.804869 | 2018-07-31T08:30:56 | 2018-07-31T08:30:56 | 282,551,982 | 0 | 0 | null | 2020-07-26T01:09:10 | 2020-07-26T01:09:09 | null | UTF-8 | Python | false | false | 5,288 | #!/Users/Sang/OneDrive/Developments/gs-vtoi/gs-vtoi/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Miguel Olivares http://moliware.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
glacier
~~~~~~~
Amazon Glacier tool built on top of boto. Look at the usage method to see
how to use it.
Author: Miguel Olivares <[email protected]>
"""
import sys
from boto.glacier import connect_to_region
from getopt import getopt, GetoptError
from os.path import isfile, basename
COMMANDS = ('vaults', 'jobs', 'upload')
def usage():
print("""
glacier <command> [args]
Commands
vaults - Operations with vaults
jobs - Operations with jobs
upload - Upload files to a vault. If the vault doesn't exits, it is
created
Common args:
--access_key - Your AWS Access Key ID. If not supplied, boto will
use the value of the environment variable
AWS_ACCESS_KEY_ID
--secret_key - Your AWS Secret Access Key. If not supplied, boto
will use the value of the environment variable
AWS_SECRET_ACCESS_KEY
--region - AWS region to use. Possible values: us-east-1, us-west-1,
us-west-2, ap-northeast-1, eu-west-1.
Default: us-east-1
Vaults operations:
List vaults:
glacier vaults
Jobs operations:
List jobs:
glacier jobs <vault name>
Uploading files:
glacier upload <vault name> <files>
Examples :
glacier upload pics *.jpg
glacier upload pics a.jpg b.jpg
""")
sys.exit()
def connect(region, debug_level=0, access_key=None, secret_key=None):
""" Connect to a specific region """
layer2 = connect_to_region(region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
debug=debug_level)
if layer2 is None:
print('Invalid region (%s)' % region)
sys.exit(1)
return layer2
def list_vaults(region, access_key=None, secret_key=None):
layer2 = connect(region, access_key = access_key, secret_key = secret_key)
for vault in layer2.list_vaults():
print(vault.arn)
def list_jobs(vault_name, region, access_key=None, secret_key=None):
layer2 = connect(region, access_key = access_key, secret_key = secret_key)
print(layer2.layer1.list_jobs(vault_name))
def upload_files(vault_name, filenames, region, access_key=None, secret_key=None):
layer2 = connect(region, access_key = access_key, secret_key = secret_key)
layer2.create_vault(vault_name)
glacier_vault = layer2.get_vault(vault_name)
for filename in filenames:
if isfile(filename):
sys.stdout.write('Uploading %s to %s...' % (filename, vault_name))
sys.stdout.flush()
archive_id = glacier_vault.upload_archive(
filename,
description = basename(filename))
print(' done. Vault returned ArchiveID %s' % archive_id)
def main():
if len(sys.argv) < 2:
usage()
command = sys.argv[1]
if command not in COMMANDS:
usage()
argv = sys.argv[2:]
options = 'a:s:r:'
long_options = ['access_key=', 'secret_key=', 'region=']
try:
opts, args = getopt(argv, options, long_options)
except GetoptError as e:
usage()
    # Parse arguments
access_key = secret_key = None
region = 'us-east-1'
for option, value in opts:
if option in ('-a', '--access_key'):
access_key = value
elif option in ('-s', '--secret_key'):
secret_key = value
elif option in ('-r', '--region'):
region = value
# handle each command
if command == 'vaults':
list_vaults(region, access_key, secret_key)
elif command == 'jobs':
if len(args) != 1:
usage()
list_jobs(args[0], region, access_key, secret_key)
elif command == 'upload':
if len(args) < 2:
usage()
upload_files(args[0], args[1:], region, access_key, secret_key)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| ||
82fd235db118646fc86003f4b9b8c9456cea7a02 | c5758c1f4c880f4530df1a5ffb4c30ee2da445ee | /pytracking/vot_ep/sk3x3_meanmax_adaptive/vot_wrapper_sk3x3_meanmax_adaptive_ep0024.py | 23ccb4ec2fc0650043d494e0655d884737572b61 | []
| no_license | bfjei2825401/d3s | 6d662fc301181a0e3ad831b0db6111e3cf8f4097 | 32140a3c67252f0e98cbfbf6ad6d2a79267c221b | refs/heads/master | 2023-02-27T09:57:25.692878 | 2021-01-27T14:20:57 | 2021-01-27T14:20:57 | 297,217,521 | 0 | 0 | null | 2020-09-21T03:23:09 | 2020-09-21T03:23:09 | null | UTF-8 | Python | false | false | 2,523 | py | import pytracking.vot as vot
import sys
import cv2
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from pytracking.tracker.segm_sk3x3_meanmax_adaptive import SegmSK3x3MeanMaxAdaptive
from pytracking.parameter.segm_sk3x3_meanmax_adaptive import default_params_ep as vot_params
def rect_to_poly(rect):
x0 = rect[0]
y0 = rect[1]
x1 = rect[0] + rect[2]
y1 = rect[1]
x2 = rect[0] + rect[2]
y2 = rect[1] + rect[3]
x3 = rect[0]
y3 = rect[1] + rect[3]
return [x0, y0, x1, y1, x2, y2, x3, y3]
def parse_sequence_name(image_path):
idx = image_path.find('/color/')
return image_path[idx - image_path[:idx][::-1].find('/'):idx], idx
def parse_frame_name(image_path, idx):
frame_name = image_path[idx + len('/color/'):]
return frame_name[:frame_name.find('.')]
# MAIN
handle = vot.VOT("polygon")
selection = handle.region()
imagefile = handle.frame()
if not imagefile:
sys.exit(0)
params = vot_params.parameters(24)
gt_rect = [round(selection.points[0].x, 2), round(selection.points[0].y, 2),
round(selection.points[1].x, 2), round(selection.points[1].y, 2),
round(selection.points[2].x, 2), round(selection.points[2].y, 2),
round(selection.points[3].x, 2), round(selection.points[3].y, 2)]
image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)
sequence_name, idx_ = parse_sequence_name(imagefile)
frame_name = parse_frame_name(imagefile, idx_)
params.masks_save_path = ''
params.save_mask = False
tracker = SegmSK3x3MeanMaxAdaptive(params)
# tell the sequence name to the tracker (to save segmentation masks to the disk)
tracker.sequence_name = sequence_name
tracker.frame_name = frame_name
tracker.initialize(image, gt_rect)
while True:
imagefile = handle.frame()
if not imagefile:
break
image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)
# tell the frame name to the tracker (to save segmentation masks to the disk)
frame_name = parse_frame_name(imagefile, idx_)
tracker.frame_name = frame_name
prediction = tracker.track(image)
if len(prediction) == 4:
prediction = rect_to_poly(prediction)
pred_poly = vot.Polygon([vot.Point(prediction[0], prediction[1]),
vot.Point(prediction[2], prediction[3]),
vot.Point(prediction[4], prediction[5]),
vot.Point(prediction[6], prediction[7])])
handle.report(pred_poly)
| [
"[email protected]"
]
| |
66eb41b497be5f43356f205ce49307cd2e618a2e | 63ba933a294865f65409635f62e0f1d59f725f37 | /src/linkedLists/flatten.py | 4b148dc9198652a6473dde5fda7746be4087ad87 | [
"CC0-1.0"
]
| permissive | way2arun/datastructures_algorithms | fc4302bdbb923ef8912a4acf75a286f2b695de2a | 4ea4c1579c28308455be4dfa02bd45ebd88b2d0a | refs/heads/master | 2021-12-07T04:34:35.732026 | 2021-09-30T12:11:32 | 2021-09-30T12:11:32 | 203,658,808 | 1 | 0 | null | 2020-08-08T15:55:09 | 2019-08-21T20:23:46 | Python | UTF-8 | Python | false | false | 3,212 | py | """
https://leetcode.com/explore/challenge/card/july-leetcoding-challenge/545/week-2-july-8th-july-14th/3386/
You are given a doubly linked list which in addition to the next and previous pointers, it could have a child pointer, which may or may not point to a separate doubly linked list. These child lists may have one or more children of their own, and so on, to produce a multilevel data structure, as shown in the example below.
Flatten the list so that all the nodes appear in a single-level, doubly linked list. You are given the head of the first level of the list.
Example 1:
Input: head = [1,2,3,4,5,6,null,null,null,7,8,9,10,null,null,11,12]
Output: [1,2,3,7,8,11,12,9,10,4,5,6]
Explanation:
The multilevel linked list in the input is as follows:
After flattening the multilevel linked list it becomes:
Example 2:
Input: head = [1,2,null,3]
Output: [1,3,2]
Explanation:
The input multilevel linked list is as follows:
1---2---NULL
|
3---NULL
Example 3:
Input: head = []
Output: []
How multilevel linked list is represented in test case:
We use the multilevel linked list from Example 1 above:
1---2---3---4---5---6--NULL
|
7---8---9---10--NULL
|
11--12--NULL
The serialization of each level is as follows:
[1,2,3,4,5,6,null]
[7,8,9,10,null]
[11,12,null]
To serialize all levels together we will add nulls in each level to signify no node connects to the upper node of the previous level. The serialization becomes:
[1,2,3,4,5,6,null]
[null,null,7,8,9,10,null]
[null,11,12,null]
Merging the serialization of each level and removing trailing nulls we obtain:
[1,2,3,4,5,6,null,null,null,7,8,9,10,null,null,11,12]
Constraints:
Number of Nodes will not exceed 1000.
1 <= Node.val <= 10^5
"""
# Definition for a Node.
class Node:
def __init__(self, val, prev, next, child):
self.val = val
self.prev = prev
self.next = next
self.child = child
class Solution:
def flatten(self, head: 'Node') -> 'Node':
# Solution 1 - 36 ms
"""
if not head:
return head
order = []
stack = [head]
while stack:
curr = stack.pop()
order.append(curr)
if curr.next:
stack.append(curr.next)
if curr.child:
stack.append(curr.child)
curr.child = None
for i in range(len(order) - 1):
order[i].next = order[i + 1]
order[i + 1].prev = order[i]
return order[0]
"""
# Solution 2
pointer = head
branches = []
while pointer:
if pointer.child:
if pointer.next: branches.append(pointer.next)
pointer.next = pointer.child
pointer.child = None
pointer.next.prev = pointer
elif not pointer.next and len(branches) > 0:
pointer.next = branches.pop()
pointer.next.prev = pointer
pointer = pointer.next
return head
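# Hedged helper (not part of the original submission): walks a flattened list via the
# next pointers and collects the values, handy for checking Solution.flatten output.
def to_list(head):
    values = []
    while head:
        values.append(head.val)
        head = head.next
    return values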
# Main Call: build Example 2 ([1,2,null,3], where 3 is the child of node 1) and flatten it
child = Node(3, None, None, None)
second = Node(2, None, None, None)
root_node = Node(1, None, second, child)
second.prev = root_node
solution = Solution()
flattened = solution.flatten(root_node)
while flattened:
    print(flattened.val)  # expected order: 1, 3, 2
    flattened = flattened.next
| [
"[email protected]"
]
| |
03c9b8c1400c21f8f1f1f697eace517cba3fabce | f0b75bd94f133a13f469f429a696f26be3be9862 | /week 2/.history/python_second_assignment_20200204154901.py | ca2a7780d18decaa9aca7b5410cab8eda6e90bd4 | []
| no_license | dechavez4/Python_handin_assignments | 023350fabd212cdf2a4ee9cd301306dc5fd6bea0 | 82fd8c991e560c18ecb2152ea5a8fc35dfc3c608 | refs/heads/master | 2023-01-11T23:31:27.220757 | 2020-05-22T10:33:56 | 2020-05-22T10:33:56 | 237,179,899 | 0 | 0 | null | 2022-12-30T20:14:04 | 2020-01-30T09:30:16 | Python | UTF-8 | Python | false | false | 1,447 | py | # Exercise 1
# Create a python file with 3 functions:
# A. def print_file_content(file) that can print content of a csv file to the console
import csv
from sys import argv
import platform
filename = argv[1]
def print_file_content(file):
    with open(file) as csv_file:
content = csv_file.readlines()
for line in content[:20]:
print(line.strip().split(','))
# Note: opening the file in 'w' mode can overwrite the old file.
# B. def write_list_to_file(output_file, lst) that can take a list of tuple and write each element to a new line in file
def write_list_to_file(output_file, *lst):
if platform.system() == 'Windows':
newline=''
else:
newline=None
with open (output_file, 'w', newline=newline) as output_file:
output_writer = csv.writer(output_file)
for ele in lst:
output_writer.writerow(ele)
# C. def read_csv(input_file) that take a csv file and read each row into a list
def read_line(file):
with open(file) as file_object:
lines = file_object.readlines()
print(lines)
for line in lines:
print(line.rstrip())
def main():
if argv[2] == 'print_file_content':
print_file_content(filename)
if argv[2] == 'write_list_to_file':
inputfield = argv[3:]
write_list_to_file(filename, inputfield)
if argv[2] == 'read_line':
read_line(filename)
if __name__ == '__main__':
    main()
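# Hedged usage sketch (script and file names are placeholders; argv[1] is the csv file, argv[2] the command):
#   python python_second_assignment.py data.csv print_file_content
#   python python_second_assignment.py out.csv write_list_to_file 1 2 3
#   python python_second_assignment.py data.csv read_line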
"[email protected]"
]
| |
39765aad0f84ce97c089987f6a920f1900d8407c | 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | /tools/CrossStackProfiler/CspReporter.py | 7ae672a2e99fd3f8e3f64c223b2fc2c9a0b3ecf5 | [
"Apache-2.0"
]
| permissive | PaddlePaddle/Paddle | b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | 22a11a60e0e3d10a3cf610077a3d9942a6f964cb | refs/heads/develop | 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 | Apache-2.0 | 2023-09-14T19:20:51 | 2016-08-15T06:59:08 | C++ | UTF-8 | Python | false | false | 8,467 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import os
from multiprocessing import Process
from CspFileReader import (
DCGM_PATH,
FILEORGANIZEFORM_BYRANK,
FILEORGANIZEFORM_BYTRAINER,
NET_PATH,
PROFILE_PATH,
TIME_PATH,
getLogger,
)
from DCGMFileReader import dcgmFileReader
from ProfileFileReader import profileFileReader
def get_argparse():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--profile_path',
type=str,
default='.',
help='Working path that store the monitor data.',
)
parser.add_argument(
'--timeline_path',
type=str,
default='.',
help='Output timeline file name.',
)
parser.add_argument(
'--gpuPerTrainer', type=int, default=8, help='Gpus per trainer.'
)
parser.add_argument(
'--trainerNum', type=int, default=4, help='Num of trainer.'
)
parser.add_argument(
'--groupSize', type=int, default=8, help='Num of trainer in a group.'
)
parser.add_argument(
'--displaySize',
type=int,
default=2,
help='Num of line need to display in a group.',
)
return parser.parse_args()
class CspReporter:
def __init__(self, args):
self._args = args
print(self._args)
self._workPath = self._args.profile_path
self._saveFilePath = self._args.timeline_path
self._gpuPerTrainer = self._args.gpuPerTrainer
self._groupSize = self._args.groupSize
self._displaySize = self._args.displaySize
self._trainerNum = self._args.trainerNum
self._checkArgs()
self._init_logger()
self._init_timeInfo()
self._init_reader()
def _checkArgs(self):
if self._trainerNum % self._groupSize != 0:
raise Exception(
"Input args error: trainerNum[%d] %% groupSize[%d] != 0"
% (self._trainerNum, self._groupSize)
)
def _init_logger(self):
self._logger = getLogger()
def _init_reader(self):
self._dcgmPath = os.path.join(self._workPath, DCGM_PATH)
self._netPath = os.path.join(self._workPath, NET_PATH)
self._profilePath = os.path.join(self._workPath, PROFILE_PATH)
self._netFileReaderArgs = {
"dataPath": self._netPath,
"groupSize": self._groupSize,
"displaySize": self._displaySize,
"gpuPerTrainer": self._gpuPerTrainer,
"minTimeStamp": self._minTimeStamp,
"organizeForm": FILEORGANIZEFORM_BYTRAINER,
}
self._dcgmFileReaderArgs = {
"dataPath": self._dcgmPath,
"groupSize": self._groupSize,
"displaySize": self._displaySize,
"gpuPerTrainer": self._gpuPerTrainer,
"minTimeStamp": self._minTimeStamp,
"organizeForm": FILEORGANIZEFORM_BYTRAINER,
}
self._profileFileReaderArgs = {
"dataPath": self._profilePath,
"groupSize": self._groupSize,
"displaySize": self._displaySize,
"gpuPerTrainer": self._gpuPerTrainer,
"minTimeStamp": self._minTimeStamp,
"organizeForm": FILEORGANIZEFORM_BYRANK,
}
self._dcgmFileReader = dcgmFileReader(
self._logger, self._dcgmFileReaderArgs
)
self._profileFileReader = profileFileReader(
self._logger, self._profileFileReaderArgs
)
def _init_timeInfo(self):
self._timePath = os.path.join(self._workPath, TIME_PATH)
self._timeInfo = {}
self._minTimeStamp = 0
self._set_timeInfo()
def _set_timeInfo(self, timeFileNamePrefix="time.txt", sed="."):
timeFileNameList = glob.glob(
os.path.join(self._timePath, timeFileNamePrefix, sed, "*")
)
for timeFileName in timeFileNameList:
trainerId = int(timeFileName.split(sed)[-1])
gpuId = int(timeFileName.split(sed)[-2])
info = {}
with open(timeFileName, "r") as rf:
for line in rf:
if line.startswith("start time:"):
info["start_time"] = int(
float(line.split(":")[-1]) * 1e9
)
self._minTimeStamp = min(
self._minTimeStamp, info["start_time"]
)
if line.startswith("end time:"):
info["end_time"] = int(float(line.split(":")[-1]) * 1e9)
            if info:
                self._timeInfo[gpuId * trainerId] = info
def _generateTraceFileByGroupAndGpuId(
self, pipileInfo, netInfo, groupId, gpuId
):
dcgmInfoDict = self._dcgmFileReader.getDcgmInfoDict(groupId, gpuId)
opInfoDict = self._profileFileReader.getOpInfoDict(groupId, gpuId)
traceObj = {}
traceObj["traceEvents"] = (
pipileInfo[str(gpuId)]
+ opInfoDict["traceEvents"]
+ dcgmInfoDict["traceEvents"]
+ netInfo["traceEvents"]
)
self._profileFileReader.dumpDict(
traceObj, "traceFile", groupId, gpuId, False, self._saveFilePath
)
def _generateTraceFileByGroup(self, groupId, processNum):
# first we need to generate pipeline info
pipileInfo = self._profileFileReader.getPipeLineInfo(
groupId, processNum
)
# second we need to generate dcgm info
dcgmInfo = self._dcgmFileReader.getDCGMTraceInfo(groupId, processNum)
# third we need to generate net info
netInfo = {}
netInfo["traceEvents"] = []
# netInfo = self._netFileReader.parseFileByGroup(groupId, processNum)
# forth we need to generate op info
opInfo = self._profileFileReader.getOPTraceInfo(groupId)
# finally we need dump this information into disk
processPool = []
pidList = []
for gpuId in range(self._gpuPerTrainer):
subproc = Process(
target=self._generateTraceFileByGroupAndGpuId,
args=(
pipileInfo,
netInfo,
groupId,
gpuId,
),
)
processPool.append(subproc)
subproc.start()
pidList.append(subproc.pid)
self._logger.info(
"[traceFile]: process [%d] has been started, total task num is %d ..."
% (subproc.pid, 1)
)
for t in processPool:
t.join()
pidList.remove(t.pid)
self._logger.info(
"[traceFile]: process [%d] has exited! remained %d process!"
% (t.pid, len(pidList))
)
def generateTraceFile(self, processNum=8):
processPool = []
pidList = []
        for groupId in range(self._trainerNum // self._groupSize):
subproc = Process(
target=self._generateTraceFileByGroup,
args=(
groupId,
processNum,
),
)
processPool.append(subproc)
subproc.start()
pidList.append(subproc.pid)
self._logger.info(
"[GroupTraceFile]: process [%d] has been started, total task num is %d ..."
% (subproc.pid, 1)
)
for t in processPool:
t.join()
pidList.remove(t.pid)
self._logger.info(
"[GroupTraceFile]: process [%d] has exited! remained %d process!"
% (t.pid, len(pidList))
)
if __name__ == '__main__':
args = get_argparse()
tl = CspReporter(args)
tl.generateTraceFile()
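# Hedged invocation sketch (paths are placeholders; flags mirror get_argparse above):
#   python CspReporter.py --profile_path ./monitor_data --timeline_path ./timeline \
#       --gpuPerTrainer 8 --trainerNum 4 --groupSize 8 --displaySize 2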
| [
"[email protected]"
]
| |
667a6a1286fe0c8a7c4877e2d9a1aab0a9a79399 | c3ff891e0e23c5f9488508d30349259cc6b64b4d | /python练习/django exercise/FormsDemo/first/views.py | ebcd835469f1f68f965b5e54504c5e8ab9bab17f | []
| no_license | JacksonMike/python_exercise | 2af2b8913ec8aded8a17a98aaa0fc9c6ccd7ba53 | 7698f8ce260439abb3cbdf478586fa1888791a61 | refs/heads/master | 2020-07-14T18:16:39.265372 | 2019-08-30T11:56:29 | 2019-08-30T11:56:29 | 205,370,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,089 | py | from django.shortcuts import render, HttpResponse
from django.core.exceptions import ValidationError
from django.forms import widgets
from first.models import UserInfo
from django import forms
# Create your views here.
def register(request):
if request.method == "GET":
form = UserForm()
return render(request, "register.html", locals())
else:
print(request.POST)
form = UserForm(request.POST)
if form.is_valid():
            # Data that passed validation
print(form.cleaned_data)
UserInfo.objects.create(**form.cleaned_data)
return HttpResponse("注册成功")
else:
            # Fields that failed validation
error_data = form.errors
return render(request, "register.html", locals())
class UserForm(forms.Form):
user = forms.CharField(max_length=7,
label="用户名",
error_messages={"required": "该字段不能为空"},
widget=widgets.TextInput(attrs={"class": "form-control"}))
pwd = forms.CharField(max_length=7,
label="密码",
error_messages={"required": "该字段不能为空"},
widget=widgets.PasswordInput(attrs={"class": "form-control"}))
email = forms.EmailField(min_length=5,
label="邮箱",
error_messages={"invalid": "邮箱格式错误", "required": "该字段不能为空"},
widget=widgets.EmailInput(attrs={"class": "form-control"}))
    def clean_user(self):
        """Check whether the username is already registered"""
val = self.cleaned_data.get("user")
if not UserInfo.objects.filter(user=val).first():
return val
else:
raise ValidationError("该用户名已经被注册")
def clean_pwd(self):
val = self.cleaned_data.get("pwd")
if val.isdigit():
raise ValidationError("密码不能为纯数字")
else:
return val
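# Hedged wiring sketch (assumed project layout, not part of this file):
#   # FormsDemo/urls.py
#   from django.urls import path
#   from first import views
#   urlpatterns = [path('register/', views.register, name='register')]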
| [
"[email protected]"
]
| |
3f89fb97ec5363fc81efe42ce4a627e34436e809 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_suite.py | 5d9059953940881ade58e572a6b7dde68f38bcfb | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py |
# class header
class _SUITE():
def __init__(self,):
self.name = "SUITE"
self.definitions = [u'a set of connected rooms, especially in a hotel: ', u'a set of furniture for one room, of matching design and colour: ', u'a piece of music with several parts, usually all in the same key', u'a set of related software (= computer program) products']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
]
| |
1bdd34e88fd6277b360b09b84201d96e1a50fe44 | 8e24e8bba2dd476f9fe612226d24891ef81429b7 | /geeksforgeeks/python/python_all/143_15.py | 8d67528a49ea0ab7b49f24cfcb96309e98a02750 | []
| no_license | qmnguyenw/python_py4e | fb56c6dc91c49149031a11ca52c9037dc80d5dcf | 84f37412bd43a3b357a17df9ff8811eba16bba6e | refs/heads/master | 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,669 | py | Python | Check if string ends with any string in given list
While working with strings, prefixes and suffixes often play an important role
in deciding how a value should be handled. In data-manipulation tasks we may need
to check whether a string ends with any of a given list of suffixes. Let's discuss
certain ways in which this task can be performed.
**Method #1 : Using filter() + endswith()**
The combination of these two functions can perform this task: filter() runs the
string's endswith() check against each candidate suffix in the list, and a
non-empty result means at least one suffix matched.
# Python3 code to demonstrate
# Checking for string match suffix
# using filter() + endswith()
# initializing string
test_string = "GfG is best"
# initializing suffix list
suff_list = ['best', 'iss', 'good']
# printing original string
print("The original string : " + str(test_string))
# using filter() + endswith()
# Checking for string match suffix
res = list(filter(test_string.endswith, suff_list)) != []
# print result
print("Does string end with any suffix list sublist ? : " +
str(res))
**Output :**
The original string : GfG is best
Does string end with any suffix list sublist ? : True
**Method #2 : Using endswith()**
As an improvement over the method above, filter() is not needed at all:
endswith() itself accepts a tuple of suffixes, so the whole check list can be
passed to it directly.
# Python3 code to demonstrate
# Checking for string match suffix
# using endswith()
# initializing string
test_string = "GfG is best"
# initializing suffix list
suff_list = ['best', 'iss', 'good']
# printing original string
print("The original string : " + str(test_string))
# using endswith()
# Checking for string match suffix
res = test_string.endswith(tuple(suff_list))
# print result
print("Does string end with any suffix list sublist ? : " +
str(res))
**Output :**
The original string : GfG is best
Does string end with any suffix list sublist ? : True
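**Method #3 : Using any() + endswith()**
A generator expression passed to any() performs the same check lazily, stopping at
the first suffix that matches and avoiding the intermediate list or tuple.
res = any(test_string.endswith(suff) for suff in suff_list)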
| [
"[email protected]"
]
| |
cd979cf383863e27fac2a067d8e949630956d387 | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/io_scene_md2/quake2/bsp.py | fd4a108bf3ac3eaa8e59362e0ea2f1064605f00e | []
| no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,530 | py | """This module provides file I/O for Quake 2 BSP map files.
Example:
bsp_file = bsp.Bsp.open('base1.bsp')
References:
Quake 2 Source
- id Software
- https://github.com/id-Software/Quake-2
Quake 2 BSP File Format
- Max McGuire
- http://www.flipcode.com/archives/Quake_2_BSP_File_Format.shtml
"""
import io
import struct
__all__ = ['BadBspFile', 'is_bspfile', 'Bsp']
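# Hedged usage sketch (the map path is a placeholder; the attributes are defined on Bsp below):
#
#     with Bsp.open('maps/base1.bsp') as bsp_file:
#         print(len(bsp_file.vertexes), 'vertexes,', len(bsp_file.faces), 'faces')
#         for model in bsp_file.models:
#             print(model.first_face, model.number_of_faces)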
class BadBspFile(Exception):
pass
def _check_bspfile(fp):
fp.seek(0)
data = fp.read(struct.calcsize('<4si'))
    identity, version = struct.unpack('<4si', data)
    return identity == b'IBSP' and version == 38
def is_bspfile(filename):
"""Quickly see if a file is a bsp file by checking the magic number.
The filename argument may be a file for file-like object.
"""
result = False
try:
if hasattr(filename, 'read'):
return _check_bspfile(fp=filename)
else:
with open(filename, 'rb') as fp:
return _check_bspfile(fp)
except:
pass
return result
class ClassSequence:
"""Class for reading a sequence of data structures"""
Class = None
@classmethod
def write(cls, file, structures):
for structure in structures:
cls.Class.write(file, structure)
@classmethod
def read(cls, file):
return [cls.Class(*c) for c in struct.iter_unpack(cls.Class.format, file.read())]
class Entities:
"""Class for representing the entities lump"""
@classmethod
def write(cls, file, entities):
entities_data = entities.encode('cp437')
file.write(entities_data)
@classmethod
def read(cls, file):
entities_data = file.read()
return entities_data.decode('cp437')
class Plane:
"""Class for representing a bsp plane
Attributes:
normal: The normal vector to the plane.
distance: The distance from world (0, 0, 0) to a point on the plane
type: Planes are classified as follows:
0: Axial plane aligned to the x-axis.
1: Axial plane aligned to the y-axis.
2: Axial plane aligned to the z-axis.
3: Non-axial plane roughly aligned to the x-axis.
4: Non-axial plane roughly aligned to the y-axis.
5: Non-axial plane roughly aligned to the z-axis.
"""
format = '<4fi'
size = struct.calcsize(format)
__slots__ = (
'normal',
'distance',
'type'
)
def __init__(self,
normal_x,
normal_y,
normal_z,
distance,
type):
self.normal = normal_x, normal_y, normal_z
self.distance = distance
self.type = type
@classmethod
def write(cls, file, plane):
plane_data = struct.pack(cls.format,
*plane.normal,
plane.distance,
plane.type)
file.write(plane_data)
@classmethod
def read(cls, file):
plane_data = file.read(cls.size)
plane_struct = struct.unpack(cls.format, plane_data)
return Plane(*plane_struct)
class Planes(ClassSequence):
Class = Plane
class Vertex:
"""Class for representing a vertex
A Vertex is an XYZ triple.
Attributes:
x: The x-coordinate
y: The y-coordinate
z: The z-coordinate
"""
format = '<3f'
size = struct.calcsize(format)
__slots__ = (
'x',
'y',
'z'
)
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __getitem__(self, item):
if type(item) is int:
return [self.x, self.y, self.z][item]
elif type(item) is slice:
start = item.start or 0
stop = item.stop or 3
return [self.x, self.y, self.z][start:stop]
@classmethod
def write(cls, file, vertex):
vertex_data = struct.pack(cls.format,
vertex.x,
vertex.y,
vertex.z)
file.write(vertex_data)
@classmethod
def read(cls, file):
vertex_data = file.read(cls.size)
vertex_struct = struct.unpack(cls.format, vertex_data)
return Vertex(*vertex_struct)
class Vertexes(ClassSequence):
Class = Vertex
class Visibilities:
@classmethod
def write(cls, file, structures):
file.write(structures)
@classmethod
def read(cls, file):
return file.read()
class Node:
"""Class for representing a node
A Node is a data structure used to compose a bsp tree data structure. A
child may be a Node or a Leaf.
Attributes:
plane_number: The number of the plane that partitions the node.
children: A two-tuple of the two sub-spaces formed by the partitioning
plane.
Note: Child 0 is the front sub-space, and 1 is the back sub-space.
Note: If bit 15 is set, the child is a leaf.
bounding_box_min: The minimum coordinate of the bounding box containing
this node and all of its children.
bounding_box_max: The maximum coordinate of the bounding box containing
this node and all of its children.
first_face: The number of the first face in Bsp.mark_surfaces.
number_of_faces: The number of faces contained in the node. These
are stored in consecutive order in Bsp.mark_surfaces starting at
Node.first_face.
"""
format = '<3i6h2H'
size = struct.calcsize(format)
__slots__ = (
'plane_number',
'children',
'bounding_box_min',
'bounding_box_max',
'first_face',
'number_of_faces'
)
def __init__(self,
plane_number,
child_front,
child_back,
bounding_box_min_x,
bounding_box_min_y,
bounding_box_min_z,
bounding_box_max_x,
bounding_box_max_y,
bounding_box_max_z,
first_face,
number_of_faces):
self.plane_number = plane_number
self.children = child_front, child_back
self.bounding_box_min = bounding_box_min_x, bounding_box_min_y, bounding_box_min_z
self.bounding_box_max = bounding_box_max_x, bounding_box_max_y, bounding_box_max_z
self.first_face = first_face
self.number_of_faces = number_of_faces
@classmethod
def write(cls, file, node):
node_data = struct.pack(cls.format,
node.plane_number,
*node.children,
*node.bounding_box_min,
*node.bounding_box_max,
node.first_face,
node.number_of_faces)
file.write(node_data)
@classmethod
def read(cls, file):
node_data = file.read(cls.size)
node_struct = struct.unpack(cls.format, node_data)
return Node(*node_struct)
class Nodes(ClassSequence):
Class = Node
class SurfaceFlag:
LIGHT = 0x1
SLICK = 0x2
SKY = 0x4
WARP = 0x8
TRANS33 = 0x10
TRANS66 = 0x20
FLOWING = 0x40
NODRAW = 0x80
class TextureInfo:
"""Class for representing a texture info
Attributes:
s: The s vector in texture space represented as an XYZ three-tuple.
s_offset: Horizontal offset in texture space.
t: The t vector in texture space represented as an XYZ three-tuple.
t_offset: Vertical offset in texture space.
flags: A bitfield of surface behaviors.
value:
texture_name: The path of the texture.
next_texture_info: For animated textures. Sequence will be terminated
with a value of -1
"""
format = '<8f2i32si'
size = struct.calcsize(format)
__slots__ = (
's',
's_offset',
't',
't_offset',
'flags',
'value',
'texture_name',
'next_texture_info'
)
def __init__(self,
s_x,
s_y,
s_z,
s_offset,
t_x,
t_y,
t_z,
t_offset,
flags,
value,
texture_name,
next_texture_info):
self.s = s_x, s_y, s_z
self.s_offset = s_offset
self.t = t_x, t_y, t_z
self.t_offset = t_offset
self.flags = flags
self.value = value
if type(texture_name) == bytes:
self.texture_name = texture_name.split(b'\00')[0].decode('ascii')
else:
self.texture_name = texture_name
self.next_texture_info = next_texture_info
@classmethod
def write(cls, file, texture_info):
texture_info_data = struct.pack(cls.format,
*texture_info.s,
texture_info.s_offset,
*texture_info.t,
texture_info.t_offset,
texture_info.flags,
texture_info.value,
texture_info.texture_name.encode('ascii'),
texture_info.next_texture_info)
file.write(texture_info_data)
@classmethod
def read(cls, file):
texture_info_data = file.read(cls.size)
texture_info_struct = struct.unpack(cls.format, texture_info_data)
return TextureInfo(*texture_info_struct)
class TextureInfos(ClassSequence):
Class = TextureInfo
class Face:
"""Class for representing a face
Attributes:
plane_number: The plane in which the face lies.
side: Which side of the plane the face lies. 0 is the front, 1 is the
back.
first_edge: The number of the first edge in Bsp.surf_edges.
number_of_edges: The number of edges contained within the face. These
are stored in consecutive order in Bsp.surf_edges starting at
Face.first_edge.
texture_info: The number of the texture info for this face.
styles: A four-tuple of lightmap styles.
light_offset: The offset into the lighting data.
"""
format = '<Hhi2h4Bi'
size = struct.calcsize(format)
__slots__ = (
'plane_number',
'side',
'first_edge',
'number_of_edges',
'texture_info',
'styles',
'light_offset'
)
def __init__(self,
plane_number,
side,
first_edge,
number_of_edges,
texture_info,
style_0,
style_1,
style_2,
style_3,
light_offset):
self.plane_number = plane_number
self.side = side
self.first_edge = first_edge
self.number_of_edges = number_of_edges
self.texture_info = texture_info
self.styles = style_0, style_1, style_2, style_3
self.light_offset = light_offset
@classmethod
def write(cls, file, plane):
face_data = struct.pack(cls.format,
plane.plane_number,
plane.side,
plane.first_edge,
plane.number_of_edges,
plane.texture_info,
*plane.styles,
plane.light_offset)
file.write(face_data)
@classmethod
def read(cls, file):
face_data = file.read(cls.size)
face_struct = struct.unpack(cls.format, face_data)
return Face(*face_struct)
class Faces(ClassSequence):
Class = Face
class Lighting:
@classmethod
def write(cls, file, lighting):
file.write(lighting)
@classmethod
def read(cls, file):
return file.read()
class Contents:
SOLID = 1
WINDOW = 2
AUX = 4
LAVA = 8
SLIME = 16
WATER = 32
MIST = 64
LAST_VISIBLE = 64
AREAPORTAL = 0x8000
PLAYERCLIP = 0x10000
MONSTERCLIP = 0x20000
CURRENT_0 = 0x40000
CURRENT_90 = 0x80000
CURRENT_180 = 0x100000
CURRENT_270 = 0x200000
CURRENT_UP = 0x400000
CURRENT_DOWN = 0x800000
ORIGIN = 0x1000000
MONSTER = 0x2000000
DEADMONSTER = 0x4000000
DETAIL = 0x8000000
TRANSLUCENT = 0x10000000
LADDER = 0x20000000
class Leaf:
"""Class for representing a leaf
Attributes:
contents: The content of the leaf. Affect the player's view.
cluster: The cluster containing this leaf. -1 for no visibility info.
area: The area containing this leaf.
bounding_box_min: The minimum coordinate of the bounding box containing
this node.
bounding_box_max: The maximum coordinate of the bounding box containing
this node.
first_leaf_face: The number of the first face in Bsp.faces
number_of_leaf_faces: The number of faces contained within the leaf.
These are stored in consecutive order in Bsp.faces at
Leaf.first_leaf_face.
first_leaf_brush: The number of the first brush in Bsp.brushes
number_of_leaf_brushes: The number of brushes contained within the
leaf. These are stored in consecutive order in Bsp.brushes at
Leaf.first_leaf_brush.
"""
format = '<i8h4H'
size = struct.calcsize(format)
__slots__ = (
'contents',
'cluster',
'area',
'bounding_box_min',
'bounding_box_max',
'first_leaf_face',
'number_of_leaf_faces',
'first_leaf_brush',
'number_of_leaf_brushes'
)
def __init__(self,
contents,
cluster,
area,
bounding_box_min_x,
bounding_box_min_y,
bounding_box_min_z,
bounding_box_max_x,
bounding_box_max_y,
bounding_box_max_z,
first_leaf_face,
number_of_leaf_faces,
first_leaf_brush,
number_of_leaf_brushes):
self.contents = contents
self.cluster = cluster
self.area = area
self.bounding_box_min = bounding_box_min_x, bounding_box_min_y, bounding_box_min_z
self.bounding_box_max = bounding_box_max_x, bounding_box_max_y, bounding_box_max_z
self.first_leaf_face = first_leaf_face
self.number_of_leaf_faces = number_of_leaf_faces
self.first_leaf_brush = first_leaf_brush
self.number_of_leaf_brushes = number_of_leaf_brushes
@classmethod
def write(cls, file, leaf):
leaf_data = struct.pack(cls.format,
leaf.contents,
leaf.cluster,
leaf.area,
*leaf.bounding_box_min,
*leaf.bounding_box_max,
leaf.first_leaf_face,
leaf.number_of_leaf_faces,
leaf.first_leaf_brush,
leaf.number_of_leaf_brushes)
file.write(leaf_data)
@classmethod
def read(cls, file):
leaf_data = file.read(cls.size)
leaf_struct = struct.unpack(cls.format, leaf_data)
return Leaf(*leaf_struct)
class Leafs(ClassSequence):
Class = Leaf
class LeafFaces:
@classmethod
def write(cls, file, leaf_faces):
leaf_faces_format = '<{}H'.format(len(leaf_faces))
leaf_faces_data = struct.pack(leaf_faces_format, *leaf_faces)
file.write(leaf_faces_data)
@classmethod
def read(cls, file):
return [lf[0] for lf in struct.iter_unpack('<H', file.read())]
class LeafBrushes:
@classmethod
def write(cls, file, leaf_brushes):
leaf_brushes_format = '<{}H'.format(len(leaf_brushes))
leaf_brushes_data = struct.pack(leaf_brushes_format, *leaf_brushes)
file.write(leaf_brushes_data)
@classmethod
def read(cls, file):
return [lb[0] for lb in struct.iter_unpack('<H', file.read())]
class Edge:
    """Class for representing an edge
Attributes:
vertexes: A two-tuple of vertexes that form the edge. Vertex 0 is the
start vertex, and 1 is the end vertex.
"""
format = '<2H'
size = struct.calcsize(format)
__slots__ = (
'vertexes'
)
def __init__(self, vertex_0, vertex_1):
self.vertexes = vertex_0, vertex_1
def __getitem__(self, item):
if item > 1:
            raise IndexError('list index out of range')
return self.vertexes[item]
@classmethod
def write(cls, file, edge):
edge_data = struct.pack(cls.format,
*edge.vertexes)
file.write(edge_data)
@classmethod
def read(cls, file):
edge_data = file.read(cls.size)
edge_struct = struct.unpack(cls.format, edge_data)
return Edge(*edge_struct)
class Edges(ClassSequence):
Class = Edge
class SurfEdges:
@classmethod
def write(cls, file, surf_edges):
surf_edges_format = '<{}H'.format(len(surf_edges))
surf_edges_data = struct.pack(surf_edges_format, *surf_edges)
file.write(surf_edges_data)
@classmethod
def read(cls, file):
return [se[0] for se in struct.iter_unpack('<H', file.read())]
class Model:
"""Class for representing a model
Attributes:
bounding_box_min: The minimum coordinate of the bounding box containing
the model.
bounding_box_max: The maximum coordinate of the bounding box containing
the model.
origin: The origin of the model.
head_node: A four-tuple of indexes. Corresponds to number of map hulls.
visleafs: The number of leaves in the bsp tree?
first_face: The number of the first face in Bsp.mark_surfaces.
number_of_faces: The number of faces contained in the node. These
are stored in consecutive order in Bsp.mark_surfaces starting at
Model.first_face.
"""
format = '<9f3i'
size = struct.calcsize(format)
__slots__ = (
'bounding_box_min',
'bounding_box_max',
'origin',
'head_node',
'first_face',
'number_of_faces'
)
def __init__(self,
bounding_box_min_x,
bounding_box_min_y,
bounding_box_min_z,
bounding_box_max_x,
bounding_box_max_y,
bounding_box_max_z,
origin_x,
origin_y,
origin_z,
head_node,
first_face,
number_of_faces):
self.bounding_box_min = bounding_box_min_x, bounding_box_min_y, bounding_box_min_z
self.bounding_box_max = bounding_box_max_x, bounding_box_max_y, bounding_box_max_z
self.origin = origin_x, origin_y, origin_z
self.head_node = head_node
self.first_face = first_face
self.number_of_faces = number_of_faces
@classmethod
def write(cls, file, model):
model_data = struct.pack(cls.format,
*model.bounding_box_min,
*model.bounding_box_max,
*model.origin,
model.head_node,
model.first_face,
model.number_of_faces)
file.write(model_data)
@classmethod
def read(cls, file):
model_data = file.read(cls.size)
model_struct = struct.unpack(cls.format, model_data)
return Model(*model_struct)
class Models(ClassSequence):
Class = Model
class Brush:
format = '<3i'
size = struct.calcsize(format)
__slots__ = (
'first_side',
'number_of_sides',
'contents'
)
def __init__(self,
first_side,
number_of_sides,
contents):
self.first_side = first_side
self.number_of_sides = number_of_sides
self.contents = contents
@classmethod
def write(cls, file, brush):
brush_data = struct.pack(cls.format,
brush.first_side,
brush.number_of_sides,
brush.contents)
file.write(brush_data)
@classmethod
def read(cls, file):
brush_data = file.read(cls.size)
brush_struct = struct.unpack(cls.format, brush_data)
return Brush(*brush_struct)
class Brushes(ClassSequence):
Class = Brush
class BrushSide:
format = '<Hh'
size = struct.calcsize(format)
__slots__ = (
'plane_number',
'texture_info'
)
def __init__(self,
plane_number,
texture_info):
self.plane_number = plane_number
self.texture_info = texture_info
@classmethod
def write(cls, file, brush_side):
brush_side_data = struct.pack(cls.format,
brush_side.plane_number,
brush_side.texture_info)
file.write(brush_side_data)
@classmethod
def read(cls, file):
brush_side_data = file.read(cls.size)
brush_side_struct = struct.unpack(cls.format, brush_side_data)
return BrushSide(*brush_side_struct)
class BrushSides(ClassSequence):
Class = BrushSide
class Pop:
@classmethod
def write(cls, file, structures):
file.write(structures)
@classmethod
def read(cls, file):
return file.read()
class Area:
format = '<2i'
size = struct.calcsize(format)
__slots__ = (
'number_of_area_portals',
'first_area_portal'
)
def __init__(self,
number_of_area_portals,
first_area_portal):
self.number_of_area_portals = number_of_area_portals
self.first_area_portal = first_area_portal
@classmethod
def write(cls, file, area):
area_data = struct.pack(cls.format,
area.number_of_area_portals,
area.first_area_portal)
file.write(area_data)
@classmethod
def read(cls, file):
area_data = file.read(cls.size)
area_struct = struct.unpack(cls.format, area_data)
return Area(*area_struct)
class Areas(ClassSequence):
Class = Area
class AreaPortal:
format = '<2i'
size = struct.calcsize(format)
__slots__ = (
'portal_number',
'other_area'
)
def __init__(self,
portal_number,
other_area):
self.portal_number = portal_number
self.other_area = other_area
@classmethod
def write(cls, file, area):
area_data = struct.pack(cls.format,
area.portal_number,
area.other_area)
file.write(area_data)
@classmethod
def read(cls, file):
area_data = file.read(cls.size)
area_struct = struct.unpack(cls.format, area_data)
return AreaPortal(*area_struct)
class AreaPortals(ClassSequence):
Class = AreaPortal
class Lump:
"""Class for representing a lump.
A lump is a section of data that typically contains a sequence of data
structures.
Attributes:
offset: The offset of the lump entry from the start of the file.
length: The length of the lump entry.
"""
format = '<2i'
size = struct.calcsize(format)
__slots__ = (
'offset',
'length'
)
def __init__(self, offset, length):
self.offset = offset
self.length = length
@classmethod
def write(cls, file, lump):
lump_data = struct.pack(cls.format,
lump.offset,
lump.length)
file.write(lump_data)
@classmethod
def read(cls, file):
lump_data = file.read(cls.size)
lump_struct = struct.unpack(cls.format, lump_data)
return Lump(*lump_struct)
class Header:
"""Class for representing a Bsp file header
Attributes:
identity: The file identity. Should be b'IBSP'.
version: The file version. Should be 38.
lumps: A sequence of nineteen Lumps
"""
format = '<4si{}'.format(Lump.format[1:] * 19)
size = struct.calcsize(format)
order = [
Entities,
Planes,
Vertexes,
Visibilities,
Nodes,
TextureInfos,
Faces,
Lighting,
Leafs,
LeafFaces,
LeafBrushes,
Edges,
SurfEdges,
Models,
Brushes,
BrushSides,
Pop,
Areas,
AreaPortals
]
__slots__ = (
'identity',
'version',
'lumps'
)
def __init__(self,
identity,
version,
lumps):
self.identity = identity
self.version = version
self.lumps = lumps
@classmethod
def write(cls, file, header):
lump_values = []
for lump in header.lumps:
lump_values += lump.offset, lump.length
header_data = struct.pack(cls.format,
header.identity,
header.version,
*lump_values)
file.write(header_data)
@classmethod
def read(cls, file):
data = file.read(cls.size)
lumps_start = struct.calcsize('<4si')
header_data = data[:lumps_start]
header_struct = struct.unpack('<4si', header_data)
ident = header_struct[0]
version = header_struct[1]
lumps_data = data[lumps_start:]
lumps = [Lump(*l) for l in struct.iter_unpack(Lump.format, lumps_data)]
return Header(ident, version, lumps)
class Bsp:
"""Class for working with Bsp files
Example:
b = Bsp.open(file)
Attributes:
identity: Identity of the Bsp file. Should be b'IBSP'
version: Version of the Bsp file. Should be 38
entities: A string containing the entity definitions.
planes: A list of Plane objects used by the bsp tree data structure.
vertexes: A list of Vertex objects.
visibilities: A list of integers representing visibility data.
nodes: A list of Node objects used by the bsp tree data structure.
texture_infos: A list of TextureInfo objects.
faces: A list of Face objects.
lighting: A list of ints representing lighting data.
leafs: A list of Leaf objects used by the bsp tree data structure.
leaf_faces: A list of ints representing a consecutive list of faces
used by the Leaf objects.
leaf_brushes: A list of ints representing a consecutive list of edges
used by the Leaf objects.
edges: A list of Edge objects.
surf_edges: A list of ints representing a consecutive list of edges
used by the Face objects.
models: A list of Model objects.
brushes: A list of Brush objects.
brush_sides: A list of BrushSide objects.
pop: Proof of purchase? Always 256 bytes of null data if present.
areas: A list of Area objects.
area_portals: A list of AreaPortal objects.
"""
def __init__(self):
self.fp = None
self.mode = None
self._did_modify = False
self.identity = b'IBSP'
self.version = 38
self.entities = ""
self.planes = []
self.vertexes = []
self.visibilities = []
self.nodes = []
self.texture_infos = []
self.faces = []
self.lighting = b''
self.leafs = []
self.leaf_faces = []
self.leaf_brushes = []
self.edges = []
self.surf_edges = []
self.models = []
self.brushes = []
self.brush_sides = []
self.pop = []
self.areas = []
self.area_portals = []
Lump = Lump
Header = Header
Entities = Entities
Planes = Planes
Vertexes = Vertexes
    Visibilities = Visibilities
Nodes = Nodes
TextureInfos = TextureInfos
Faces = Faces
Lighting = Lighting
Leafs = Leafs
LeafFaces = LeafFaces
LeafBrushes = LeafBrushes
Edges = Edges
SurfEdges = SurfEdges
Models = Models
Brushes = Brushes
BrushSides = BrushSides
Pop = Pop
Areas = Areas
AreaPortals = AreaPortals
@classmethod
def open(cls, file, mode='r'):
"""Returns a Bsp object
Args:
file: Either the path to the file, a file-like object, or bytes.
mode: An optional string that indicates which mode to open the file
Returns:
An Bsp object constructed from the information read from the
file-like object.
Raises:
ValueError: If an invalid file mode is given.
RuntimeError: If the file argument is not a file-like object.
"""
if mode not in ('r', 'w', 'a'):
raise ValueError("invalid mode: '%s'" % mode)
filemode = {'r': 'rb', 'w': 'w+b', 'a': 'r+b'}[mode]
if isinstance(file, str):
file = io.open(file, filemode)
elif isinstance(file, bytes):
file = io.BytesIO(file)
elif not hasattr(file, 'read'):
raise RuntimeError(
"Bsp.open() requires 'file' to be a path, a file-like object, "
"or bytes")
# Read
if mode == 'r':
return cls._read_file(file, mode)
# Write
elif mode == 'w':
bsp = cls()
bsp.fp = file
bsp.mode = 'w'
bsp._did_modify = True
return bsp
# Append
else:
bsp = cls._read_file(file, mode)
bsp._did_modify = True
return bsp
@classmethod
def _read_file(cls, file, mode):
def _read_lump(Class):
lump = header.lumps[header.order.index(Class)]
file.seek(lump.offset)
return Class.read(io.BytesIO(file.read(lump.length)))
bsp = cls()
bsp.mode = mode
bsp.fp = file
# Header
header = cls.Header.read(file)
bsp.identity = header.identity
bsp.version = header.version
bsp.entities = _read_lump(cls.Entities)
bsp.planes = _read_lump(cls.Planes)
bsp.vertexes = _read_lump(cls.Vertexes)
bsp.visibilities = _read_lump(cls.Visibilities)
bsp.nodes = _read_lump(cls.Nodes)
bsp.texture_infos = _read_lump(cls.TextureInfos)
bsp.faces = _read_lump(cls.Faces)
bsp.lighting = _read_lump(cls.Lighting)
bsp.leafs = _read_lump(cls.Leafs)
bsp.leaf_faces = _read_lump(cls.LeafFaces)
bsp.leaf_brushes = _read_lump(cls.LeafBrushes)
bsp.edges = _read_lump(cls.Edges)
bsp.surf_edges = _read_lump(cls.SurfEdges)
bsp.models = _read_lump(cls.Models)
bsp.brushes = _read_lump(cls.Brushes)
bsp.brush_sides = _read_lump(cls.BrushSides)
bsp.pop = _read_lump(cls.Pop)
bsp.areas = _read_lump(cls.Areas)
bsp.area_portals = _read_lump(cls.AreaPortals)
return bsp
@classmethod
def _write_file(cls, file, bsp):
def _write_lump(Class, data):
offset = file.tell()
Class.write(file, data)
size = file.tell() - offset
return cls.Lump(offset, size)
lumps = [cls.Lump(0, 0) for _ in range(19)]
header = cls.Header(bsp.identity, bsp.version, lumps)
lump_index = header.order.index
# Stub out header info
cls.Header.write(file, header)
lumps[lump_index(cls.Entities)] = _write_lump(cls.Entities, bsp.entities)
lumps[lump_index(cls.Planes)] = _write_lump(cls.Planes, bsp.planes)
lumps[lump_index(cls.Vertexes)] = _write_lump(cls.Vertexes, bsp.vertexes)
lumps[lump_index(cls.Visibilities)] = _write_lump(cls.Visibilities, bsp.visibilities)
lumps[lump_index(cls.Nodes)] = _write_lump(cls.Nodes, bsp.nodes)
lumps[lump_index(cls.TextureInfos)] = _write_lump(cls.TextureInfos, bsp.texture_infos)
lumps[lump_index(cls.Faces)] = _write_lump(cls.Faces, bsp.faces)
lumps[lump_index(cls.Lighting)] = _write_lump(cls.Lighting, bsp.lighting)
lumps[lump_index(cls.Leafs)] = _write_lump(cls.Leafs, bsp.leafs)
lumps[lump_index(cls.LeafFaces)] = _write_lump(cls.LeafFaces, bsp.leaf_faces)
lumps[lump_index(cls.LeafBrushes)] = _write_lump(cls.LeafBrushes, bsp.leaf_brushes)
lumps[lump_index(cls.Edges)] = _write_lump(cls.Edges, bsp.edges)
lumps[lump_index(cls.SurfEdges)] = _write_lump(cls.SurfEdges, bsp.surf_edges)
lumps[lump_index(cls.Models)] = _write_lump(cls.Models, bsp.models)
lumps[lump_index(cls.Brushes)] = _write_lump(cls.Brushes, bsp.brushes)
lumps[lump_index(cls.BrushSides)] = _write_lump(cls.BrushSides, bsp.brush_sides)
lumps[lump_index(cls.Pop)] = _write_lump(cls.Pop, bsp.pop)
lumps[lump_index(cls.Areas)] = _write_lump(cls.Areas, bsp.areas)
lumps[lump_index(cls.AreaPortals)] = _write_lump(cls.AreaPortals, bsp.area_portals)
end_of_file = file.tell()
# Finalize header
file.seek(0)
cls.Header.write(file, header)
file.seek(end_of_file)
def save(self, file):
"""Writes Bsp data to file
Args:
file: Either the path to the file, or a file-like object, or bytes.
Raises:
RuntimeError: If the file argument is not a file-like object.
"""
should_close = False
if isinstance(file, str):
file = io.open(file, 'r+b')
should_close = True
elif isinstance(file, bytes):
file = io.BytesIO(file)
should_close = True
elif not hasattr(file, 'write'):
raise RuntimeError(
"Bsp.open() requires 'file' to be a path, a file-like object, "
"or bytes")
self._write_file(file, self)
if should_close:
file.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
"""Closes the file pointer if possible. If mode is 'w' or 'a', the file
will be written to.
"""
if self.fp:
if self.mode in ('w', 'a') and self._did_modify:
self.fp.seek(0)
self._write_file(self.fp, self)
self.fp.truncate()
file_object = self.fp
self.fp = None
file_object.close()
| [
"[email protected]"
]
| |
6447b4421e3a2256c272226eb874c95411fda479 | 8dffff5ff7f2645a50fd9846198e12e4c96a91da | /18-letter-count.py | ab86a66578554a66e0cb43fd008cdfbc21744bb6 | []
| no_license | akshaypawar2508/Coderbyte-pythonSol | b233c5ee0c34e0413a26b24b423dae45342b9ade | 5c7d2028fe09fd02aad7808f88abc40fdea0f81e | refs/heads/master | 2022-01-03T09:44:18.635060 | 2014-07-31T13:32:08 | 2014-07-31T13:32:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | def LetterCountI(str):
for word in str.split():
for i in range(len(word)):
if word[i] in word[i+1:]:
return word
return -1
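# Example: LetterCountI("Today, is the greatest day ever!") returns "greatest",
# the first word containing a repeated letter; -1 means no word repeats a letter.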
# keep this function call here
# to see how to enter arguments in Python scroll down
print LetterCountI(raw_input())
| [
"[email protected]"
]
| |
0b3eeb02095fbf2030db653bc03576071c4a956a | 9672fa478478085b69c7ef8f02eaa7fa0bc7767b | /symphony/cli/pyinventory/graphql/fragment/service_endpoint.py | f22a4f54006c151f24a0aaab059869fd9813ff4f | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | julianchr/magma | 437a1d86490ff5f1d279cf2cd3243bbd3f22f715 | f0b2ed7e08314208133cf722921d6e6ab7853825 | refs/heads/master | 2022-09-21T21:45:14.678593 | 2020-05-28T22:47:52 | 2020-05-28T22:49:52 | 267,723,888 | 0 | 0 | NOASSERTION | 2020-05-29T00:07:02 | 2020-05-29T00:07:01 | null | UTF-8 | Python | false | false | 1,352 | py | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass
from datetime import datetime
from gql.gql.datetime_utils import DATETIME_FIELD
from gql.gql.graphql_client import GraphqlClient
from gql.gql.client import OperationException
from gql.gql.reporter import FailedOperationException
from functools import partial
from numbers import Number
from typing import Any, Callable, List, Mapping, Optional
from time import perf_counter
from dataclasses_json import DataClassJsonMixin
from ..fragment.equipment_port import EquipmentPortFragment, QUERY as EquipmentPortFragmentQuery
from ..fragment.service_endpoint_definition import ServiceEndpointDefinitionFragment, QUERY as ServiceEndpointDefinitionFragmentQuery
QUERY: List[str] = EquipmentPortFragmentQuery + ServiceEndpointDefinitionFragmentQuery + ["""
fragment ServiceEndpointFragment on ServiceEndpoint {
id
port {
...EquipmentPortFragment
}
definition {
...ServiceEndpointDefinitionFragment
}
}
"""]
@dataclass
class ServiceEndpointFragment(DataClassJsonMixin):
@dataclass
class EquipmentPort(EquipmentPortFragment):
pass
@dataclass
class ServiceEndpointDefinition(ServiceEndpointDefinitionFragment):
pass
id: str
definition: ServiceEndpointDefinition
port: Optional[EquipmentPort]
| [
"[email protected]"
]
| |
f5d319d69486e544284d5a391d18304dd20f00fe | d29fd8ac20bf53f366821892bf5a80005a9cef36 | /tests/apps/pages_directory.py | 2d096f87c51ae88ef0ebd1ac72dc6772f44a26cb | [
"MIT"
]
| permissive | T4rk1n/dazzler | d325ff664c6e310374f08cea84bd53aa1ca2ca43 | 69c49422dc19c910445ab265b1d3481041de8f43 | refs/heads/master | 2023-02-11T02:39:08.423597 | 2021-12-06T03:16:49 | 2021-12-06T03:34:25 | 191,060,792 | 19 | 7 | MIT | 2023-01-23T11:02:57 | 2019-06-09T22:16:59 | Python | UTF-8 | Python | false | false | 146 | py | from dazzler import Dazzler
app = Dazzler(__name__)
app.config.pages_directory = 'page_dir'
if __name__ == '__main__':
app.start('--debug')
| [
"[email protected]"
]
| |
c72e3c3dcb8a88238fa6b42cb63e1df026e8c669 | d2d6bbb76fd92ad596b0476b37ac8dd5cf08df14 | /1.9 LISTAS.py | a97405df71a8abca906f6bf2d182f2441b9b24db | []
| no_license | edneyefs/curso_python | b917d8f2c405173af901287dab86264ff937aaa6 | 2c862ad62223b7c3bd0ea7d7410a9b69c38d814d | refs/heads/master | 2022-12-14T21:29:59.875637 | 2020-08-21T12:42:07 | 2020-08-21T12:42:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | lista = []
print(type(lista))
print(dir(lista))
print(help(lista))
print(len(lista))  # counter
lista.append(1)
lista.append(5)
print(len(lista))
nova_lista = [1, 4, 'Ana', 'Bia']
#print(nova_lista)
nova_lista.remove(4)
#print(nova_lista)
nova_lista.reverse()
print(nova_lista)
lista = [1, 5, 'Rebeca', 'Guilherme', 3.1415]
print(lista.index(1))
print(lista[2])
print(lista[-1])
lista = ['Ana', 'Lia', 'Rui', 'Paulo', 'Dani']
print(lista[1:3])
print(lista[1:-1])
print(lista[1:])
print(lista[::2])
print(lista[::-1])
del lista[2]
print(lista)
del lista[1:]
print(lista)
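# Expected output of the two prints after the deletions above (illustrative):
#   ['Ana', 'Lia', 'Paulo', 'Dani']   # after del lista[2]
#   ['Ana']                           # after del lista[1:]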
| [
"[email protected]"
]
| |
c77de50c1bc3274824ecd3f3cc23faa27d6840d7 | 4c3dd270440c48a0a8e87d1937844371476f7cef | /resource_wrangler/scripts/download_mods.py | cb42130c64e1983371fe8880c460d6c88f9945b7 | []
| no_license | Soartex-Modded/Resource-Wrangler | f84726bf5ffb246d8562149fb6cc0a613a4f4043 | 36c6f7059bb876e034c99d5e02fca1cf81888dac | refs/heads/master | 2023-01-25T00:34:22.900581 | 2020-11-29T23:00:35 | 2020-11-29T23:00:35 | 309,116,894 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,085 | py | import json
import math
import os
import requests
from sqlalchemy import Table, Column, Integer, String, MetaData
from sqlalchemy import create_engine
from sqlalchemy.sql import select
def download_mods(mods_dirs, database_path, mod_limit=100):
"""
Collect the top mods from CurseForge into mods_dirs
:param mods_dirs: {[minor_version]: [path to mods folder]}
:param database_path: path to .db file with download history (will be created if not exists)
:param mod_limit: maximum number of mods to collect
"""
mods_dirs = {k: os.path.expanduser(v) for k, v in mods_dirs.items()}
database_path = os.path.expanduser(database_path)
patch_info = {}
for minor_version in mods_dirs:
patch_info[minor_version] = {}
os.makedirs(mods_dirs[minor_version], exist_ok=True)
os.makedirs(os.path.dirname(database_path), exist_ok=True)
engine = create_engine('sqlite:///' + database_path)
metadata = MetaData()
mod_files = Table('mod_files', metadata,
Column('id', Integer, primary_key=True),
Column('file_name', String(250)),
Column('mod_id', Integer),
Column('vanilla_minor_version', Integer))
metadata.create_all(engine)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36',
}
page_size = 50
mod_count = 0
# download sets of mod information at a time
for page_index in range(math.ceil(mod_limit / page_size)):
mods = requests.get(
"https://addons-ecs.forgesvc.net/api/v2/addon/search/",
params={
'gameId': 432,
'index': page_index * page_size,
'pageSize': page_size,
'sort': 'TotalDownloads',
'sortDescending': True
},
headers=headers).json()
for mod_meta in mods:
mod_count += 1
if mod_count > mod_limit:
return
if mod_meta['categorySection']['name'] != 'Mods':
continue
versioned_mod_files = {}
for mod_file_meta in mod_meta['gameVersionLatestFiles']:
tokens = mod_file_meta['gameVersion'].split('.')
minor_version = int(tokens[1])
patch_version = 0 if len(tokens) == 2 else int(tokens[2])
# find latest mod files
if minor_version in versioned_mod_files:
if versioned_mod_files[minor_version]['patch_version'] > patch_version:
continue
prior_file_id = versioned_mod_files.get(minor_version, {}).get('value', {}).get('projectFileId', 0)
if mod_file_meta['projectFileId'] > prior_file_id:
versioned_mod_files[minor_version] = {
'patch_version': patch_version,
'value': mod_file_meta
}
for minor_version in versioned_mod_files:
if str(minor_version) not in mods_dirs:
continue
mod_file_meta = versioned_mod_files[minor_version]['value']
patch_info[str(minor_version)][mod_file_meta["projectFileName"]] = {
"mod_id": mod_meta['slug'],
"mod_name": mod_meta['name'],
# typically contains the mod version inside somewhere
"mod_filename": mod_file_meta['projectFileName'],
"mc_version": mod_file_meta['gameVersion'],
"mod_authors": [auth['name'] for auth in mod_meta['authors']],
"url_website": mod_meta['websiteUrl'],
"description": mod_meta.get('summary')
}
available_file_name = mod_file_meta['projectFileName']
stored_file_name = engine.execute(select([mod_files.c.file_name]).where(
(mod_files.c.mod_id == mod_meta['id']) & (mod_files.c.vanilla_minor_version == minor_version))
).scalar()
if stored_file_name == available_file_name:
# file is already current
# print(f'Skipping {mod_meta["name"]} for 1.{minor_version}')
continue
mod_path = os.path.join(mods_dirs[str(minor_version)], mod_file_meta['projectFileName'])
if os.path.exists(mod_path):
engine.execute(mod_files.insert(),
file_name=available_file_name,
mod_id=mod_meta['id'],
vanilla_minor_version=minor_version)
continue
download_url = requests.get(
f"https://addons-ecs.forgesvc.net/api/v2/addon/{mod_meta['id']}/file/{mod_file_meta['projectFileId']}/download-url",
headers=headers).text
print(f'Downloading {mod_meta["name"]} for 1.{minor_version}')
with open(mod_path, 'wb') as mod_file:
mod_file.write(requests.get(download_url, headers=headers).content)
if stored_file_name is None:
engine.execute(mod_files.insert(),
file_name=available_file_name,
mod_id=mod_meta['id'],
vanilla_minor_version=minor_version)
else:
engine.execute(mod_files.update()
.where((mod_files.c.mod_id == mod_meta['id']) & (mod_files.c.vanilla_minor_version == minor_version))
.values(file_name=available_file_name))
for minor_version in patch_info:
with open(os.path.join(mods_dirs[str(minor_version)], "patch_info.json"), 'w') as patch_info_file:
json.dump(patch_info[minor_version], patch_info_file, indent=4)
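# Example usage (illustrative sketch; the directories, database path and
# version keys below are hypothetical and not part of this module):
if __name__ == '__main__':
    download_mods(
        mods_dirs={
            '12': '~/minecraft/mods/1.12',
            '16': '~/minecraft/mods/1.16',
        },
        database_path='~/minecraft/mod_downloads.db',
        mod_limit=50,
    )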
| [
"[email protected]"
]
| |
ca016bd689fb246e19dc877a574e00c0cd0e1ec1 | 2b6e1b7bd7065229054b4cdecd40daa5e251c22d | /src/models/dqn.py | 4195fc5933776981002b4d8d68a69c1ac3b934bb | []
| no_license | raufer/deep-q-learning | b9be99c41829e8d62cd350cd279e5ddc135e7809 | c31b8803a45bcf1f22f1c4552daf48b9a284dd5c | refs/heads/main | 2023-06-19T06:01:49.867163 | 2021-07-20T13:35:30 | 2021-07-20T13:35:30 | 379,271,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,506 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from src.config import config
class DQN(nn.Module):
"""
Assumption: the environment is deterministic
so all equations presented here are also formulated deterministically for the sake of simplicity.
In the reinforcement learning literature, they would also contain expectations
over stochastic transitions in the environment.
    Our aim is to train a policy that tries to maximize the discounted, cumulative reward
R = sum_{t=t0}^{inf} 𝛾^t * r_t
The discount, 𝛾 , should be a constant between 0 and 1 that ensures the sum converges.
It makes rewards from the uncertain, far future, less important for our agent
than the ones in the near future that it can be more confident about
The main idea behind Q-learning is:
If we had a function Q* :: (S, A) -> R (scalar) that could tell us the real return of
taking an action A at the state S, then we could easily construct an optimal policy:
    policy*(s) = argmax {a} Q*(s, a)
    This policy would always maximize our rewards.
    However, we don't know everything about the world, so we do not have direct access to Q*.
    Nevertheless, we can use function approximation techniques to approximate Q*.
For the training update rule, we'll use the fact that every function Q for some policy
obeys the Bellman Equation:
Q_pi(s, a) = r + gamma * max {a'} Q_pi(s', a')
The difference between the two sides of the equality is known as the temporal
difference error
    delta = Q(s, a) - (r + gamma * max {a'} Q(s', a'))
    To minimize this error, we'll use the Huber loss:
* MSE when the error is small (< 1)
* MAE when the error is large (> 1)
(more robust to outliers)
This error is calculated over a batch of transitions B
sampled from the replay memory
L = 1 / |B| * sum {(s, a, s', r) in B} L(delta)
with L(delta) =
1/2 delta**2 for |delta| < 1
|delta| - 1/2 otherwise
Q-network
Our model is a convolutional neural network that takes as input
    the difference between the current and previous screen patches.
It has two outputs representing Q(s, left) and Q(s, right),
where s is the input to the network.
In effect, the network is trying to predict the quality/value of
taking each action given the current input
"""
def __init__(self, h, w, outputs):
super(DQN, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
self.bn3 = nn.BatchNorm2d(32)
# Number of Linear input connections depends on output of conv2d layers
# and therefore the input image size, so compute it.
def conv2d_size_out(size, kernel_size=5, stride=2):
return (size - (kernel_size - 1) - 1) // stride + 1
convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
linear_input_size = convw * convh * 32
self.head = nn.Linear(linear_input_size, outputs)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
return self.head(x.view(x.size(0), -1))
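# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original model): how the temporal-
# difference error described in the class docstring is typically minimised
# with the Huber loss. `policy_net`, `target_net` and the batch tensors are
# hypothetical names used only for this example.
def _example_td_loss(policy_net, target_net, states, actions, rewards,
                     next_states, gamma=0.999):
    # Q(s, a) for the actions that were actually taken
    q_sa = policy_net(states).gather(1, actions.unsqueeze(1)).squeeze(1)
    # r + gamma * max_a' Q(s', a'), computed with the frozen target network
    with torch.no_grad():
        target = rewards + gamma * target_net(next_states).max(1)[0]
    # Huber loss: quadratic for |delta| < 1, linear otherwise
    return F.smooth_l1_loss(q_sa, target)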
| [
"[email protected]"
]
| |
947d1a396be059afdf7d383c3ba8875ac7652ea0 | e6342576193fd70937ab1cead8d9504f5a1a0b9b | /basic_api/admin.py | 55af462e17a4184687dafcbebc6dba0b8735234e | []
| no_license | shubham1560/Django-rest-angular-frontend | 41acfad812d522c12b2f2c70931bbf882e1f5f85 | 306151ebf772b036204bb709096b0eaea0a8d552 | refs/heads/master | 2020-07-15T22:08:16.271011 | 2019-09-19T18:40:41 | 2019-09-19T18:40:41 | 205,658,813 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | from django.contrib import admin
from .models import Summary
# Register your models here.
admin.site.register(Summary)
| [
"[email protected]"
]
| |
38af83d170297d348201ba84ec024ff6782f1b88 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.4/tests/regressiontests/admin_custom_urls/urls.py | 12f440e54206905c1883af69161ca4715a9ff7be | []
| no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.4/tests/regressiontests/admin_custom_urls/urls.py | [
"[email protected]"
]
| |
548980782c09a047bbcc43b0e12a6dae822cdcc6 | ed1d841dbd836f5a02a8b2c22bcc92380f28d11b | /seed.py | 9b08aa64301e4ced1c79ad9d8a6e7a7e4658118c | []
| no_license | GraceDurham/ratings | b063389f368f0b3994f0771ca4cac46555a04a10 | 2e628c2a824ca5a10879a15282cd60e21695322b | refs/heads/master | 2020-05-23T07:59:29.310561 | 2017-02-03T02:00:36 | 2017-02-03T02:00:36 | 80,483,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,894 | py | """Utility file to seed ratings database from MovieLens data in seed_data/"""
from sqlalchemy import func
from model import User
from model import Rating
from model import Movie
from datetime import datetime
from model import connect_to_db, db
from server import app
def load_users():
"""Load users from u.user into database."""
print "Users"
# Delete all rows in table, so if we need to run this a second time,
# we won't be trying to add duplicate users
User.query.delete()
# Read u.user file and insert data
for row in open("seed_data/u.user"):
row = row.rstrip()
user_id, age, gender, occupation, zipcode = row.split("|")
user = User(user_id=user_id,
age=age,
zipcode=zipcode)
# We need to add to the session or it won't ever be stored
db.session.add(user)
# Once we're done, we should commit our work
db.session.commit()
def load_movies():
"""Load movies from u.item into database."""
print "Movies"
# Delete all rows in table, so if we need to run this a second time,
    # we won't be trying to add duplicate movies
Movie.query.delete()
    # Read u.item file and insert data
for row in open("seed_data/u.item"):
        # strip the trailing whitespace
row = row.rstrip()
# print "each row!", row
# we took the row and split it on the pipe
row_split = row.split("|")
# print "it's splitted!!", row_split
# sliced the giant list into only 0-4 index
first_five = row_split[:5]
# print "this is our short list", first_five
# unpacked the first five items from the u.item list
movie_id, title, released_at, empty, imdb_url = first_five
# print first_five
        # If released_at is a non-empty string, parse it into a datetime;
        # otherwise leave it as None.
if released_at:
released_at = datetime.strptime(released_at, "%d-%b-%Y")
else:
released_at = None
        title = title[:-7]  # strip the trailing " (YYYY)" suffix, which is 7 characters
movie = Movie(movie_id=movie_id,
title=title,
released_at=released_at,
imdb_url=imdb_url)
# We need to add to the session or it won't ever be stored
db.session.add(movie)
# Once we're done, we should commit our work
db.session.commit()
def load_ratings():
"""Load ratings from u.data into database."""
print "Ratings"
# Delete all rows in table, so if we need to run this a second time,
    # we won't be trying to add duplicate ratings
Rating.query.delete()
    # Read u.data file and insert data
for row in open("seed_data/u.data"):
row = row.strip().split()
user_id, movie_id, score, time_stamp = row
# print row
rating = Rating(
user_id=int(user_id),
movie_id=int(movie_id),
score=int(score))
# We need to add to the session or it won't ever be stored
db.session.add(rating)
# Once we're done, we should commit our work
db.session.commit()
def set_val_user_id():
"""Set value for the next user_id after seeding database"""
# Get the Max user_id in the database
result = db.session.query(func.max(User.user_id)).one()
max_id = int(result[0])
# Set the value for the next user_id to be max_id + 1
query = "SELECT setval('users_user_id_seq', :new_id)"
db.session.execute(query, {'new_id': max_id + 1})
db.session.commit()
if __name__ == "__main__":
connect_to_db(app)
# In case tables haven't been created, create them
db.create_all()
# Import different types of data
load_users()
load_movies()
load_ratings()
set_val_user_id()
| [
"[email protected]"
]
| |
55a4e8e8c4aa91e9545e39a617b5c10879c37d07 | 33eb4fd807c1a641f52f7124ec7b256ce07612f1 | /test/optimization/test_converters.py | ceb47854904a2313fb0dfe0c2ea5d0555a45b620 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | levbishop/qiskit-aqua | 9ee27da1533cbb9746fe5ff5255533bd9742faa5 | 50e4d935241452bb76296cea6144a9fc452c5e2c | refs/heads/master | 2022-12-04T01:48:18.477406 | 2020-08-11T19:25:03 | 2020-08-11T19:25:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,877 | py | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Converters """
import logging
import unittest
from test.optimization.optimization_test_case import QiskitOptimizationTestCase
import numpy as np
from docplex.mp.model import Model
from qiskit.aqua.algorithms import NumPyMinimumEigensolver
from qiskit.aqua.operators import Z, I
from qiskit.optimization import QuadraticProgram, QiskitOptimizationError
from qiskit.optimization.algorithms import MinimumEigenOptimizer, CplexOptimizer, ADMMOptimizer
from qiskit.optimization.algorithms import OptimizationResult
from qiskit.optimization.algorithms.admm_optimizer import ADMMParameters
from qiskit.optimization.algorithms.optimization_algorithm import OptimizationResultStatus
from qiskit.optimization.converters import (InequalityToEquality, IntegerToBinary,
LinearEqualityToPenalty, QuadraticProgramToIsing,
IsingToQuadraticProgram)
from qiskit.optimization.problems import Constraint, Variable
logger = logging.getLogger(__name__)
QUBIT_OP_MAXIMIZE_SAMPLE = (
-199999.5 * (I ^ I ^ I ^ Z)
+ -399999.5 * (I ^ I ^ Z ^ I)
+ -599999.5 * (I ^ Z ^ I ^ I)
+ -799999.5 * (Z ^ I ^ I ^ I)
+ 100000 * (I ^ I ^ Z ^ Z)
+ 150000 * (I ^ Z ^ I ^ Z)
+ 300000 * (I ^ Z ^ Z ^ I)
+ 200000 * (Z ^ I ^ I ^ Z)
+ 400000 * (Z ^ I ^ Z ^ I)
+ 600000 * (Z ^ Z ^ I ^ I)
)
OFFSET_MAXIMIZE_SAMPLE = 1149998
class TestConverters(QiskitOptimizationTestCase):
"""Test Converters"""
def test_empty_problem(self):
""" Test empty problem """
op = QuadraticProgram()
conv = InequalityToEquality()
op = conv.convert(op)
conv = IntegerToBinary()
op = conv.convert(op)
conv = LinearEqualityToPenalty()
op = conv.convert(op)
_, shift = op.to_ising()
self.assertEqual(shift, 0.0)
def test_valid_variable_type(self):
"""Validate the types of the variables for QuadraticProgram.to_ising."""
# Integer variable
with self.assertRaises(QiskitOptimizationError):
op = QuadraticProgram()
op.integer_var(0, 10, "int_var")
_ = op.to_ising()
# Continuous variable
with self.assertRaises(QiskitOptimizationError):
op = QuadraticProgram()
op.continuous_var(0, 10, "continuous_var")
_ = op.to_ising()
def test_inequality_binary(self):
""" Test InequalityToEqualityConverter with binary variables """
op = QuadraticProgram()
for i in range(3):
op.binary_var(name='x{}'.format(i))
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, 'x0x2')
# Quadratic constraints
quadratic = {('x0', 'x1'): 1, ('x1', 'x2'): 2}
op.quadratic_constraint({}, quadratic, Constraint.Sense.LE, 3, 'x0x1_x1x2LE')
quadratic = {('x0', 'x1'): 3, ('x1', 'x2'): 4}
op.quadratic_constraint({}, quadratic, Constraint.Sense.GE, 3, 'x0x1_x1x2GE')
# Convert inequality constraints into equality constraints
conv = InequalityToEquality()
op2 = conv.convert(op)
self.assertListEqual([v.name for v in op2.variables],
['x0', 'x1', 'x2', 'x1x2@int_slack', 'x0x2@int_slack',
'x0x1_x1x2LE@int_slack', 'x0x1_x1x2GE@int_slack'])
# Check names and objective senses
self.assertEqual(op.name, op2.name)
self.assertEqual(op.objective.sense, op2.objective.sense)
# For linear constraints
lst = [
op2.linear_constraints[0].linear.to_dict()[0],
op2.linear_constraints[0].linear.to_dict()[1],
]
self.assertListEqual(lst, [1, 1])
self.assertEqual(op2.linear_constraints[0].sense, Constraint.Sense.EQ)
lst = [
op2.linear_constraints[1].linear.to_dict()[1],
op2.linear_constraints[1].linear.to_dict()[2],
op2.linear_constraints[1].linear.to_dict()[3],
]
self.assertListEqual(lst, [1, -1, 1])
lst = [op2.variables[3].lowerbound, op2.variables[3].upperbound]
self.assertListEqual(lst, [0, 3])
self.assertEqual(op2.linear_constraints[1].sense, Constraint.Sense.EQ)
lst = [
op2.linear_constraints[2].linear.to_dict()[0],
op2.linear_constraints[2].linear.to_dict()[2],
op2.linear_constraints[2].linear.to_dict()[4],
]
self.assertListEqual(lst, [1, 3, -1])
lst = [op2.variables[4].lowerbound, op2.variables[4].upperbound]
self.assertListEqual(lst, [0, 2])
self.assertEqual(op2.linear_constraints[2].sense, Constraint.Sense.EQ)
# For quadratic constraints
lst = [
op2.quadratic_constraints[0].quadratic.to_dict()[(0, 1)],
op2.quadratic_constraints[0].quadratic.to_dict()[(1, 2)],
op2.quadratic_constraints[0].linear.to_dict()[5],
]
self.assertListEqual(lst, [1, 2, 1])
lst = [op2.variables[5].lowerbound, op2.variables[5].upperbound]
self.assertListEqual(lst, [0, 3])
lst = [
op2.quadratic_constraints[1].quadratic.to_dict()[(0, 1)],
op2.quadratic_constraints[1].quadratic.to_dict()[(1, 2)],
op2.quadratic_constraints[1].linear.to_dict()[6],
]
self.assertListEqual(lst, [3, 4, -1])
lst = [op2.variables[6].lowerbound, op2.variables[6].upperbound]
self.assertListEqual(lst, [0, 4])
result = OptimizationResult(x=np.arange(7), fval=0, variables=op2.variables)
new_result = conv.interpret(result)
np.testing.assert_array_almost_equal(new_result.x, np.arange(3))
self.assertListEqual(new_result.variable_names, ['x0', 'x1', 'x2'])
self.assertDictEqual(new_result.variables_dict, {'x0': 0, 'x1': 1, 'x2': 2})
def test_inequality_integer(self):
""" Test InequalityToEqualityConverter with integer variables """
op = QuadraticProgram()
for i in range(3):
op.integer_var(name='x{}'.format(i), lowerbound=-3, upperbound=3)
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, 'x0x2')
# Quadratic constraints
quadratic = {('x0', 'x1'): 1, ('x1', 'x2'): 2}
op.quadratic_constraint({}, quadratic, Constraint.Sense.LE, 3, 'x0x1_x1x2LE')
quadratic = {('x0', 'x1'): 3, ('x1', 'x2'): 4}
op.quadratic_constraint({}, quadratic, Constraint.Sense.GE, 3, 'x0x1_x1x2GE')
conv = InequalityToEquality()
op2 = conv.convert(op)
self.assertListEqual([v.name for v in op2.variables],
['x0', 'x1', 'x2', 'x1x2@int_slack', 'x0x2@int_slack',
'x0x1_x1x2LE@int_slack', 'x0x1_x1x2GE@int_slack'])
# For linear constraints
lst = [
op2.linear_constraints[0].linear.to_dict()[0],
op2.linear_constraints[0].linear.to_dict()[1],
]
self.assertListEqual(lst, [1, 1])
self.assertEqual(op2.linear_constraints[0].sense, Constraint.Sense.EQ)
lst = [
op2.linear_constraints[1].linear.to_dict()[1],
op2.linear_constraints[1].linear.to_dict()[2],
op2.linear_constraints[1].linear.to_dict()[3],
]
self.assertListEqual(lst, [1, -1, 1])
lst = [op2.variables[3].lowerbound, op2.variables[3].upperbound]
self.assertListEqual(lst, [0, 8])
self.assertEqual(op2.linear_constraints[1].sense, Constraint.Sense.EQ)
lst = [
op2.linear_constraints[2].linear.to_dict()[0],
op2.linear_constraints[2].linear.to_dict()[2],
op2.linear_constraints[2].linear.to_dict()[4],
]
self.assertListEqual(lst, [1, 3, -1])
lst = [op2.variables[4].lowerbound, op2.variables[4].upperbound]
self.assertListEqual(lst, [0, 10])
self.assertEqual(op2.linear_constraints[2].sense, Constraint.Sense.EQ)
# For quadratic constraints
lst = [
op2.quadratic_constraints[0].quadratic.to_dict()[(0, 1)],
op2.quadratic_constraints[0].quadratic.to_dict()[(1, 2)],
op2.quadratic_constraints[0].linear.to_dict()[5],
]
self.assertListEqual(lst, [1, 2, 1])
lst = [op2.variables[5].lowerbound, op2.variables[5].upperbound]
self.assertListEqual(lst, [0, 30])
lst = [
op2.quadratic_constraints[1].quadratic.to_dict()[(0, 1)],
op2.quadratic_constraints[1].quadratic.to_dict()[(1, 2)],
op2.quadratic_constraints[1].linear.to_dict()[6],
]
self.assertListEqual(lst, [3, 4, -1])
lst = [op2.variables[6].lowerbound, op2.variables[6].upperbound]
self.assertListEqual(lst, [0, 60])
result = OptimizationResult(x=np.arange(7), fval=0, variables=op2.variables)
new_result = conv.interpret(result)
np.testing.assert_array_almost_equal(new_result.x, np.arange(3))
self.assertListEqual(new_result.variable_names, ['x0', 'x1', 'x2'])
self.assertDictEqual(new_result.variables_dict, {'x0': 0, 'x1': 1, 'x2': 2})
def test_inequality_mode_integer(self):
""" Test integer mode of InequalityToEqualityConverter() """
op = QuadraticProgram()
for i in range(3):
op.binary_var(name='x{}'.format(i))
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, 'x0x2')
conv = InequalityToEquality(mode='integer')
op2 = conv.convert(op)
lst = [op2.variables[3].vartype, op2.variables[4].vartype]
self.assertListEqual(lst, [Variable.Type.INTEGER, Variable.Type.INTEGER])
def test_inequality_mode_continuous(self):
""" Test continuous mode of InequalityToEqualityConverter() """
op = QuadraticProgram()
for i in range(3):
op.binary_var(name='x{}'.format(i))
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, 'x0x2')
conv = InequalityToEquality(mode='continuous')
op2 = conv.convert(op)
lst = [op2.variables[3].vartype, op2.variables[4].vartype]
self.assertListEqual(lst, [Variable.Type.CONTINUOUS, Variable.Type.CONTINUOUS])
def test_inequality_mode_auto(self):
""" Test auto mode of InequalityToEqualityConverter() """
op = QuadraticProgram()
for i in range(3):
op.binary_var(name='x{}'.format(i))
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, 'x1x2')
linear_constraint = {'x0': 1.1, 'x2': 2.2}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 3.3, 'x0x2')
conv = InequalityToEquality(mode='auto')
op2 = conv.convert(op)
lst = [op2.variables[3].vartype, op2.variables[4].vartype]
self.assertListEqual(lst, [Variable.Type.INTEGER, Variable.Type.CONTINUOUS])
def test_penalize_sense(self):
""" Test PenalizeLinearEqualityConstraints with senses """
op = QuadraticProgram()
for i in range(3):
op.binary_var(name='x{}'.format(i))
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, 'x0x2')
self.assertEqual(op.get_num_linear_constraints(), 3)
conv = LinearEqualityToPenalty()
with self.assertRaises(QiskitOptimizationError):
conv.convert(op)
def test_penalize_binary(self):
""" Test PenalizeLinearEqualityConstraints with binary variables """
op = QuadraticProgram()
for i in range(3):
op.binary_var(name='x{}'.format(i))
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': 3}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 2, 'x0x2')
self.assertEqual(op.get_num_linear_constraints(), 3)
conv = LinearEqualityToPenalty()
op2 = conv.convert(op)
self.assertEqual(op2.get_num_linear_constraints(), 0)
result = OptimizationResult(x=np.arange(3), fval=0, variables=op2.variables)
new_result = conv.interpret(result)
self.assertEqual(new_result.status, OptimizationResultStatus.INFEASIBLE)
np.testing.assert_array_almost_equal(new_result.x, np.arange(3))
self.assertListEqual(result.variable_names, ['x0', 'x1', 'x2'])
self.assertDictEqual(result.variables_dict, {'x0': 0, 'x1': 1, 'x2': 2})
def test_penalize_integer(self):
""" Test PenalizeLinearEqualityConstraints with integer variables """
op = QuadraticProgram()
for i in range(3):
op.integer_var(name='x{}'.format(i), lowerbound=-3, upperbound=3)
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x2')
op.minimize(constant=3, linear={'x0': 1}, quadratic={('x1', 'x2'): 2})
self.assertEqual(op.get_num_linear_constraints(), 3)
conv = LinearEqualityToPenalty()
op2 = conv.convert(op)
self.assertEqual(op2.get_num_linear_constraints(), 0)
result = OptimizationResult(x=[0, 1, -1], fval=1, variables=op2.variables)
new_result = conv.interpret(result)
self.assertAlmostEqual(new_result.fval, 1)
self.assertEqual(new_result.status, OptimizationResultStatus.SUCCESS)
np.testing.assert_array_almost_equal(new_result.x, [0, 1, -1])
self.assertListEqual(result.variable_names, ['x0', 'x1', 'x2'])
self.assertDictEqual(result.variables_dict, {'x0': 0, 'x1': 1, 'x2': -1})
def test_integer_to_binary(self):
""" Test integer to binary """
op = QuadraticProgram()
for i in range(0, 2):
op.binary_var(name='x{}'.format(i))
op.integer_var(name='x2', lowerbound=0, upperbound=5)
linear = {}
for i, x in enumerate(op.variables):
linear[x.name] = i + 1
op.maximize(0, linear, {})
conv = IntegerToBinary()
op2 = conv.convert(op)
self.assertEqual(op2.get_num_vars(), 5)
self.assertListEqual([x.vartype for x in op2.variables], [Variable.Type.BINARY] * 5)
self.assertListEqual([x.name for x in op2.variables], ['x0', 'x1', 'x2@0', 'x2@1', 'x2@2'])
dct = op2.objective.linear.to_dict()
self.assertEqual(dct[2], 3)
self.assertEqual(dct[3], 6)
self.assertEqual(dct[4], 6)
def test_binary_to_integer(self):
""" Test binary to integer """
op = QuadraticProgram()
for i in range(0, 2):
op.binary_var(name='x{}'.format(i))
op.integer_var(name='x2', lowerbound=0, upperbound=5)
linear = {'x0': 1, 'x1': 2, 'x2': 1}
op.maximize(0, linear, {})
linear = {}
for x in op.variables:
linear[x.name] = 1
op.linear_constraint(linear, Constraint.Sense.EQ, 6, 'x0x1x2')
conv = IntegerToBinary()
op2 = conv.convert(op)
result = OptimizationResult(x=[0, 1, 1, 1, 1], fval=17, variables=op2.variables)
new_result = conv.interpret(result)
np.testing.assert_array_almost_equal(new_result.x, [0, 1, 5])
self.assertEqual(new_result.fval, 17)
self.assertListEqual(new_result.variable_names, ['x0', 'x1', 'x2'])
self.assertDictEqual(new_result.variables_dict, {'x0': 0, 'x1': 1, 'x2': 5})
def test_optimizationproblem_to_ising(self):
""" Test optimization problem to operators"""
op = QuadraticProgram()
for i in range(4):
op.binary_var(name='x{}'.format(i))
linear = {}
for x in op.variables:
linear[x.name] = 1
op.maximize(0, linear, {})
linear = {}
for i, x in enumerate(op.variables):
linear[x.name] = i + 1
op.linear_constraint(linear, Constraint.Sense.EQ, 3, 'sum1')
penalize = LinearEqualityToPenalty(penalty=1e5)
op2 = penalize.convert(op)
qubitop, offset = op2.to_ising()
self.assertEqual(qubitop, QUBIT_OP_MAXIMIZE_SAMPLE)
self.assertEqual(offset, OFFSET_MAXIMIZE_SAMPLE)
def test_ising_to_quadraticprogram_linear(self):
""" Test optimization problem to operators with linear=True"""
op = QUBIT_OP_MAXIMIZE_SAMPLE
offset = OFFSET_MAXIMIZE_SAMPLE
quadratic = QuadraticProgram()
quadratic.from_ising(op, offset, linear=True)
self.assertEqual(quadratic.get_num_vars(), 4)
self.assertEqual(quadratic.get_num_linear_constraints(), 0)
self.assertEqual(quadratic.get_num_quadratic_constraints(), 0)
self.assertEqual(quadratic.objective.sense, quadratic.objective.Sense.MINIMIZE)
self.assertAlmostEqual(quadratic.objective.constant, 900000)
linear_matrix = np.zeros((1, 4))
linear_matrix[0, 0] = -500001
linear_matrix[0, 1] = -800001
linear_matrix[0, 2] = -900001
linear_matrix[0, 3] = -800001
quadratic_matrix = np.zeros((4, 4))
quadratic_matrix[0, 1] = 400000
quadratic_matrix[0, 2] = 600000
quadratic_matrix[1, 2] = 1200000
quadratic_matrix[0, 3] = 800000
quadratic_matrix[1, 3] = 1600000
quadratic_matrix[2, 3] = 2400000
np.testing.assert_array_almost_equal(
quadratic.objective.linear.coefficients.toarray(), linear_matrix
)
np.testing.assert_array_almost_equal(
quadratic.objective.quadratic.coefficients.toarray(), quadratic_matrix
)
def test_ising_to_quadraticprogram_quadratic(self):
""" Test optimization problem to operators with linear=False"""
op = QUBIT_OP_MAXIMIZE_SAMPLE
offset = OFFSET_MAXIMIZE_SAMPLE
quadratic = QuadraticProgram()
quadratic.from_ising(op, offset, linear=False)
self.assertEqual(quadratic.get_num_vars(), 4)
self.assertEqual(quadratic.get_num_linear_constraints(), 0)
self.assertEqual(quadratic.get_num_quadratic_constraints(), 0)
self.assertEqual(quadratic.objective.sense, quadratic.objective.Sense.MINIMIZE)
self.assertAlmostEqual(quadratic.objective.constant, 900000)
quadratic_matrix = np.zeros((4, 4))
quadratic_matrix[0, 0] = -500001
quadratic_matrix[0, 1] = 400000
quadratic_matrix[0, 2] = 600000
quadratic_matrix[0, 3] = 800000
quadratic_matrix[1, 1] = -800001
quadratic_matrix[1, 2] = 1200000
quadratic_matrix[1, 3] = 1600000
quadratic_matrix[2, 2] = -900001
quadratic_matrix[2, 3] = 2400000
quadratic_matrix[3, 3] = -800001
np.testing.assert_array_almost_equal(
quadratic.objective.quadratic.coefficients.toarray(), quadratic_matrix
)
def test_continuous_variable_decode(self):
""" Test decode func of IntegerToBinaryConverter for continuous variables"""
try:
mdl = Model('test_continuous_varable_decode')
c = mdl.continuous_var(lb=0, ub=10.9, name='c')
x = mdl.binary_var(name='x')
mdl.maximize(c + x * x)
op = QuadraticProgram()
op.from_docplex(mdl)
converter = IntegerToBinary()
op = converter.convert(op)
admm_params = ADMMParameters()
qubo_optimizer = MinimumEigenOptimizer(NumPyMinimumEigensolver())
continuous_optimizer = CplexOptimizer()
solver = ADMMOptimizer(
qubo_optimizer=qubo_optimizer,
continuous_optimizer=continuous_optimizer,
params=admm_params,
)
result = solver.solve(op)
result = converter.interpret(result)
self.assertEqual(result.x[0], 10.9)
self.assertListEqual(result.variable_names, ['c', 'x'])
self.assertDictEqual(result.variables_dict, {'c': 10.9, 'x': 0})
except NameError as ex:
self.skipTest(str(ex))
def test_auto_penalty(self):
""" Test auto penalty function"""
op = QuadraticProgram()
op.binary_var('x')
op.binary_var('y')
op.binary_var('z')
op.minimize(constant=3, linear={'x': 1}, quadratic={('x', 'y'): 2})
op.linear_constraint(linear={'x': 1, 'y': 1, 'z': 1}, sense='EQ', rhs=2, name='xyz_eq')
lineq2penalty = LinearEqualityToPenalty(penalty=1e5)
lineq2penalty_auto = LinearEqualityToPenalty()
qubo = lineq2penalty.convert(op)
qubo_auto = lineq2penalty_auto.convert(op)
exact_mes = NumPyMinimumEigensolver()
exact = MinimumEigenOptimizer(exact_mes)
result = exact.solve(qubo)
result_auto = exact.solve(qubo_auto)
self.assertEqual(result.fval, result_auto.fval)
np.testing.assert_array_almost_equal(result.x, result_auto.x)
def test_auto_penalty_warning(self):
""" Test warnings of auto penalty function"""
op = QuadraticProgram()
op.binary_var('x')
op.binary_var('y')
op.binary_var('z')
op.minimize(linear={'x': 1, 'y': 2})
op.linear_constraint(linear={'x': 0.5, 'y': 0.5, 'z': 0.5}, sense='EQ', rhs=1, name='xyz')
with self.assertLogs('qiskit.optimization', level='WARNING') as log:
lineq2penalty = LinearEqualityToPenalty()
_ = lineq2penalty.convert(op)
warning = (
'WARNING:qiskit.optimization.converters.linear_equality_to_penalty:'
'Warning: Using 100000.000000 for the penalty coefficient because a float '
'coefficient exists in constraints. \nThe value could be too small. If so, '
'set the penalty coefficient manually.'
)
self.assertIn(warning, log.output)
def test_linear_equality_to_penalty_decode(self):
""" Test decode func of LinearEqualityToPenalty"""
qprog = QuadraticProgram()
qprog.binary_var('x')
qprog.binary_var('y')
qprog.binary_var('z')
qprog.maximize(linear={'x': 3, 'y': 1, 'z': 1})
qprog.linear_constraint(linear={'x': 1, 'y': 1, 'z': 1}, sense='EQ', rhs=2, name='xyz_eq')
lineq2penalty = LinearEqualityToPenalty()
qubo = lineq2penalty.convert(qprog)
exact_mes = NumPyMinimumEigensolver()
exact = MinimumEigenOptimizer(exact_mes)
result = exact.solve(qubo)
decoded_result = lineq2penalty.interpret(result)
self.assertEqual(decoded_result.fval, 4)
np.testing.assert_array_almost_equal(decoded_result.x, [1, 1, 0])
self.assertEqual(decoded_result.status, OptimizationResultStatus.SUCCESS)
self.assertListEqual(decoded_result.variable_names, ['x', 'y', 'z'])
self.assertDictEqual(decoded_result.variables_dict, {'x': 1.0, 'y': 1.0, 'z': 0.0})
infeasible_result = OptimizationResult(x=[1, 1, 1], fval=0, variables=qprog.variables)
decoded_infeasible_result = lineq2penalty.interpret(infeasible_result)
self.assertEqual(decoded_infeasible_result.fval, 5)
np.testing.assert_array_almost_equal(decoded_infeasible_result.x, [1, 1, 1])
self.assertEqual(decoded_infeasible_result.status, OptimizationResultStatus.INFEASIBLE)
self.assertListEqual(infeasible_result.variable_names, ['x', 'y', 'z'])
self.assertDictEqual(infeasible_result.variables_dict, {'x': 1.0, 'y': 1.0, 'z': 1.0})
def test_empty_problem_deprecated(self):
""" Test empty problem """
op = QuadraticProgram()
conv = InequalityToEquality()
op = conv.encode(op)
conv = IntegerToBinary()
op = conv.encode(op)
conv = LinearEqualityToPenalty()
op = conv.encode(op)
conv = QuadraticProgramToIsing()
_, shift = conv.encode(op)
self.assertEqual(shift, 0.0)
def test_valid_variable_type_deprecated(self):
"""Validate the types of the variables for QuadraticProgramToIsing."""
# Integer variable
with self.assertRaises(QiskitOptimizationError):
op = QuadraticProgram()
op.integer_var(0, 10, "int_var")
conv = QuadraticProgramToIsing()
_ = conv.encode(op)
# Continuous variable
with self.assertRaises(QiskitOptimizationError):
op = QuadraticProgram()
op.continuous_var(0, 10, "continuous_var")
conv = QuadraticProgramToIsing()
_ = conv.encode(op)
def test_optimizationproblem_to_ising_deprecated(self):
""" Test optimization problem to operators"""
op = QuadraticProgram()
for i in range(4):
op.binary_var(name='x{}'.format(i))
linear = {}
for x in op.variables:
linear[x.name] = 1
op.maximize(0, linear, {})
linear = {}
for i, x in enumerate(op.variables):
linear[x.name] = i + 1
op.linear_constraint(linear, Constraint.Sense.EQ, 3, 'sum1')
penalize = LinearEqualityToPenalty(penalty=1e5)
op2ope = QuadraticProgramToIsing()
op2 = penalize.encode(op)
qubitop, offset = op2ope.encode(op2)
self.assertEqual(qubitop, QUBIT_OP_MAXIMIZE_SAMPLE)
self.assertEqual(offset, OFFSET_MAXIMIZE_SAMPLE)
def test_ising_to_quadraticprogram_linear_deprecated(self):
""" Test optimization problem to operators with linear=True"""
op = QUBIT_OP_MAXIMIZE_SAMPLE
offset = OFFSET_MAXIMIZE_SAMPLE
op2qp = IsingToQuadraticProgram(linear=True)
quadratic = op2qp.encode(op, offset)
self.assertEqual(len(quadratic.variables), 4)
self.assertEqual(len(quadratic.linear_constraints), 0)
self.assertEqual(len(quadratic.quadratic_constraints), 0)
self.assertEqual(quadratic.objective.sense, quadratic.objective.Sense.MINIMIZE)
self.assertAlmostEqual(quadratic.objective.constant, 900000)
linear_matrix = np.zeros((1, 4))
linear_matrix[0, 0] = -500001
linear_matrix[0, 1] = -800001
linear_matrix[0, 2] = -900001
linear_matrix[0, 3] = -800001
quadratic_matrix = np.zeros((4, 4))
quadratic_matrix[0, 1] = 400000
quadratic_matrix[0, 2] = 600000
quadratic_matrix[1, 2] = 1200000
quadratic_matrix[0, 3] = 800000
quadratic_matrix[1, 3] = 1600000
quadratic_matrix[2, 3] = 2400000
np.testing.assert_array_almost_equal(
quadratic.objective.linear.coefficients.toarray(), linear_matrix
)
np.testing.assert_array_almost_equal(
quadratic.objective.quadratic.coefficients.toarray(), quadratic_matrix
)
def test_ising_to_quadraticprogram_quadratic_deprecated(self):
""" Test optimization problem to operators with linear=False"""
op = QUBIT_OP_MAXIMIZE_SAMPLE
offset = OFFSET_MAXIMIZE_SAMPLE
op2qp = IsingToQuadraticProgram(linear=False)
quadratic = op2qp.encode(op, offset)
self.assertEqual(len(quadratic.variables), 4)
self.assertEqual(len(quadratic.linear_constraints), 0)
self.assertEqual(len(quadratic.quadratic_constraints), 0)
self.assertEqual(quadratic.objective.sense, quadratic.objective.Sense.MINIMIZE)
self.assertAlmostEqual(quadratic.objective.constant, 900000)
quadratic_matrix = np.zeros((4, 4))
quadratic_matrix[0, 0] = -500001
quadratic_matrix[0, 1] = 400000
quadratic_matrix[0, 2] = 600000
quadratic_matrix[0, 3] = 800000
quadratic_matrix[1, 1] = -800001
quadratic_matrix[1, 2] = 1200000
quadratic_matrix[1, 3] = 1600000
quadratic_matrix[2, 2] = -900001
quadratic_matrix[2, 3] = 2400000
quadratic_matrix[3, 3] = -800001
np.testing.assert_array_almost_equal(
quadratic.objective.quadratic.coefficients.toarray(), quadratic_matrix
)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
c885620223bab7b3b759d52fbf738145d6690444 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/rtctrl/setrtmetricdef.py | f5d55b1458f3e0a5d0f447271471db818060c777 | []
| no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,969 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class SetRtMetricDef(Mo):
"""
The set route metric definition.
"""
meta = ClassMeta("cobra.model.rtctrl.SetRtMetricDef")
meta.moClassName = "rtctrlSetRtMetricDef"
meta.rnFormat = "smetric"
meta.category = MoCategory.REGULAR
meta.label = "None"
meta.writeAccessMask = 0x1000001
meta.readAccessMask = 0x1000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.rtctrl.AttrDef")
meta.superClasses.add("cobra.model.pol.Comp")
meta.superClasses.add("cobra.model.rtctrl.ASetRule")
meta.superClasses.add("cobra.model.fabric.L3ProtoComp")
meta.superClasses.add("cobra.model.fabric.ProtoComp")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.rtctrl.ASetRtMetric")
meta.rnPrefixes = [
('smetric', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5582, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "metric", "metric", 795, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 4294967295)]
meta.props.add("metric", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "type", "type", 794, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 5
prop.defaultValueStr = "metric"
prop._addConstant("as-path", "as-path", 11)
prop._addConstant("community", "community", 1)
prop._addConstant("dampening-pol", "dampening-type", 10)
prop._addConstant("ip-nh", "ip-nexthop", 8)
prop._addConstant("local-pref", "local-preference", 4)
prop._addConstant("metric", "metric", 5)
prop._addConstant("metric-type", "metric-type", 9)
prop._addConstant("ospf-fwd-addr", "ospf-fowarding-address", 7)
prop._addConstant("ospf-nssa", "ospf-nssa-area", 6)
prop._addConstant("rt-tag", "route-tag", 2)
prop._addConstant("rt-weight", "route-weight", 3)
meta.props.add("type", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
]
| |
1bd7a906e3ae3f165ff81b9124b97e9187f5bcc5 | e5202e0f36c15b8898920a461a866168fa059947 | /clirad/n2o_3.2e-07/band_3/atmpro_mls/cliradlw_1013f91/param.py | 33924bb8f70d78d9230fd85569d18e1186ba5363 | []
| no_license | qAp/analysis_-_new_kdist_param | 653c9873751646f6fa9481544e98ed6065a16155 | 272dc3667030cdb18664108d0bd78fee03736144 | refs/heads/master | 2021-06-11T04:21:35.105924 | 2019-08-04T13:13:07 | 2019-08-04T13:13:07 | 136,108,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | PARAM = {'commitnumber': '1013f91', 'band': [3], 'molecule': {'n2o': 3.2e-07}, 'atmpro': 'mls', 'tsfc': 294}
PARAM_LBLNEW = {'atmpro': 'mls', 'band': '3a', 'commitnumber': '5014a19', 'conc': 3.2e-07, 'dv': 0.001, 'klin': 2.22e-20, 'molecule': 'n2o', 'ng_adju': [0, 0], 'ng_refs': [1, 2], 'nv': 1000, 'option_compute_btable': 0, 'option_compute_ktable': 0, 'option_wgt_flux': 1, 'option_wgt_k': 1, 'ref_pts': [[1, 250], [500, 250]], 'tsfc': 294, 'vmax': 620, 'vmin': 540, 'w_diffuse': [[1.8], [1.66, 1.8]], 'wgt': [[0.9], [0.5, 0.95]]} | [
"[email protected]"
]
| |
135f69897b740742d615a59e60256e99b761d86d | 1346ea1f255d3586442c8fc1afc0405794206e26 | /알고리즘/day24/babygincompare.py | 0506c4570d0fcac76a84ab75a16604fe95dd74ec | []
| no_license | Yun-Jongwon/TIL | 737b634b6e75723ac0043cda9c4f9acbc2a24686 | a3fc624ec340643cdbf98974bf6e6144eb06a42f | refs/heads/master | 2020-04-12T00:41:03.985080 | 2019-05-01T07:55:25 | 2019-05-01T07:55:25 | 162,208,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,649 | py | def player1babygin():
for i in range(len(player1_data)-2):
for j in range(i+1,len(player1_data)-1):
for k in range(j+1,len(player1_data)):
candi=sorted([player1_data[i],player1_data[j],player1_data[k]])
if (candi[1]-1==candi[0] and candi[1]+1== candi[2]) or (candi[0]==candi[1] and candi[1]==candi[2]):
# print(candi)
return 1
return 0
def player2babygin():
for i in range(len(player2_data)-2):
for j in range(i+1,len(player2_data)-1):
for k in range(j+1,len(player2_data)):
candi=sorted([player2_data[i],player2_data[j],player2_data[k]])
if (candi[1]-1==candi[0] and candi[1]+1== candi[2]) or (candi[0]==candi[1] and candi[1]==candi[2]):
return 2
return 0
T=int(input())
for t in range(T):
data=list(map(int,input().split()))
player1_data=[]
player2_data=[]
player1=0
player2=0
result=0
for d in range(len(data)):
if d%2==0:
player1_data.append(data[d])
# print(player1_data)
else:
player2_data.append(data[d])
# print(player2_data)
if d>=4:
if len(player2_data)>=3:
player1=player1babygin()
player2=player2babygin()
else:
                player1 = player1babygin()  # player 2 has fewer than 3 cards, so only player 1 can have baby-gin yet
if player1==1 and (player2==0 or player2==2):
result=1
break
elif player1==0 and player2==2:
result=2
break
print('#{} {}'.format(t+1,result))
| [
"[email protected]"
]
| |
1c25fdc3c71bd1c13e880d528341cc4b0e788efd | f54d702c1289b2b78f423850d7fedba6c9378126 | /Mathematics/Fundamentals/handshake.py | b505905cd0327f05e06069e006057674fa76dc6a | [
"MIT"
]
| permissive | ekant1999/HackerRank | 81e6ac5bec8307bca2bd1debb169f2acdf239b66 | 084d4550b4eaf130837ab26a4efdbcaf8b667cdc | refs/heads/master | 2020-05-02T09:19:10.102144 | 2016-10-27T04:10:28 | 2016-10-27T04:10:28 | 177,868,424 | 0 | 0 | MIT | 2019-03-26T21:04:17 | 2019-03-26T21:04:17 | null | UTF-8 | Python | false | false | 243 | py | # Python 2
# Enter your code here. Read input from STDIN. Print output to STDOUT
t = int(raw_input())
for i in range(t):
n = int(raw_input())
handshakes = n*(n-1)/2 # Note this is nC2 i.e. n "choose" 2
print handshakes | [
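# Worked example (illustrative): for n = 5 people, C(5, 2) = 5*4/2 = 10 handshakes.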
"[email protected]"
]
| |
d6421366ead0444f243530ea7171288c4fd74f01 | 3f85a2b5ebaf040d295bd5d98c49b59e9ea82643 | /extract_delf.py | d8911a4ee35ad704974c37e18e3ef631c5868f09 | [
"Apache-2.0"
]
| permissive | vcg-uvic/image-matching-benchmark-baselines | 6b69d0db384c4af90b431f421077aa0f8e1ec04f | 01510c4d2c07cad89727013241a359bb22689a1b | refs/heads/master | 2021-01-04T00:35:04.375020 | 2020-10-01T17:19:54 | 2020-10-01T17:19:54 | 292,169,250 | 19 | 1 | Apache-2.0 | 2020-10-01T17:19:56 | 2020-09-02T03:29:45 | null | UTF-8 | Python | false | false | 9,403 | py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Forked from:
# https://github.com/tensorflow/models/blob/master/research/delf/delf/python/examples/extract_features.py
"""Extracts DELF features from a list of images, saving them to file.
The images must be in JPG format. In this fork, features for all images are
aggregated into HDF5 files (keypoints, descriptors, scales, scores) in the output directory.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
import json
import numpy as np
import h5py
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.platform import app
from delf import delf_config_pb2
from delf import feature_extractor
from delf import feature_io
cmd_args = None
# Extension of feature files.
_DELF_EXT = '.h5'
# Pace to report extraction log.
_STATUS_CHECK_ITERATIONS = 100
def _ReadImageList(list_path):
"""Helper function to read image paths.
Args:
list_path: Path to list of images, one image path per line.
Returns:
image_paths: List of image paths.
"""
with tf.gfile.GFile(list_path, 'r') as f:
image_paths = f.readlines()
image_paths = [entry.rstrip() for entry in image_paths]
return image_paths
def MakeExtractor(sess, config, import_scope=None):
"""Creates a function to extract features from an image.
Args:
sess: TensorFlow session to use.
config: DelfConfig proto containing the model configuration.
import_scope: Optional scope to use for model.
Returns:
Function that receives an image and returns features.
"""
tf.saved_model.loader.load(
sess, [tf.saved_model.tag_constants.SERVING],
config.model_path,
import_scope=import_scope)
import_scope_prefix = import_scope + '/' if import_scope is not None else ''
input_image = sess.graph.get_tensor_by_name('%sinput_image:0' %
import_scope_prefix)
input_score_threshold = sess.graph.get_tensor_by_name(
'%sinput_abs_thres:0' % import_scope_prefix)
input_image_scales = sess.graph.get_tensor_by_name('%sinput_scales:0' %
import_scope_prefix)
input_max_feature_num = sess.graph.get_tensor_by_name(
'%sinput_max_feature_num:0' % import_scope_prefix)
boxes = sess.graph.get_tensor_by_name('%sboxes:0' % import_scope_prefix)
raw_descriptors = sess.graph.get_tensor_by_name('%sfeatures:0' %
import_scope_prefix)
feature_scales = sess.graph.get_tensor_by_name('%sscales:0' %
import_scope_prefix)
attention_with_extra_dim = sess.graph.get_tensor_by_name(
'%sscores:0' % import_scope_prefix)
attention = tf.reshape(attention_with_extra_dim,
[tf.shape(attention_with_extra_dim)[0]])
locations, descriptors = feature_extractor.DelfFeaturePostProcessing(
boxes, raw_descriptors, config)
def ExtractorFn(image):
"""Receives an image and returns DELF features.
Args:
image: Uint8 array with shape (height, width 3) containing the RGB image.
Returns:
Tuple (locations, descriptors, feature_scales, attention)
"""
return sess.run([locations, descriptors, feature_scales, attention],
feed_dict={
input_image: image,
input_score_threshold:
config.delf_local_config.score_threshold,
input_image_scales: list(config.image_scales),
input_max_feature_num:
config.delf_local_config.max_feature_num
})
return ExtractorFn
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
# Read list of images.
tf.logging.info('Reading list of images...')
image_paths = _ReadImageList(cmd_args.list_images_path)
num_images = len(image_paths)
tf.logging.info('done! Found %d images', num_images)
# Parse DelfConfig proto.
config = delf_config_pb2.DelfConfig()
with tf.gfile.FastGFile(cmd_args.config_path, 'r') as f:
text_format.Merge(f.read(), config)
# Create output directory if necessary.
if not os.path.exists(cmd_args.output_dir):
os.makedirs(cmd_args.output_dir)
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
# Reading list of images.
filename_queue = tf.train.string_input_producer(
image_paths, shuffle=False)
reader = tf.WholeFileReader()
_, value = reader.read(filename_queue)
image_tf = tf.image.decode_jpeg(value, channels=3)
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
extractor_fn = MakeExtractor(sess, config)
# Start input enqueue threads.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
start = time.clock()
with h5py.File(os.path.join(cmd_args.output_dir, 'keypoints.h5'), 'w') as h5_kp, \
h5py.File(os.path.join(cmd_args.output_dir, 'descriptors.h5'), 'w') as h5_desc, \
h5py.File(os.path.join(cmd_args.output_dir, 'scores.h5'), 'w') as h5_score, \
h5py.File(os.path.join(cmd_args.output_dir, 'scales.h5'), 'w') as h5_scale:
for i in range(num_images):
key = os.path.splitext(os.path.basename(image_paths[i]))[0]
print('Processing "{}"'.format(key))
# Write to log-info once in a while.
if i == 0:
tf.logging.info(
'Starting to extract DELF features from images...')
elif i % _STATUS_CHECK_ITERATIONS == 0:
elapsed = (time.clock() - start)
tf.logging.info(
'Processing image %d out of %d, last %d '
'images took %f seconds', i, num_images,
_STATUS_CHECK_ITERATIONS, elapsed)
start = time.clock()
# # Get next image.
im = sess.run(image_tf)
# If descriptor already exists, skip its computation.
# out_desc_filename = os.path.splitext(os.path.basename(
# image_paths[i]))[0] + _DELF_EXT
# out_desc_fullpath = os.path.join(cmd_args.output_dir, out_desc_filename)
# if tf.gfile.Exists(out_desc_fullpath):
# tf.logging.info('Skipping %s', image_paths[i])
# continue
# Extract and save features.
(locations_out, descriptors_out, feature_scales_out,
attention_out) = extractor_fn(im)
# np.savez('{}.npz'.format(config.delf_local_config.max_feature_num), keypoints=locations_out)
# feature_io.WriteToFile(out_desc_fullpath, locations_out,
# feature_scales_out, descriptors_out,
# attention_out)
h5_kp[key] = locations_out[:, ::-1]
h5_desc[key] = descriptors_out
h5_scale[key] = feature_scales_out
h5_score[key] = attention_out
# Finalize enqueue threads.
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--config_path',
type=str,
default='misc/delf/delf_config_example.pbtxt',
help="""
Path to DelfConfig proto text file with configuration to be used for DELF
extraction.
""")
parser.add_argument(
'--list_images_path',
type=str,
help="""
Path to list of images whose DELF features will be extracted.
""")
parser.add_argument(
'--output_dir',
type=str,
default='../benchmark-features/delf',
help="""
      Directory where DELF features will be written to. Each image's features
      will be written to a file with the same name and the extension replaced by .delf.
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
"[email protected]"
]
| |
53fc8b0f4b4edd08561c3807270f46137ef31875 | fdedcfc5242a375bb08e0ec7e206d5560ce36f65 | /mmctools/windtools/windtools/SOWFA6/postProcessing/probeSets.py | a56818c48f189c242a32d90f566f12115d14a425 | [
"Apache-2.0"
]
| permissive | DriesAllaerts/mmctools | 2069fe02e0c7417cfbf6762d2db6646deb43123c | b5f88556c1df3935d1d36260c59e375423df6f1d | refs/heads/master | 2022-12-01T13:56:32.192494 | 2022-09-14T03:10:21 | 2022-09-14T03:10:21 | 187,232,582 | 0 | 0 | Apache-2.0 | 2019-05-29T20:19:52 | 2019-05-17T14:40:10 | null | UTF-8 | Python | false | false | 12,448 | py | # Copyright 2020 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"""
Class for reading in `set` type of OpenFOAM sampling 'probes'
written by Regis Thedin ([email protected])
"""
from __future__ import print_function
import os
import pandas as pd
import numpy as np
from .reader import Reader
class ProbeSets(Reader):
"""Stores a time array (t), and field arrays as attributes. The
fields have shape:
(Nt, N[, Nd])
where N is the number of probes and Nt is the number of samples.
Vectors have an additional dimension to denote vector components.
Symmetric tensors have an additional dimension to denote tensor components (xx, xy, xz, yy, yz, zz).
    The `set` type of probe is used when a large number of data points needs to be saved.
    This class therefore differs from `Probe` and is tailored to specifying many sets and
    looping through the files with ease. Its inputs were designed to make it easy to
    accommodate very large datasets, or to read only a subset of the saved data.
    If the need for `set` probes arises, chances are the probe names will be complex and will
    likely include a sweep of a variable in their name. Because of that, the user can specify
    the probe names split into prefix, suffix, variable sweep, and variables to save. It is also possible to specify a
sub-domain in which data is needed. It is assumed that all sets have the same points.
Sample usage:
from windtools.SOWFA6.postProcessing.probeSets import ProbeSets
# read all times, all variables
        probeData = ProbeSets('path/to/case/postProcessing/probeName')
        # read specified fields
        probeData = ProbeSets('path/to/case/postProcessing/probeName', varList=['U','T'])
        # read specified sub-domain
        probeData = ProbeSets('path/to/case/postProcessing/probeName', xi=-2500, xf=2500, yi=-2500, yf=2500)
        # read all and account for added perturbation on the sampling points
        probeData = ProbeSets('path/to/case/postProcessing/probeName', posPert=-0.01)
        # read specified time dirs
        probeData = ProbeSets('path/to/case/postProcessing/probeName', tstart=30000, tend=30100)
# read certain files following complex naming convention
# e.g. if the probes are specified as
```
probeName
{
type sets;
name pointcloud;
// other settings...
fields ( U T );
sets
(
vmasts_h10
{
type points;
// ...
}
vmasts_h20
{
// ...
}
// ...
)
}
```
# and the user wishes to read to vmasts_h{10,50}_{T,U}.xy, then:
        probeData = ProbeSets('path/to/case/postProcessing/probeName',
                              fprefix='vmasts_h', fparam=['10','50'], varList=['T','U'], fsuffix='.xy')
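        # the data that has been read can then be exported to a pandas DataFrame, e.g.
        # (sketch; assumes the read above succeeded):
        df = probeData.to_pandas(fields=['T','U'])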
Notes:
    - If `varList` is not specified, then all the probes are read, ignoring prefix, suffix, and parameters
- Pandas/dataframe is used internally even though the final object is of `Reader` type.
"""
def __init__(self, dpath=None, tstart=None, tend=None, varList='all', posPert=0.0,
xi=None, xf=None, yi=None, yf=None,
fprefix=None, fparam=None, fsuffix=None,
**kwargs):
self.xi = xi
self.xf = xf
self.yi = yi
self.yf = yf
self.fprefix = fprefix
self.fparam = fparam
self.fsuffix = fsuffix
self.posPert = posPert
self.tstart = tstart
self.tend = tend
self.varList = varList
self._allVars = {'U','UMean','T','TMean','TPrimeUPrimeMean','UPrime2Mean','p_rgh'}
super().__init__(dpath,includeDt=True,**kwargs)
def _trimtimes(self,tdirList, tstart=None,tend=None):
if (tstart is not None) or (tend is not None):
if tstart is None: tstart = 0.0
if tend is None: tend = 9e9
selected = [ (t >= tstart) & (t <= tend) for t in self.times ]
self.filelist = [tdirList[i] for i,b in enumerate(selected) if b ]
self.times = [self.times[i] for i,b in enumerate(selected) if b ]
self.Ntimes = len(self.times)
try:
tdirList = [tdirList[i] for i,b in enumerate(selected) if b ]
except AttributeError:
pass
return tdirList
def _processdirs(self, tdirList, trimOverlap=False, **kwargs):
print('Probe data saved:',len(self.simStartTimes), 'time steps, from', \
self.simStartTimes[0],'s to',self.simStartTimes[-1],'s')
# make varList iterable if not already a list
varList = [self.varList] if not isinstance(self.varList, (list)) else self.varList
# Create a list of all the probe files that will be processed
if varList[0].lower()=='all':
print('No varList given. Reading all probes.')
outputs = [ fname for fname in os.listdir(tdirList[0])
if os.path.isfile(tdirList[0]+os.sep+fname) ]
else:
            # Make values iterable if they were not specified as lists
fprefix = [self.fprefix] if not isinstance(self.fprefix, (list)) else self.fprefix
fparam = [self.fparam] if not isinstance(self.fparam, (list)) else self.fparam
fsuffix = [self.fsuffix] if not isinstance(self.fsuffix, (list)) else self.fsuffix
# create a varList that contains all the files names
fileList = []
for var in varList:
for prefix in fprefix:
for param in fparam:
for suffix in fsuffix:
fileList.append( prefix + param + '_' + var + suffix )
outputs = fileList
# Get list of times and trim the data
self.times = [float(os.path.basename(p)) for p in self.simTimeDirs]
tdirList = self._trimtimes(tdirList,self.tstart,self.tend)
try:
print('Probe data requested:',len(tdirList), 'time steps, from', \
float(os.path.basename(tdirList[0])),'s to', \
float(os.path.basename(tdirList[-1])),'s')
except IndexError:
raise ValueError('End time needs to be greater than the start time')
# Raise an error if list is empty
if not tdirList:
raise ValueError('No time directories found')
# Process all data
for field in outputs:
arrays = [ self._read_data( tdir,field ) for tdir in tdirList ]
# combine into a single array and trim end of time series
arrays = np.concatenate(arrays)[:self.imax,:]
# parse the name to create the right variable
param, var = self._parseProbeName(field)
# add the zagl to the array
arrays = np.hstack((arrays[:,:4], \
np.full((arrays.shape[0],1),param), \
arrays[:,4:]))
# append to (or create) a variable attribute
try:
setattr(self,var,np.concatenate((getattr(self,var),arrays)))
except AttributeError:
setattr( self, var, arrays )
if not var in self._processed:
self._processed.append(var)
print(' read',field)
self.t = np.unique(arrays[:,0])
self.Nt = len(self.t)
# sort times
for var in self._allVars:
try:
                # sort each field that was read by its time column
                setattr(self, var, getattr(self, var)[np.argsort(getattr(self, var)[:, 0])])
except AttributeError:
pass
def _parseProbeName(self, field):
# Example: get 'vmasts_50mGrid_h30_T.xy' and return param=30, var='T'
# Remove the prefix from the full field name
f = field.replace(self.fprefix,'')
        # Substitute the first underscore with a dot and split the array
f = f.replace('_','.',1).split('.')
for i in set(f).intersection(self._allVars):
var = i
param = int(f[-3])
return param, var
def _read_data(self, dpath, fname):
fpath = dpath + os.sep + fname
currentTime = float(os.path.basename(dpath))
with open(fpath) as f:
try:
# read the actual data from probes
array = self._read_probe_posAndData(f)
# add current time step info to first column
array = np.c_[np.full(array.shape[0],currentTime), array]
except IOError:
print('unable to read '+ fpath)
return array
def _read_probe_posAndData(self,f):
out = []
# Pandas is a LOT faster than reading the file line by line
out = pd.read_csv(f.name,header=None,comment='#',sep='\t')
# Add position perturbation to x, y, zabs
out[[0,1,2]] = out[[0,1,2]].add(self.posPert)
# clip spatial data
out = self._trimpositions(out, self.xi, self.xf, self.yi, self.yf)
out = out.to_numpy(dtype=float)
self.N = len(out)
return out
def _trimpositions(self, df, xi=None,xf=None, yi=None, yf=None):
if (xi is not None) and (xf is not None):
df = df.loc[ (df[0]>=xi) & (df[0]<=xf) ]
elif xi is not None:
df = df.loc[ df[0]>=xi ]
elif xf is not None:
df = df.loc[ df[0]<=xf ]
if (yi is not None) and (yf is not None):
df = df.loc[ (df[1]>=yi) & (df[1]<=yf) ]
elif yi is not None:
df = df.loc[ df[1]>=yi ]
elif yf is not None:
df = df.loc[ df[1]<=yf ]
return df
#============================================================================
#
# DATA I/O
#
#============================================================================
def to_pandas(self,itime=None,fields=None,dtype=None):
#output all vars
if fields is None:
fields = self._processed
# select time range
if itime is None:
tindices = range(len(self.t))
else:
try:
iter(itime)
except TypeError:
# specified single time index
tindices = [itime]
else:
# specified list of indices
tindices = itime
# create dataframes for each field
print('Creating dataframe ...')
data = {}
for var in fields:
print('processing', var)
F = getattr(self,var)
# Fill in data
data['time'] = F[:,0]
data['x'] = F[:,1]
data['y'] = F[:,2]
data['zabs'] = F[:,3]
data['zagl'] = F[:,4]
if F.shape[1]==6:
# scalar
data[var] = F[:,5:].flatten()
elif F.shape[1]==8:
# vector
for j,name in enumerate(['x','y','z']):
data[var+name] = F[:,5+j].flatten()
elif F.shape[1]==11:
# symmetric tensor
for j,name in enumerate(['xx','xy','xz','yy','yz','zz']):
data[var+name] = F[:,5+j].flatten()
df = pd.DataFrame(data=data,dtype=dtype)
return df.sort_values(['time','x','y','zabs','zagl']).set_index(['time','x','y','zagl'])
def to_netcdf(self,fname,fieldDescriptions={},fieldUnits={}):
        raise NotImplementedError('Not available for the ProbeSets class.')
| [
"[email protected]"
]
| |
0ffe61f0c5fc6dd5c9c0e340692739b892566dc0 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/Juniper-TSM-CONF.py | 71fad16c3643ce9206c5564ee369544ce182b392 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 3,237 | py | #
# PySNMP MIB module Juniper-TSM-CONF (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Juniper-TSM-CONF
# Produced by pysmi-0.3.4 at Wed May 1 14:04:34 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion")
juniAgents, = mibBuilder.importSymbols("Juniper-Agents", "juniAgents")
AgentCapabilities, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "AgentCapabilities", "NotificationGroup", "ModuleCompliance")
ObjectIdentity, MibIdentifier, TimeTicks, NotificationType, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Counter32, Integer32, Gauge32, Unsigned32, Counter64, Bits, iso = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "MibIdentifier", "TimeTicks", "NotificationType", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Counter32", "Integer32", "Gauge32", "Unsigned32", "Counter64", "Bits", "iso")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
juniTsmAgent = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 5, 2, 67))
juniTsmAgent.setRevisions(('2003-10-27 22:50',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: juniTsmAgent.setRevisionsDescriptions(('The initial release of this management information module.',))
if mibBuilder.loadTexts: juniTsmAgent.setLastUpdated('200310272250Z')
if mibBuilder.loadTexts: juniTsmAgent.setOrganization('Juniper Networks, Inc.')
if mibBuilder.loadTexts: juniTsmAgent.setContactInfo(' Juniper Networks, Inc. Postal: 10 Technology Park Drive Westford, MA 01886-3146 USA Tel: +1 978 589 5800 E-mail: [email protected]')
if mibBuilder.loadTexts: juniTsmAgent.setDescription('The agent capabilities definitions for the Terminal Server Management (TSM) component of the SNMP agent in the Juniper E-series family of products.')
juniTsmAgentV1 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 67, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniTsmAgentV1 = juniTsmAgentV1.setProductRelease('Version 1 of the Terminal Server Management (TSM) component of the\n JUNOSe SNMP agent. This version of the TSM component is supported in\n JUNOSe 5.3 and subsequent system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniTsmAgentV1 = juniTsmAgentV1.setStatus('current')
if mibBuilder.loadTexts: juniTsmAgentV1.setDescription('The MIB supported by the JUNOSe SNMP agent for the TSM application.')
mibBuilder.exportSymbols("Juniper-TSM-CONF", PYSNMP_MODULE_ID=juniTsmAgent, juniTsmAgent=juniTsmAgent, juniTsmAgentV1=juniTsmAgentV1)
| [
"[email protected]"
]
| |
501ce999fd6452c28544240627deb50e62312876 | fce83f1b55b8894afab9eb58ae8b4ba2e26eb86b | /examples/GAN/DCGAN.py | e9df6b36319476aea07fd240e26005c998a75385 | [
"Apache-2.0"
]
| permissive | PeisenZhao/tensorpack | b65d451f6d4a7fe1af1e183bdc921c912f087586 | 6ca57de47e4a76b57c8aa2f0dad87c1059c13ac0 | refs/heads/master | 2021-05-05T01:46:05.209522 | 2018-01-31T05:29:37 | 2018-01-31T05:29:37 | 119,641,372 | 1 | 0 | null | 2018-01-31T05:52:07 | 2018-01-31T05:52:06 | null | UTF-8 | Python | false | false | 5,554 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: DCGAN.py
# Author: Yuxin Wu <[email protected]>
import glob
import numpy as np
import os
import argparse
from tensorpack import *
from tensorpack.utils.viz import stack_patches
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope
from tensorpack.utils.globvars import globalns as opt
import tensorflow as tf
from GAN import GANTrainer, RandomZData, GANModelDesc
"""
1. Download the 'aligned&cropped' version of CelebA dataset
from http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html
2. Start training:
./DCGAN-CelebA.py --data /path/to/img_align_celeba/ --crop-size 140
Generated samples will be available through tensorboard
3. Visualize samples with an existing model:
./DCGAN-CelebA.py --load path/to/model --sample
You can also train on other images (just use any directory of jpg files in
`--data`). But you may need to change the preprocessing.
A pretrained model on CelebA is at http://models.tensorpack.com/GAN/
"""
# global vars
opt.SHAPE = 64
opt.BATCH = 128
opt.Z_DIM = 100
class Model(GANModelDesc):
def _get_inputs(self):
return [InputDesc(tf.float32, (None, opt.SHAPE, opt.SHAPE, 3), 'input')]
def generator(self, z):
""" return an image generated from z"""
nf = 64
l = FullyConnected('fc0', z, nf * 8 * 4 * 4, nl=tf.identity)
l = tf.reshape(l, [-1, 4, 4, nf * 8])
l = BNReLU(l)
with argscope(Deconv2D, nl=BNReLU, kernel_shape=4, stride=2):
l = Deconv2D('deconv1', l, nf * 4)
l = Deconv2D('deconv2', l, nf * 2)
l = Deconv2D('deconv3', l, nf)
l = Deconv2D('deconv4', l, 3, nl=tf.identity)
l = tf.tanh(l, name='gen')
return l
@auto_reuse_variable_scope
def discriminator(self, imgs):
""" return a (b, 1) logits"""
nf = 64
with argscope(Conv2D, nl=tf.identity, kernel_shape=4, stride=2):
l = (LinearWrap(imgs)
.Conv2D('conv0', nf, nl=tf.nn.leaky_relu)
.Conv2D('conv1', nf * 2)
.BatchNorm('bn1')
.tf.nn.leaky_relu()
.Conv2D('conv2', nf * 4)
.BatchNorm('bn2')
.tf.nn.leaky_relu()
.Conv2D('conv3', nf * 8)
.BatchNorm('bn3')
.tf.nn.leaky_relu()
.FullyConnected('fct', 1, nl=tf.identity)())
return l
def _build_graph(self, inputs):
image_pos = inputs[0]
image_pos = image_pos / 128.0 - 1
z = tf.random_uniform([opt.BATCH, opt.Z_DIM], -1, 1, name='z_train')
z = tf.placeholder_with_default(z, [None, opt.Z_DIM], name='z')
with argscope([Conv2D, Deconv2D, FullyConnected],
W_init=tf.truncated_normal_initializer(stddev=0.02)):
with tf.variable_scope('gen'):
image_gen = self.generator(z)
tf.summary.image('generated-samples', image_gen, max_outputs=30)
with tf.variable_scope('discrim'):
vecpos = self.discriminator(image_pos)
vecneg = self.discriminator(image_gen)
self.build_losses(vecpos, vecneg)
self.collect_variables()
def _get_optimizer(self):
lr = tf.get_variable('learning_rate', initializer=2e-4, trainable=False)
return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)
def get_augmentors():
augs = []
if opt.load_size:
augs.append(imgaug.Resize(opt.load_size))
if opt.crop_size:
augs.append(imgaug.CenterCrop(opt.crop_size))
augs.append(imgaug.Resize(opt.SHAPE))
return augs
def get_data(datadir):
imgs = glob.glob(datadir + '/*.jpg')
ds = ImageFromFile(imgs, channel=3, shuffle=True)
ds = AugmentImageComponent(ds, get_augmentors())
ds = BatchData(ds, opt.BATCH)
ds = PrefetchDataZMQ(ds, 5)
return ds
def sample(model, model_path, output_name='gen/gen'):
pred = PredictConfig(
session_init=get_model_loader(model_path),
model=model,
input_names=['z'],
output_names=[output_name, 'z'])
pred = SimpleDatasetPredictor(pred, RandomZData((100, opt.Z_DIM)))
for o in pred.get_result():
o = o[0] + 1
o = o * 128.0
o = np.clip(o, 0, 255)
o = o[:, :, :, ::-1]
stack_patches(o, nr_row=10, nr_col=10, viz=True)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
parser.add_argument('--sample', action='store_true', help='view generated examples')
parser.add_argument('--data', help='a jpeg directory')
parser.add_argument('--load-size', help='size to load the original images', type=int)
parser.add_argument('--crop-size', help='crop the original images', type=int)
args = parser.parse_args()
opt.use_argument(args)
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
return args
if __name__ == '__main__':
args = get_args()
if args.sample:
sample(Model(), args.load)
else:
assert args.data
logger.auto_set_dir()
GANTrainer(
input=QueueInput(get_data(args.data)),
model=Model()).train_with_defaults(
callbacks=[ModelSaver()],
steps_per_epoch=300,
max_epoch=200,
session_init=SaverRestore(args.load) if args.load else None
)
| [
"[email protected]"
]
| |
df113094854ba04a033632a46969612a2810a824 | aef40813a1b92cec0ea4fc25ec1d4a273f9bfad4 | /Q03__/04_Range_Sum_Query_2D_Immutable/Solution.py | 5a36350496b38c5b518c880e49d6cd71aaf91e13 | [
"Apache-2.0"
]
| permissive | hsclinical/leetcode | e9d0e522e249a24b28ab00ddf8d514ec855110d7 | 48a57f6a5d5745199c5685cd2c8f5c4fa293e54a | refs/heads/main | 2023-06-14T11:28:59.458901 | 2021-07-09T18:57:44 | 2021-07-09T18:57:44 | 319,078,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | from typing import List
class NumMatrix:
def __init__(self, matrix: List[List[int]]):
self.matrix = matrix
self.n = len(matrix)
if self.n != 0:
self.m = len(matrix[0])
        else:
            self.m = 0
def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
if self.n != 0:
middleList = []
for i in range(row1, row2+1):
middleList.append(sum(self.matrix[i][col1:(col2+1)]))
return(sum(middleList))
else:
return(0)
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
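# Example with hypothetical values: obj = NumMatrix([[3, 0, 1], [5, 6, 3], [1, 2, 0]])
# obj.sumRegion(0, 0, 1, 1) -> 3 + 0 + 5 + 6 = 14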
| [
"[email protected]"
]
| |
07b9d9814ac9cfa9eebb4569b73e71272a56cdc7 | 1b862f34c125ce200244dd79e4fda4b5b605ce2e | /.history/AC_Well_U18_20210720193949.py | d1bd37fe1285363db73ceec0bfe193a13e06ef95 | []
| no_license | edwino26/CoreImages | 26085a49cf1cb79442ae563a88354b2fdceace87 | 6bf6e68cac8ab36c87b1e6ea702bfe6882b0f40e | refs/heads/master | 2023-06-22T12:53:37.344895 | 2021-07-21T04:31:44 | 2021-07-21T04:31:44 | 309,553,247 | 0 | 4 | null | 2021-04-29T23:23:15 | 2020-11-03T02:45:07 | Lasso | UTF-8 | Python | false | false | 11,089 | py | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import numpy as np
import pandas as pd
import math
import lasio
from scipy import interpolate
import matplotlib.pyplot as plt # GRAPHS
import glob
from matplotlib import rcParams
# %%
las1= lasio.read('./LAS/U18/U18_GR.las')
df1= las1.df()
df1.reset_index(inplace=True)
df1 = df1[['GR_EDTC', 'TDEP']]
las2 = lasio.read('./LAS/U18/U18_AT90_NPHI.las')
df2 = las2.df()
df2.reset_index(inplace=True)
df2 = df2[['AT90','NPHI','TDEP',]]
las3 = lasio.read('./LAS/U18/U18_DTCO.las')
df3= las3.df()
df3.reset_index(inplace=True)
df3 = df3[['DTCO', 'TDEP']]
U18_xl =pd.read_excel('./Excel_Files/U18_test.xls',sheet_name = 'U18_data')
df4=U18_xl[['DEPTH','RHOZ']]
# MERGE: combine the LAS files into one df
result = pd.merge(df1,df2, on= 'TDEP',how='left')
result.set_index('TDEP', inplace=True)
df5=pd.merge(result,df3,on= 'TDEP',how='left')
df5.set_index('TDEP', inplace=True)
# %%
# array with a new TDEP (depth) table with a 0.5 step
dep= np.arange(200,1350,0.5)
f = interpolate.interp1d(df4['DEPTH'], df4['RHOZ'])
RHOZ_new = f(dep)
plt.plot(df4['DEPTH'], df4['RHOZ'], 'o', dep, RHOZ_new, '-')
plt.show()
df6= pd.DataFrame(RHOZ_new,dep, columns=['RHOZ'])
df=pd.DataFrame(df5.join(df6,how='inner',on='TDEP'))
# %%
TDEP= df.index
top=650
bottom=1200
temp=((0.0198*TDEP)+ 26.921)
v= 400000
b=0.88
tsup = 25 #F
WS=18000
RWs= (v/tsup/WS)**b
tf=temp
Kt1=6.77
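# Arps-type temperature correction below: Rw(T) = Rw_surf*(T_surf + Kt1)/(T + Kt1); Kt1 = 6.77 is the Arps constant for temperatures in deg F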
df['RW']=(RWs*(tsup+Kt1))/(temp+Kt1)
df['Vsh'] = (df.GR_EDTC - 10) / (156 - 10)
df['Vclay']=((0.65)*df.Vsh)
mud_density=1.13835 # in g/cc
rhoss=2.70 # g/cc
rhosh=2.75
df['grain_density']=((df.Vsh*rhosh)+(1-df.Vsh)*rhoss)
df['porosity']=(df.grain_density-(df.RHOZ))/(df.grain_density-mud_density)
# %%
CORE =pd.read_excel('./CORE/CORE.xlsx',sheet_name='XRD')
mask = CORE.Well.isin(['U18'])
U18_Core = CORE[mask]
prof=U18_Core['Depth']
clays=U18_Core['Clays']
xls1 = pd.read_excel ('./CORE/CORE.xlsx', sheet_name='Saturation')
mask = xls1.Well.isin(['U18'])
U18_sat = xls1[mask]
long=U18_sat ['Depth']
poro=U18_sat ['PHIT']
grain=U18_sat ['RHOG']
sw_core=U18_sat ['Sw']
klinkenberg =U18_sat ['K']
minimo=grain.min()
maximo=grain.max()
c=2.65
d=2.75
norm=(((grain-minimo)*(d-c)/(maximo-minimo))+c)
xls2 = pd.read_excel ('./CORE/CORE.xlsx', sheet_name='Gamma')
mask = xls2.Well.isin(['U18'])
U18_GR = xls2[mask]
h=U18_GR['Depth']
cg1=U18_GR['GR_Scaled']
plt.hist(clays,bins=50,facecolor='y',alpha=0.75,ec='black', label="Vclay")
plt.title('Histogram-Vclay')
plt.xlabel('%Vclay')
plt.ylabel('Frecuency')
plt.legend()
# %%
dt = 200
bt= 1350
plt.figure(figsize=(15,9))
plt.subplot(171)
plt.plot(df.GR_EDTC,TDEP,'g',lw=0.5)
plt.title('$GR$')
plt.axis([20, 130, dt,bt])
plt.xlabel('Gamma Ray ')
plt.gca().invert_yaxis()
plt.grid(True)
plt.subplot(172)
plt.plot(df.AT90,TDEP,lw=0.5)
plt.axis([10, 800, dt,bt])
plt.title('$AT90$')
plt.xlabel('Resistivity')
plt.gca().invert_yaxis()
plt.xscale('log')
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.subplot(173)
plt.plot(df.RHOZ,TDEP,'red',lw=0.5)
plt.axis([2.25, 2.65, dt,bt])
plt.title('$RHOZ$')
plt.xlabel('Standard \n Resolution \n Formation \n Density')
plt.gca().invert_yaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.subplot(174)
plt.plot(df.NPHI,TDEP,'purple',lw=0.5)
plt.axis([0.6, 0.1, dt,bt])
plt.title('$NPHI$')
plt.xlabel('Thermal \n Neutron \n Porosity',fontsize=8)
plt.gca().invert_yaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.subplot(175)
plt.plot(df.DTCO,TDEP,'r',lw=0.5)
plt.title('$DTCO$')
plt.xlabel('Delta-T \n Compressional ')
plt.axis([60,125, dt,bt])
plt.gca().invert_yaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.subplot(176)
plt.plot(temp,TDEP,'c')
plt.axis([20, 65, dt,bt])
plt.gca().invert_yaxis()
plt.title('$TEMP$')
plt.xlabel('Temperature')
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.subplot(177)
plt.plot(df.RW,TDEP,'blue',lw=0.5)
plt.title('$RW$')
plt.axis([0.4, 0.85, dt,bt])
plt.xlabel('Water \n Resistivity')
plt.gca().invert_yaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.suptitle('U18_WELL LOGS_')
plt.show()
# %%
## SW_Archie
## SW=((a*Rw)/(Rt*(Por)^m))^(1/n)
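## e.g. with a=1, m=n=2, Phi=0.2, Rw=0.5 ohm-m and Rt=20 ohm-m (illustrative values only): F = 1/0.2**2 = 25 and Sw = (25*0.5/20)**0.5 ~ 0.79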
a=1
m=2
n=2
Rw=df.RW
Rt=df.AT90
Phi=df.porosity
F = (a / (Phi**m))
df['Sw_a'] = (F *Rw/Rt)**(1/n)
df['Sw_a1']= df['Sw_a'].apply(lambda x: 1 if x>1 else x)
df['Sw_a1'] = df['Sw_a1'].replace(np.nan, 1)
dfSh = df[df['Vsh']>0.5]
Rsh = 50
#Sw_Poupon
# TERM1= 1/RT - VSH/RSH
term1=(1/df.AT90)-(df.Vsh/Rsh)
## TERM2 = F*RW
term2=(F*df.RW)
## TERM3 = (1-vsh)
term3=(1-df.Vsh)
## SW_POUPON = ((TERM1*TERM2)/TERM3))^(1/N)
df['Sw_p']=((term1*term2)/term3)**(1/n)
df['Sw_p1']= df['Sw_p'].apply(lambda x: 1 if x >1 else x)
df['Sw_p1'] = df['Sw_p1'].replace(np.nan, 1)
# %%
# WAXMAN-SMITS CEC method (does not require VCL) but requires core measurements of CEC
TempC = (temp-32)/1.8
df['SwWS'] = df['Sw_p1']
CEC_av = 5
# ===== Waxman Smits Iterations. Reference: Well Logging for Earth Scientists, Page 663-667
for i in range(len(Rt)):
error = 1000
count1 = 0
phit = Phi.iloc[i]
if math.isnan(phit):
df['SwWS'][i] = 1
else:
Qv = rhosh*(1-phit)*CEC_av/phit/100 # Old Method
Bcond = 3.83*(1-0.83*np.exp(-0.5/Rw.iloc[i])) # Waxman and Thomas, 1974
BQv = Qv*Bcond
E = (phit**m)/a
Ct = 1/Rt.iloc[i]
Cw = 1/Rw.iloc[i]
x0 = df.iloc[i]['Sw_a1']
Swguess = x0
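        # Newton-Raphson iteration on g(Sw) = E*Cw*Sw**n + E*BQv*Sw**(n-1) - Ct = 0 (Waxman-Smits saturation equation)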
while count1 <= 100 and error > 0.0001:
count1 = count1+1
g = E*Cw*(Swguess**n) + E*BQv*(Swguess**(n-1)) - Ct
error = g
gp = n*E*Cw*(Swguess**(n-1)) + (n-1)*E*BQv*(Swguess**(n-2))
# print(df_1['SwWS'][i-1])
df['SwWS'].iloc[i] = Swguess-g/gp
Swguess = df['SwWS'].iloc[i]
# %%
# SIMANDOUX (1963) for shaly-sandy formations, used with saline fm waters Equation solved for n=2
# Input parameters:
#Rw - water resistivity
#Rt - true resistivity
#Phi - porosity
#Rsh - shale resistivity
# a - tortuosity factor
# m - cementation exponent
# n - saturation exponent
# Vsh - Volume of shale
df['Swsim']=((a*Rw)/(2*(Phi**m)))*(((df.Vsh/Rsh)**2+((4*Phi**m)/(a*Rw*Rt)))**(1/2)-(df.Vsh/Rsh))
df['Swsim1'] = df['Swsim'].replace(np.nan, 1)
df.head(2000)
# %%
plt.figure(figsize=(15,9))
plt.subplot(191)
plt.plot (df.GR_EDTC,TDEP,'g',cg1,h,'c.',lw=0.5)
plt.title('$GR/ Core.GR $')
plt.axis([20, 130, top,bottom])
plt.xlabel('Gamma Ray ')
plt.gca().invert_yaxis()
plt.grid(True)
plt.subplot(192)
plt.title('Vsh')
plt.plot (df.Vsh,TDEP,'black',lw=0.5)
plt.axis([0,1, top,bottom])
plt.gca().invert_yaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.subplot(193)
plt.title('$Vclay/Vclay Core$')
plt.plot (df.Vclay,TDEP,'m',clays,prof,'ro',lw=0.5)
plt.axis([0,1, top,bottom])
plt.gca().invert_yaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.subplot(194)
plt.title('Porosity \n Core Por.')
plt.plot (df.porosity,TDEP,'m',poro,long,'c*',lw=0.5)
plt.axis([0, 0.3, top,bottom])
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.subplot(195)
plt.title('Grain density \n Core GD')
plt.plot (df.grain_density,TDEP,'y',norm,long,'g>',lw=0.5)
plt.axis([2.64, 2.76, top,bottom])
plt.gca().invert_yaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
#Basic Archie
plt.subplot(196)
plt.plot (df.Sw_a1,TDEP,'c',sw_core,long,'m.',lw=0.5)
plt.title('$SW_A$')
plt.axis([0,1.1,top,bottom])
plt.xlabel('Water \n Saturation_A')
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.xlim(1, 0)
#Poupon Laminated Model
plt.subplot(197)
plt.plot (df.Sw_p1,TDEP,'r',sw_core,long,'m.',lw=0.5)
plt.title('$SW_P$')
plt.axis([0,1.5,top,bottom])
plt.xlabel('Water \n Saturation_P')
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.xlim(1, 0)
#Waxman-Smits
plt.subplot(198)
plt.plot (df.SwWS,TDEP,'g',sw_core,long,'m.',lw=0.5)
plt.title('$SW_W$')
plt.axis([0,1,top,bottom])
plt.xlabel('Water \n Saturation_Waxman')
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.xlim(1, 0)
#Simandoux
plt.subplot(199)
plt.plot (df.Swsim1,TDEP,'y',sw_core,long,'m.',lw=0.5)
plt.title('$SW_S$')
plt.axis([0,1,top,bottom])
plt.xlabel('Water \n Saturation_Sim')
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.xlim(1, 0)
plt.show()
# %%
corte=0.5
df['PAY_archie']=df.Sw_a1.apply(lambda x: 1 if x<corte else 0)
df['PAY_poupon']=df.Sw_p1.apply(lambda x: 1 if x<corte else 0)
df['PAY_waxman']=df.SwWS.apply(lambda x: 1 if x<corte else 0)
df['PAY_simandoux']=df.Swsim1.apply(lambda x: 1 if x<corte else 0)
plt.figure(figsize=(15,9))
plt.subplot(191)
#Basic Archie
plt.plot (df.Sw_a1,TDEP,'c',lw=0.5)
plt.title('$SW_A$')
plt.axis([0,1,top,bottom])
plt.xlabel('Sw_Archie')
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.xlim(1, 0)
plt.subplot(192)
plt.plot (df.PAY_archie,TDEP,'c',lw=0.5)
plt.title('$PAY_A$')
plt.fill_between(df.PAY_archie,TDEP, color='c', alpha=0.8)
plt.axis([0,0.001,top,bottom])
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
#Poupon Laminated Model
plt.subplot(193)
plt.plot (df.Sw_p1,TDEP,'r',lw=0.5)
plt.title('$SW_P$')
plt.axis([0,1.5,top,bottom])
plt.xlabel('Sw_Poupon')
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.xlim(1, 0)
plt.subplot(194)
plt.plot (df.PAY_poupon,TDEP,'r',lw=0.5)
plt.title('$PAY_P$')
plt.fill_between(df.PAY_poupon,TDEP, color='r', alpha=0.8)
plt.axis([0,0.001,top,bottom])
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
#Waxman-Smits
plt.subplot(195)
plt.plot (df.SwWS,TDEP,'g',lw=0.5)
plt.title('$SW_W$')
plt.axis([0,5,top,bottom])
plt.xlabel('Sw_Waxman')
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.xlim(1, 0)
plt.subplot(196)
plt.plot (df.PAY_waxman,TDEP,'g',lw=0.5)
plt.title('$PAY_W$')
plt.fill_between(df.PAY_waxman,TDEP, color='g', alpha=0.8)
plt.axis([0,0.001,top,bottom])
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
#Simandoux
plt.subplot(197)
plt.plot (df.Swsim1,TDEP,'y',lw=0.5)
plt.title('$SW_S$')
plt.axis([0,2,top,bottom])
plt.xlabel('Sw_Simandoux')
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.xlim(1, 0)
plt.subplot(198)
plt.plot (df.PAY_simandoux,TDEP,'y',lw=0.5)
plt.title('$PAY_S$')
plt.fill_between(df.PAY_simandoux,TDEP, color='y', alpha=0.8)
plt.axis([0,0.001,top,bottom])
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.show()
# %%
df.insert(21, "WELL", 'U18')
df.head()
with pd.ExcelWriter('U18.xlsx') as writer:
df.to_excel(writer, sheet_name='U18_data')
| [
"[email protected]"
]
| |
06b5c58d9ad0cb17cf0ba76d39d551040997dd6d | e59b939d0cf51783fabf750034cc926cd1830ac7 | /blender/nodes/output/__init__.py | db10ea1183e37c196fd7d1981634e73dc11d93cc | [
"MIT"
]
| permissive | taichi-dev/taichi_elements | cf96e66fd1207d5389709da0c6148e19c990db06 | 2451c9843a7058a1edcd10e448460f4ba509ecc4 | refs/heads/master | 2023-08-19T09:07:36.709483 | 2023-02-09T19:39:25 | 2023-02-09T19:39:25 | 231,448,642 | 400 | 71 | MIT | 2023-07-25T21:29:52 | 2020-01-02T19:46:31 | Python | UTF-8 | Python | false | false | 20 | py | from .mesh import *
| [
"[email protected]"
]
| |
9e6d61a3743d70fc652f40ee1dce7897a9019284 | 0000c8f4a481000676463f81d55c2ea21862cbd5 | /not-yet-done-examples.py | d2d8e2a9c2d8f5293eea6153628712f8ddbc0858 | []
| no_license | robertej19/813 | b5ca9b51504e002189861bc0e1230bd43c5f6005 | f1417f05e9d08d5693d6ecd8363d1dd7552d2e12 | refs/heads/master | 2022-12-18T14:36:26.644424 | 2020-09-21T13:58:06 | 2020-09-21T13:58:06 | 292,097,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | Here is my code for doing the fit and plotting:
# (assumes: numpy as np, matplotlib.pyplot as plt, scipy.optimize.curve_fit, and a gauss(x, amplitude, mean, sigma) model are defined/imported)
popt, pcov = curve_fit(gauss, xval, yval, sigma=yerror, p0=[100, 3300, 140], absolute_sigma=False)
xx = np.arange(xmin, xmax)
plt.plot(xx, gauss(xx, *popt), label='fit')
# One-line method to load a CSV data file into Python with numpy
import numpy as np
data=[*zip(*np.genfromtxt('cubeData.csv',delimiter=','))] | [
"[email protected]"
]
| |
749ebd1fc73831af5d53749d304aa4f0729f1cf8 | 0ca0fc2c2aad412d9e2936d5d01fb1abc1539ee4 | /apps/cart/forms.py | bead7dbbce173bcc6584e87b86d1c9a91dad31e7 | [
"MIT"
]
| permissive | yeboahd24/python202 | 1f399426a1f46d72da041ab3d138c582c695462d | 35963db9a4ad5fcd567ce1e98c673f1a2ed2abef | refs/heads/master | 2023-05-06T04:14:19.336839 | 2021-06-02T01:22:44 | 2021-06-02T01:22:44 | 309,841,303 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | from django import forms
class CheckoutForm(forms.Form):
first_name = forms.CharField(max_length=255)
last_name = forms.CharField(max_length=255)
email = forms.EmailField(max_length=255)
phone = forms.CharField(max_length=255)
address = forms.CharField(max_length=255)
zipcode = forms.CharField(max_length=255)
place = forms.CharField(max_length=255)
stripe_token = forms.CharField(max_length=255) | [
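# Example usage (sketch): validate posted checkout data in a view
#   form = CheckoutForm(request.POST)
#   if form.is_valid():
#       order_details = form.cleaned_data  # first_name, ..., stripe_token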
"[email protected]"
]
| |
18a1ef9adc1cffb62a94ab625de750a18568e630 | ea544b339809095d2c383b542248f530990c31d5 | /env/lib/python3.6/site-packages/pip/_vendor/html5lib/treewalkers/base.py | ba04ae2bb9cec5cf9fc1e3ea2a220624ca47aea1 | [
"BSD-3-Clause"
]
| permissive | 724686158/NosqlEXP3 | 5fab1a9e131c6936b5b61e0f1c86eea2c889294a | e29f2807f075831377456b47cf8c9ce0c8d65c30 | refs/heads/master | 2020-04-09T01:40:54.370782 | 2019-01-25T13:04:04 | 2019-01-25T13:04:04 | 159,912,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,476 | py | from __future__ import absolute_import, division, unicode_literals
from xml.dom import Node
from ..constants import namespaces, voidElements, spaceCharacters
__all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN",
"TreeWalker", "NonRecursiveTreeWalker"]
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
UNKNOWN = "<#UNKNOWN#>"
spaceCharacters = "".join(spaceCharacters)
class TreeWalker(object):
"""Walks a tree yielding tokens
Tokens are dicts that all have a ``type`` field specifying the type of the
token.
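    For example, a start tag is emitted as a dict of the form
    ``{"type": "StartTag", "name": name, "namespace": namespace, "data": attrs}``
    (mirroring the ``startTag`` method below).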
"""
def __init__(self, tree):
"""Creates a TreeWalker
:arg tree: the tree to walk
"""
self.tree = tree
def __iter__(self):
raise NotImplementedError
def error(self, msg):
"""Generates an error token with the given message
:arg msg: the error message
:returns: SerializeError token
"""
return {"type": "SerializeError", "data": msg}
def emptyTag(self, namespace, name, attrs, hasChildren=False):
"""Generates an EmptyTag token
:arg namespace: the namespace of the token--can be ``None``
:arg name: the name of the element
:arg attrs: the attributes of the element as a dict
:arg hasChildren: whether or not to yield a SerializationError because
this tag shouldn't have children
:returns: EmptyTag token
"""
yield {"type": "EmptyTag", "name": name,
"namespace": namespace,
"data": attrs}
if hasChildren:
yield self.error("Void element has children")
def startTag(self, namespace, name, attrs):
"""Generates a StartTag token
:arg namespace: the namespace of the token--can be ``None``
:arg name: the name of the element
:arg attrs: the attributes of the element as a dict
:returns: StartTag token
"""
return {"type": "StartTag",
"name": name,
"namespace": namespace,
"data": attrs}
def endTag(self, namespace, name):
"""Generates an EndTag token
:arg namespace: the namespace of the token--can be ``None``
:arg name: the name of the element
:returns: EndTag token
"""
return {"type": "EndTag",
"name": name,
"namespace": namespace}
def text(self, data):
"""Generates SpaceCharacters and Characters tokens
Depending on what's in the data, this generates one or more
``SpaceCharacters`` and ``Characters`` tokens.
        For example:
>>> from html5lib.treewalkers.base import TreeWalker
>>> # Give it an empty tree just so it instantiates
>>> walker = TreeWalker([])
>>> list(walker.text(''))
[]
>>> list(walker.text(' '))
[{u'data': ' ', u'type': u'SpaceCharacters'}]
>>> list(walker.text(' abc ')) # doctest: +NORMALIZE_WHITESPACE
[{u'data': ' ', u'type': u'SpaceCharacters'},
{u'data': u'abc', u'type': u'Characters'},
{u'data': u' ', u'type': u'SpaceCharacters'}]
:arg data: the text data
:returns: one or more ``SpaceCharacters`` and ``Characters`` tokens
"""
data = data
middle = data.lstrip(spaceCharacters)
left = data[:len(data) - len(middle)]
if left:
yield {"type": "SpaceCharacters", "data": left}
data = middle
middle = data.rstrip(spaceCharacters)
right = data[len(middle):]
if middle:
yield {"type": "Characters", "data": middle}
if right:
yield {"type": "SpaceCharacters", "data": right}
def comment(self, data):
"""Generates a Comment token
:arg data: the comment
:returns: Comment token
"""
return {"type": "Comment", "data": data}
def doctype(self, name, publicId=None, systemId=None):
"""Generates a Doctype token
:arg name:
:arg publicId:
:arg systemId:
:returns: the Doctype token
"""
return {"type": "Doctype",
"name": name,
"publicId": publicId,
"systemId": systemId}
def entity(self, name):
"""Generates an Entity token
:arg name: the entity name
:returns: an Entity token
"""
return {"type": "Entity", "name": name}
def unknown(self, nodeType):
"""Handles unknown node types"""
return self.error("Unknown node type: " + nodeType)
class NonRecursiveTreeWalker(TreeWalker):
def getNodeDetails(self, node):
raise NotImplementedError
def getFirstChild(self, node):
raise NotImplementedError
def getNextSibling(self, node):
raise NotImplementedError
def getParentNode(self, node):
raise NotImplementedError
def __iter__(self):
currentNode = self.tree
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
hasChildren = False
if type == DOCTYPE:
yield self.doctype(*details)
elif type == TEXT:
for token in self.text(*details):
yield token
elif type == ELEMENT:
namespace, name, attributes, hasChildren = details
if (not namespace or namespace == namespaces["html"]) and name in voidElements:
for token in self.emptyTag(namespace, name, attributes,
hasChildren):
yield token
hasChildren = False
else:
yield self.startTag(namespace, name, attributes)
elif type == COMMENT:
yield self.comment(details[0])
elif type == ENTITY:
yield self.entity(details[0])
elif type == DOCUMENT:
hasChildren = True
else:
yield self.unknown(details[0])
if hasChildren:
firstChild = self.getFirstChild(currentNode)
else:
firstChild = None
if firstChild is not None:
currentNode = firstChild
else:
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
if type == ELEMENT:
namespace, name, attributes, hasChildren = details
if (namespace and namespace != namespaces["html"]) or name not in voidElements:
yield self.endTag(namespace, name)
if self.tree is currentNode:
currentNode = None
break
nextSibling = self.getNextSibling(currentNode)
if nextSibling is not None:
currentNode = nextSibling
break
else:
currentNode = self.getParentNode(currentNode)
| [
"[email protected]"
]
|