file_name (stringlengths 3–137) | prefix (stringlengths 0–918k) | suffix (stringlengths 0–962k) | middle (stringlengths 0–812k)
---|---|---|---|
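Each row that follows splits one source file into a prefix, a suffix, and the held-out middle (the hole is marked with `|` inside the row). Below is a minimal sketch of how a dump with these columns could be iterated and reassembled; the Hugging Face `datasets` loader and the dataset id are assumptions, not something stated by this dump.

```python
# Minimal sketch: iterate a fill-in-the-middle dump with the columns shown above.
# The dataset id "user/fim-code-dump" is hypothetical; any loader that yields
# dicts with file_name/prefix/middle/suffix keys would work the same way.
from datasets import load_dataset

ds = load_dataset("user/fim-code-dump", split="train")  # hypothetical dataset id

for row in ds.select(range(3)):
    # The original file is recovered by slotting the middle back between
    # the prefix and the suffix.
    source = row["prefix"] + row["middle"] + row["suffix"]
    print(row["file_name"], len(source), "characters")
```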
product.py | from web3 import Web3
from brownie import Contract
from brownie.convert import to_bytes
from brownie.network import accounts
from brownie.network.account import Account
from brownie import (
Wei,
Contract,
# Registry,
# RegistryController,
License,
LicenseController,
Policy,
PolicyController,
QueryController,
ProductService,
OracleService,
ComponentOwnerService,
PolicyFlowDefault,
InstanceOperatorService,
TestOracle,
TestProduct,
)
from scripts.const import (
ORACLE_INPUT_FORMAT,
ORACLE_OUTPUT_FORMAT,
ORACLE_NAME,
PRODUCT_NAME,
)
from scripts.util import (
get_account,
encode_function_data,
# s2h,
s2b32,
deployGifModule,
deployGifService,
)
from scripts.instance import (
GifInstance,
)
class GifTestOracle(object):
def __init__(self, instance: GifInstance, oracleOwner: Account):
operatorService = instance.getInstanceOperatorService()
componentOwnerService = instance.getComponentOwnerService()
oracleService = instance.getOracleService()
# 1) add oracle provider role to owner
opRole = operatorService.oracleProviderRole()
operatorService.addRoleToAccount(oracleOwner, opRole)
# 2) oracle owner creates oracle
self.oracle = TestOracle.deploy(
s2b32(ORACLE_NAME),
instance.getRegistry(),
{'from': oracleOwner})
# 3) oracle owner proposes oracle to instance
componentOwnerService.propose(
self.oracle,
{'from': oracleOwner})
# 4) instance operator approves oracle
operatorService.approveOracle(
self.oracle.getId(),
{'from': instance.getOwner()})
def getOracleId(self) -> int:
return self.oracle.getId()
def getOracleContract(self) -> TestOracle:
return self.oracle
class GifTestProduct(object):
def __init__(self, instance: GifInstance, oracle: GifTestOracle, productOwner: Account):
self.policyController = instance.getPolicyController()
operatorService = instance.getInstanceOperatorService()
productService = instance.getProductService()
self.product = TestProduct.deploy(
productService,
s2b32(PRODUCT_NAME),
oracle.getOracleId(),
{'from': productOwner})
operatorService.approveProduct(
self.product.getId(),
{'from': instance.getOwner()})
def getProductId(self) -> int:
return self.product.getId()
def getProductContract(self) -> TestProduct:
return self.product
def | (self, policyId: str):
return self.policyController.getPolicy(policyId) | getPolicy |
fscore_test.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the fscore metric."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow_graphics.nn.metric import fscore
from tensorflow_graphics.nn.metric import precision
from tensorflow_graphics.nn.metric import recall
from tensorflow_graphics.util import test_case
def random_tensor(tensor_shape):
return np.random.uniform(low=0.0, high=1.0, size=tensor_shape)
def random_tensor_shape():
tensor_size = np.random.randint(5) + 1
return np.random.randint(1, 10, size=(tensor_size)).tolist()
def binary_precision_function(ground_truth, predictions):
return precision.evaluate(ground_truth, predictions, classes=[1])
def | (ground_truth, predictions):
return recall.evaluate(ground_truth, predictions, classes=[1])
class FscoreTest(test_case.TestCase):
@parameterized.parameters(
# Precision = 0.5, Recall = 0.25.
((0, 1, 1, 1, 1), (1, 1, 0, 0, 0), 2 * (0.5 * 0.25) / (0.5 + 0.25)),
# Precision = 1, Recall = 1.
((0, 0, 0, 1, 1, 1, 0, 1), (0, 0, 0, 1, 1, 1, 0, 1), 1),
# Precision = 0, Recall = 0.
((0, 1, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0))
def test_evaluate_preset(self, ground_truth, predictions, expected_fscore):
tensor_shape = random_tensor_shape()
ground_truth_labels = np.tile(ground_truth, tensor_shape + [1])
predicted_labels = np.tile(predictions, tensor_shape + [1])
expected = np.tile(expected_fscore, tensor_shape)
result = fscore.evaluate(
ground_truth_labels,
predicted_labels,
precision_function=binary_precision_function,
recall_function=binary_recall_function)
self.assertAllClose(expected, result)
@parameterized.parameters(
("Not all batch dimensions are broadcast-compatible.", (1, 5, 3), (4, 3)),
("Not all batch dimensions are broadcast-compatible.", (3, 4), (2, 4, 5)),
)
def test_evaluate_shape_exception_raised(self, error_msg, *shape):
"""Tests that the shape exception is raised."""
self.assert_exception_is_raised(fscore.evaluate, error_msg, shape)
@parameterized.parameters(
((1, 5, 3), (2, 5, 1)),
((None, 2, 6), (4, 2, None)),
((3, 1, 1, 2), (3, 5, 8, 2)),
)
def test_evaluate_shape_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(fscore.evaluate, shapes)
if __name__ == "__main__":
test_case.main()
| binary_recall_function |
test.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Check that `#[test]` works with extern-absolute-paths enabled.
// |
// compile-flags: --test
#![feature(extern_absolute_paths)]
#[test]
fn test() {
} | // Regression test for #47075. |
skiper-user-invoice.service.spec.ts | import { Test, TestingModule } from '@nestjs/testing';
import { SkiperUserInvoiceService } from './skiper-user-invoice.service';
describe('SkiperUserInvoiceService', () => {
let service: SkiperUserInvoiceService; | }).compile();
service = module.get<SkiperUserInvoiceService>(SkiperUserInvoiceService);
});
it('should be defined', () => {
expect(service).toBeDefined();
});
}); |
beforeEach(async () => {
const module: TestingModule = await Test.createTestingModule({
providers: [SkiperUserInvoiceService], |
furcode.js | function | (code) {
var cmd = code.split(' ').slice(0,1)
if (cmd == "owo") {
var owoText = code.replace("owo ","");
console.log(owoText)
}
if (cmd == "uwu") {
var uwuArgs = code.replace("uwu ","");
var arg1 = uwuArgs.split(" ").slice(0,1)
var arg2 = uwuArgs.split(" ").slice(1,2)
var lastuwuoutput = uwuArgs.split(" ").slice(2);
if (arg1.toString() == arg2.toString()) {
console.log(lastuwuoutput.toString())
}
}
if (cmd == "uwuEval") {
var uwuArgs = code.replace("uwuEval ","");
var arg1 = uwuArgs.split(" ").slice(0,1)
var arg2 = uwuArgs.split(" ").slice(1,2)
var lastuwuoutput = uwuArgs.split(" ").slice(2);
if (arg1.toString() == arg2.toString()) {
eval(lastuwuoutput.toString())
}
}
if (cmd == "fursuit") {
var uwuArgs = code.replace("fursuit ","");
var arg1 = uwuArgs.split(" ").slice(0,1)
var arg2 = uwuArgs.split(" ").slice(1,2)
var lastuwuoutput = uwuArgs.split(" ").slice(2);
if (arg1.toString() != arg2.toString()) {
console.log(lastuwuoutput.toString())
}
}
if (cmd == "fursuitEval") {
var uwuArgs = code.replace("fursuitEval ","");
var arg1 = uwuArgs.split(" ").slice(0,1)
var arg2 = uwuArgs.split(" ").slice(1,2)
var lastuwuoutput = uwuArgs.split(" ").slice(2);
if (arg1.toString() != arg2.toString()) {
eval(lastuwuoutput.toString())
}
}
}
| fureval |
speech.py | def rec():
r = sr.Recognizer()
with sr.Microphone() as source:
audio = r.listen(source)
try:
text = r.recognize_google(audio)
return(text)
except:
return("Sorry, couldn't recognize your voice. Please try again.") | import speech_recognition as sr
|
|
graphics_file.py | # -*- coding: utf-8 -*-
"""
Wrapper for Graphics Files
"""
import os
import six
from sage.misc.temporary_file import tmp_filename
from sage.structure.sage_object import SageObject
import sage.doctest
class Mime(object):
TEXT = u'text/plain'
HTML = u'text/html'
LATEX = u'text/latex'
JSON = u'application/json'
JAVASCRIPT = u'application/javascript'
PDF = u'application/pdf'
PNG = u'image/png'
JPG = u'image/jpeg'
SVG = u'image/svg+xml'
JMOL = u'application/jmol'
@classmethod
def validate(cls, value):
"""
Check that input is known mime type
INPUT:
- ``value`` -- string.
OUTPUT:
Unicode string of that mime type. A ``ValueError`` is raised
if input is incorrect / unknown.
EXAMPLES::
sage: from sage.structure.graphics_file import Mime
sage: Mime.validate('image/png')
u'image/png'
sage: Mime.validate('foo/bar')
Traceback (most recent call last):
...
ValueError: unknown mime type
"""
value = str(value).lower()
for k, v in cls.__dict__.items():
if isinstance(v, six.string_types) and v == value:
return v
raise ValueError('unknown mime type')
@classmethod
def extension(cls, mime_type):
"""
Return file extension.
INPUT:
- ``mime_type`` -- mime type as string.
OUTPUT:
String containing the usual file extension for that type of
file. Excludes ``os.extsep``.
EXAMPLES::
sage: from sage.structure.graphics_file import Mime
sage: Mime.extension('image/png')
'png'
"""
try:
return preferred_filename_ext[mime_type]
except KeyError:
raise ValueError('no known extension for mime type')
preferred_filename_ext = {
Mime.TEXT: 'txt',
Mime.HTML: 'html',
Mime.LATEX: 'tex',
Mime.JSON: 'json',
Mime.JAVASCRIPT: 'js',
Mime.PDF: 'pdf',
Mime.PNG: 'png',
Mime.JPG: 'jpg',
Mime.SVG: 'svg',
Mime.JMOL: 'spt.zip',
}
mimetype_for_ext = dict(
(value, key) for (key, value) in preferred_filename_ext.items()
)
class GraphicsFile(SageObject):
def __init__(self, filename, mime_type=None):
"""
Wrapper around a graphics file.
"""
self._filename = filename
if mime_type is None:
mime_type = self._guess_mime_type(filename)
self._mime = Mime.validate(mime_type)
def _guess_mime_type(self, filename):
"""
Guess mime type from file extension
"""
ext = os.path.splitext(filename)[1]
ext = ext.lstrip(os.path.extsep)
try:
return mimetype_for_ext[ext]
except KeyError:
raise ValueError('unknown file extension, please specify mime type')
def _repr_(self):
|
def filename(self):
return self._filename
def save_as(self, filename):
"""
Make the file available under a new filename.
INPUT:
- ``filename`` -- string. The new filename.
The newly-created ``filename`` will be a hardlink if
possible. If not, an independent copy is created.
"""
try:
os.link(self.filename(), filename)
except OSError:
import shutil
shutil.copy2(self.filename(), filename)
def mime(self):
return self._mime
def data(self):
"""
Return a byte string containing the image file.
"""
with open(self._filename, 'rb') as f:
return f.read()
def launch_viewer(self):
"""
Launch external viewer for the graphics file.
.. note::
Does not actually launch a new process when doctesting.
EXAMPLES::
sage: from sage.structure.graphics_file import GraphicsFile
sage: g = GraphicsFile('/tmp/test.png', 'image/png')
sage: g.launch_viewer()
"""
if sage.doctest.DOCTEST_MODE:
return
from sage.plot.plot import EMBEDDED_MODE
if EMBEDDED_MODE:
raise RuntimeError('should never launch viewer in embedded mode')
if self.mime() == Mime.JMOL:
return self._launch_jmol()
from sage.misc.viewer import viewer
command = viewer(preferred_filename_ext[self.mime()])
os.system('{0} {1} 2>/dev/null 1>/dev/null &'
.format(command, self.filename()))
# TODO: keep track of opened processes...
def _launch_jmol(self):
launch_script = tmp_filename(ext='.spt')
with open(launch_script, 'w') as f:
f.write('set defaultdirectory "{0}"\n'.format(self.filename()))
f.write('script SCRIPT\n')
from sage.env import SAGE_LOCAL
JMOL = os.path.join(SAGE_LOCAL, 'bin', 'jmol')
os.system('{0} {1} 2>/dev/null 1>/dev/null &'
.format(JMOL, launch_script))
def sagenb_embedding(self):
"""
Embed in SageNB
This amounts to just placing the file in the cell
directory. The notebook will then try to guess what we want
with it.
"""
from sage.misc.temporary_file import graphics_filename
ext = "." + Mime.extension(self.mime())
fn = graphics_filename(ext=ext)
self.save_as(fn)
# Client-server sagenb requires this to be world-readable.
# See Trac #17755.
os.chmod(fn, 0o644)
def graphics_from_save(save_function, preferred_mime_types,
allowed_mime_types=None, figsize=None, dpi=None):
"""
Helper function to construct a graphics file.
INPUT:
- ``save_function`` -- callable that can save graphics to a file
and accepts options like
:meth:`sage.plot.graphics.Graphics.save``.
- ``preferred_mime_types`` -- list of mime types. The graphics
output mime types in order of preference (i.e. best quality to
worst).
- ``allowed_mime_types`` -- set of mime types (as strings). The
graphics types that we can display. Output, if any, will be one
of those.
- ``figsize`` -- pair of integers (optional). The desired graphics
size in pixels. Suggested, but need not be respected by the
output.
- ``dpi`` -- integer (optional). The desired resolution in dots
per inch. Suggested, but need not be respected by the output.
OUTPUT:
Return an instance of
:class:`sage.structure.graphics_file.GraphicsFile` encapsulating a
suitable image file. Image is one of the
``preferred_mime_types``. If ``allowed_mime_types`` is specified,
the resulting file format matches one of these.
Alternatively, this function can return ``None`` to indicate that
textual representation is preferable and/or no graphics with the
desired mime type can be generated.
"""
# Figure out best mime type
mime = None
if allowed_mime_types is None:
mime = Mime.PNG
else:
# order of preference
for m in preferred_mime_types:
if m in allowed_mime_types:
mime = m
break
if mime is None:
return None # don't know how to generate suitable graphics
# Generate suitable temp file
filename = tmp_filename(ext=os.path.extsep + Mime.extension(mime))
# Call the save_function with the right arguments
kwds = {}
if figsize is not None:
kwds['figsize'] = figsize
if dpi is not None:
kwds['dpi'] = dpi
save_function(filename, **kwds)
return GraphicsFile(filename, mime)
| """
Return a string representation.
"""
return 'Graphics file {0}'.format(self.mime()) |
views.py | import logging
import csv
import re
from openpyxl import Workbook
from openpyxl.styles import Font
from tempfile import NamedTemporaryFile
from datetime import datetime
import operator
from django.contrib.auth.models import User
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import ValidationError, PermissionDenied
from django.urls import reverse
from django.db.models import Q, Count
from django.http import HttpResponseRedirect, StreamingHttpResponse, HttpResponse, FileResponse, QueryDict
from django.shortcuts import render, get_object_or_404
from django.views.decorators.cache import cache_page
from django.utils import timezone
from time import strftime
from django.contrib.admin.utils import NestedObjects
from django.db import DEFAULT_DB_ALIAS
from dojo.engagement.services import close_engagement, reopen_engagement
from dojo.filters import EngagementFilter, EngagementDirectFilter, EngagementTestFilter
from dojo.forms import CheckForm, \
UploadThreatForm, RiskAcceptanceForm, NoteForm, DoneForm, \
EngForm, TestForm, ReplaceRiskAcceptanceProofForm, AddFindingsRiskAcceptanceForm, DeleteEngagementForm, ImportScanForm, \
CredMappingForm, JIRAEngagementForm, JIRAImportScanForm, TypedNoteForm, JIRAProjectForm, \
EditRiskAcceptanceForm
from dojo.models import Finding, Product, Engagement, Test, \
Check_List, Test_Import, Notes, \
Risk_Acceptance, Development_Environment, Endpoint, \
Cred_Mapping, Dojo_User, System_Settings, Note_Type, Product_API_Scan_Configuration
from dojo.tools.factory import get_scan_types_sorted
from dojo.utils import add_error_message_to_response, add_success_message_to_response, get_page_items, add_breadcrumb, handle_uploaded_threat, \
FileIterWrapper, get_cal_event, Product_Tab, is_scan_file_too_large, \
get_system_setting, redirect_to_return_url_or_else, get_return_url
from dojo.notifications.helper import create_notification
from dojo.finding.views import find_available_notetypes
from functools import reduce
from django.db.models.query import Prefetch, QuerySet
import dojo.jira_link.helper as jira_helper
import dojo.risk_acceptance.helper as ra_helper
from dojo.risk_acceptance.helper import prefetch_for_expiration
from dojo.finding.helper import NOT_ACCEPTED_FINDINGS_QUERY
from django.views.decorators.vary import vary_on_cookie
from dojo.authorization.authorization import user_has_permission_or_403
from dojo.authorization.roles_permissions import Permissions
from dojo.product.queries import get_authorized_products
from dojo.engagement.queries import get_authorized_engagements
from dojo.authorization.authorization_decorators import user_is_authorized
from dojo.importers.importer.importer import DojoDefaultImporter as Importer
import dojo.notifications.helper as notifications_helper
from dojo.endpoint.utils import save_endpoints_to_add
logger = logging.getLogger(__name__)
@cache_page(60 * 5) # cache for 5 minutes
@vary_on_cookie
def engagement_calendar(request):
if 'lead' not in request.GET or '0' in request.GET.getlist('lead'):
engagements = get_authorized_engagements(Permissions.Engagement_View)
else:
filters = []
leads = request.GET.getlist('lead', '')
if '-1' in request.GET.getlist('lead'):
leads.remove('-1')
filters.append(Q(lead__isnull=True))
filters.append(Q(lead__in=leads))
engagements = get_authorized_engagements(Permissions.Engagement_View).filter(reduce(operator.or_, filters))
engagements = engagements.select_related('lead')
engagements = engagements.prefetch_related('product')
add_breadcrumb(
title="Engagement Calendar", top_level=True, request=request)
return render(
request, 'dojo/calendar.html', {
'caltype': 'engagements',
'leads': request.GET.getlist('lead', ''),
'engagements': engagements,
'users': Dojo_User.objects.all()
})
def get_filtered_engagements(request, view):
if view not in ['all', 'active']:
raise ValidationError(f'View {view} is not allowed')
engagements = get_authorized_engagements(Permissions.Engagement_View).order_by('-target_start')
if view == 'active':
engagements = engagements.filter(active=True)
engagements = engagements.select_related('product', 'product__prod_type') \
.prefetch_related('lead', 'tags', 'product__tags')
if System_Settings.objects.get().enable_jira:
engagements = engagements.prefetch_related(
'jira_project__jira_instance',
'product__jira_project_set__jira_instance'
)
engagements = EngagementDirectFilter(request.GET, queryset=engagements)
return engagements
def get_test_counts(engagements):
# Get the test counts per engagement. As a separate query, this is much
# faster than annotating the above `engagements` query.
engagement_test_counts = {
test['engagement']: test['test_count']
for test in Test.objects.filter(
engagement__in=engagements
).values(
'engagement'
).annotate(
test_count=Count('engagement')
)
}
return engagement_test_counts
def engagements(request, view):
if not view:
view = 'active'
filtered_engagements = get_filtered_engagements(request, view)
engs = get_page_items(request, filtered_engagements.qs, 25)
product_name_words = sorted(get_authorized_products(Permissions.Product_View).values_list('name', flat=True))
engagement_name_words = sorted(get_authorized_engagements(Permissions.Engagement_View).values_list('name', flat=True).distinct())
add_breadcrumb(
title=f"{view.capitalize()} Engagements",
top_level=not len(request.GET),
request=request)
return render(
request, 'dojo/engagement.html', {
'engagements': engs,
'engagement_test_counts': get_test_counts(filtered_engagements.qs),
'filter_form': filtered_engagements.form,
'product_name_words': product_name_words,
'engagement_name_words': engagement_name_words,
'view': view.capitalize(),
})
def engagements_all(request):
products_with_engagements = get_authorized_products(Permissions.Engagement_View)
products_with_engagements = products_with_engagements.filter(~Q(engagement=None)).distinct()
# count tests using a prefetch annotation instead of traversing 'engagement_set__test_set', to avoid loading every test into memory just to count them
filter_qs = products_with_engagements.prefetch_related(
Prefetch('engagement_set', queryset=Engagement.objects.all().annotate(test_count=Count('test__id')))
)
filter_qs = filter_qs.prefetch_related(
'engagement_set__tags',
'prod_type',
'engagement_set__lead',
'tags',
)
if System_Settings.objects.get().enable_jira:
filter_qs = filter_qs.prefetch_related(
'engagement_set__jira_project__jira_instance',
'jira_project_set__jira_instance'
)
filtered = EngagementFilter(
request.GET,
queryset=filter_qs
)
prods = get_page_items(request, filtered.qs, 25)
name_words = products_with_engagements.values_list('name', flat=True)
eng_words = get_authorized_engagements(Permissions.Engagement_View).values_list('name', flat=True).distinct()
add_breadcrumb(
title="All Engagements",
top_level=not len(request.GET),
request=request)
return render(
request, 'dojo/engagements_all.html', {
'products': prods,
'filter_form': filtered.form,
'name_words': sorted(set(name_words)),
'eng_words': sorted(set(eng_words)),
})
@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
def edit_engagement(request, eid):
engagement = Engagement.objects.get(pk=eid)
is_ci_cd = engagement.engagement_type == "CI/CD"
jira_project_form = None
jira_epic_form = None
jira_project = None
jira_error = False
if request.method == 'POST':
form = EngForm(request.POST, instance=engagement, cicd=is_ci_cd, product=engagement.product, user=request.user)
jira_project = jira_helper.get_jira_project(engagement, use_inheritance=False)
if form.is_valid():
# first save engagement details
new_status = form.cleaned_data.get('status')
engagement = form.save(commit=False)
if (new_status == "Cancelled" or new_status == "Completed"):
engagement.active = False
create_notification(event='close_engagement',
title='Closure of %s' % engagement.name,
description='The engagement "%s" was closed' % (engagement.name),
engagement=engagement, url=reverse('engagement_all_findings', args=(engagement.id, ))),
else:
engagement.active = True
engagement.save()
form.save_m2m()
messages.add_message(
request,
messages.SUCCESS,
'Engagement updated successfully.',
extra_tags='alert-success')
success, jira_project_form = jira_helper.process_jira_project_form(request, instance=jira_project, target='engagement', engagement=engagement, product=engagement.product)
error = not success
success, jira_epic_form = jira_helper.process_jira_epic_form(request, engagement=engagement)
error = error or not success
if not error:
if '_Add Tests' in request.POST:
return HttpResponseRedirect(
reverse('add_tests', args=(engagement.id, )))
else:
return HttpResponseRedirect(
reverse('view_engagement', args=(engagement.id, )))
else:
logger.debug(form.errors)
else:
form = EngForm(initial={'product': engagement.product}, instance=engagement, cicd=is_ci_cd, product=engagement.product, user=request.user)
jira_epic_form = None
if get_system_setting('enable_jira'):
jira_project = jira_helper.get_jira_project(engagement, use_inheritance=False)
jira_project_form = JIRAProjectForm(instance=jira_project, target='engagement', product=engagement.product)
logger.debug('showing jira-epic-form')
jira_epic_form = JIRAEngagementForm(instance=engagement)
if is_ci_cd:
title = 'Edit CI/CD Engagement'
else:
title = 'Edit Interactive Engagement'
product_tab = Product_Tab(engagement.product.id, title=title, tab="engagements")
product_tab.setEngagement(engagement)
return render(request, 'dojo/new_eng.html', {
'product_tab': product_tab,
'title': title,
'form': form,
'edit': True,
'jira_epic_form': jira_epic_form,
'jira_project_form': jira_project_form,
'engagement': engagement,
})
@user_is_authorized(Engagement, Permissions.Engagement_Delete, 'eid')
def delete_engagement(request, eid):
engagement = get_object_or_404(Engagement, pk=eid)
product = engagement.product
form = DeleteEngagementForm(instance=engagement)
if request.method == 'POST':
if 'id' in request.POST and str(engagement.id) == request.POST['id']:
form = DeleteEngagementForm(request.POST, instance=engagement)
if form.is_valid():
product = engagement.product
engagement.delete()
messages.add_message(
request,
messages.SUCCESS,
'Engagement and relationships removed.',
extra_tags='alert-success')
create_notification(event='other',
title='Deletion of %s' % engagement.name,
product=product,
description='The engagement "%s" was deleted by %s' % (engagement.name, request.user),
url=request.build_absolute_uri(reverse('view_engagements', args=(product.id, ))),
recipients=[engagement.lead],
icon="exclamation-triangle")
return HttpResponseRedirect(reverse("view_engagements", args=(product.id, )))
collector = NestedObjects(using=DEFAULT_DB_ALIAS)
collector.collect([engagement])
rels = collector.nested()
product_tab = Product_Tab(product.id, title="Delete Engagement", tab="engagements")
product_tab.setEngagement(engagement)
return render(request, 'dojo/delete_engagement.html', {
'product_tab': product_tab,
'engagement': engagement,
'form': form,
'rels': rels,
})
@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
def view_engagement(request, eid):
eng = get_object_or_404(Engagement, id=eid)
tests = eng.test_set.all().order_by('test_type__name', '-updated')
default_page_num = 10
tests_filter = EngagementTestFilter(request.GET, queryset=tests, engagement=eng)
paged_tests = get_page_items(request, tests_filter.qs, default_page_num)
# prefetch only after creating the filters to avoid https://code.djangoproject.com/ticket/23771 and https://code.djangoproject.com/ticket/25375
paged_tests.object_list = prefetch_for_view_tests(paged_tests.object_list)
prod = eng.product
risks_accepted = eng.risk_acceptance.all().select_related('owner').annotate(accepted_findings_count=Count('accepted_findings__id'))
preset_test_type = None
network = None
if eng.preset:
preset_test_type = eng.preset.test_type.all()
network = eng.preset.network_locations.all()
system_settings = System_Settings.objects.get()
jissue = jira_helper.get_jira_issue(eng)
jira_project = jira_helper.get_jira_project(eng)
try:
check = Check_List.objects.get(engagement=eng)
except:
check = None
pass
notes = eng.notes.all()
note_type_activation = Note_Type.objects.filter(is_active=True).count()
if note_type_activation:
available_note_types = find_available_notetypes(notes)
form = DoneForm()
files = eng.files.all()
if request.method == 'POST':
user_has_permission_or_403(request.user, eng, Permissions.Note_Add)
eng.progress = 'check_list'
eng.save()
if note_type_activation:
form = TypedNoteForm(request.POST, available_note_types=available_note_types)
else:
form = NoteForm(request.POST)
if form.is_valid():
new_note = form.save(commit=False)
new_note.author = request.user
new_note.date = timezone.now()
new_note.save()
eng.notes.add(new_note)
if note_type_activation:
form = TypedNoteForm(available_note_types=available_note_types)
else:
form = NoteForm()
url = request.build_absolute_uri(reverse("view_engagement", args=(eng.id,)))
title = "Engagement: %s on %s" % (eng.name, eng.product.name)
messages.add_message(request,
messages.SUCCESS,
'Note added successfully.',
extra_tags='alert-success')
else:
if note_type_activation:
form = TypedNoteForm(available_note_types=available_note_types)
else:
form = NoteForm()
creds = Cred_Mapping.objects.filter(
product=eng.product).select_related('cred_id').order_by('cred_id')
cred_eng = Cred_Mapping.objects.filter(
engagement=eng.id).select_related('cred_id').order_by('cred_id')
add_breadcrumb(parent=eng, top_level=False, request=request)
title = ""
if eng.engagement_type == "CI/CD":
title = " CI/CD"
product_tab = Product_Tab(prod.id, title="View" + title + " Engagement", tab="engagements")
product_tab.setEngagement(eng)
return render(
request, 'dojo/view_eng.html', {
'eng': eng, | 'filter': tests_filter,
'check': check,
'threat': eng.tmodel_path,
'form': form,
'notes': notes,
'files': files,
'risks_accepted': risks_accepted,
'jissue': jissue,
'jira_project': jira_project,
'creds': creds,
'cred_eng': cred_eng,
'network': network,
'preset_test_type': preset_test_type
})
def prefetch_for_view_tests(tests):
prefetched = tests
if isinstance(tests,
QuerySet): # old code can arrive here with prods being a list because the query was already executed
prefetched = prefetched.select_related('lead')
prefetched = prefetched.prefetch_related('tags', 'test_type', 'notes')
prefetched = prefetched.annotate(count_findings_test_all=Count('finding__id', distinct=True))
prefetched = prefetched.annotate(count_findings_test_active=Count('finding__id', filter=Q(finding__active=True), distinct=True))
prefetched = prefetched.annotate(count_findings_test_active_verified=Count('finding__id', filter=Q(finding__active=True) & Q(finding__verified=True), distinct=True))
prefetched = prefetched.annotate(count_findings_test_mitigated=Count('finding__id', filter=Q(finding__is_mitigated=True), distinct=True))
prefetched = prefetched.annotate(count_findings_test_dups=Count('finding__id', filter=Q(finding__duplicate=True), distinct=True))
prefetched = prefetched.annotate(total_reimport_count=Count('test_import__id', filter=Q(test_import__type=Test_Import.REIMPORT_TYPE), distinct=True))
else:
logger.warn('unable to prefetch because query was already executed')
return prefetched
@user_is_authorized(Engagement, Permissions.Test_Add, 'eid')
def add_tests(request, eid):
eng = Engagement.objects.get(id=eid)
cred_form = CredMappingForm()
cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter(
engagement=eng).order_by('cred_id')
if request.method == 'POST':
form = TestForm(request.POST, engagement=eng)
cred_form = CredMappingForm(request.POST)
cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter(
engagement=eng).order_by('cred_id')
if form.is_valid():
new_test = form.save(commit=False)
# set default scan_type as it's used in reimport
new_test.scan_type = new_test.test_type.name
new_test.engagement = eng
try:
new_test.lead = User.objects.get(id=form['lead'].value())
except:
new_test.lead = None
pass
# Set status to in progress if a test is added
if eng.status != "In Progress" and eng.active is True:
eng.status = "In Progress"
eng.save()
new_test.save()
# Save the credential to the test
if cred_form.is_valid():
if cred_form.cleaned_data['cred_user']:
# Select the credential mapping object from the selected list and only allow if the credential is associated with the product
cred_user = Cred_Mapping.objects.filter(
pk=cred_form.cleaned_data['cred_user'].id,
engagement=eid).first()
new_f = cred_form.save(commit=False)
new_f.test = new_test
new_f.cred_id = cred_user.cred_id
new_f.save()
messages.add_message(
request,
messages.SUCCESS,
'Test added successfully.',
extra_tags='alert-success')
notifications_helper.notify_test_created(new_test)
if '_Add Another Test' in request.POST:
return HttpResponseRedirect(
reverse('add_tests', args=(eng.id, )))
elif '_Add Findings' in request.POST:
return HttpResponseRedirect(
reverse('add_findings', args=(new_test.id, )))
elif '_Finished' in request.POST:
return HttpResponseRedirect(
reverse('view_engagement', args=(eng.id, )))
else:
form = TestForm(engagement=eng)
form.initial['target_start'] = eng.target_start
form.initial['target_end'] = eng.target_end
form.initial['lead'] = request.user
add_breadcrumb(
parent=eng, title="Add Tests", top_level=False, request=request)
product_tab = Product_Tab(eng.product.id, title="Add Tests", tab="engagements")
product_tab.setEngagement(eng)
return render(request, 'dojo/add_tests.html', {
'product_tab': product_tab,
'form': form,
'cred_form': cred_form,
'eid': eid,
'eng': eng
})
# Can't use the easy decorator because either eid or pid may be used
def import_scan_results(request, eid=None, pid=None):
engagement = None
form = ImportScanForm()
cred_form = CredMappingForm()
finding_count = 0
jform = None
user = request.user
if eid:
engagement = get_object_or_404(Engagement, id=eid)
engagement_or_product = engagement
cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter(engagement=engagement).order_by('cred_id')
elif pid:
product = get_object_or_404(Product, id=pid)
engagement_or_product = product
elif not user.is_staff:
raise PermissionDenied
user_has_permission_or_403(user, engagement_or_product, Permissions.Import_Scan_Result)
push_all_jira_issues = jira_helper.is_push_all_issues(engagement_or_product)
if request.method == "POST":
form = ImportScanForm(request.POST, request.FILES)
cred_form = CredMappingForm(request.POST)
cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter(
engagement=engagement).order_by('cred_id')
if jira_helper.get_jira_project(engagement_or_product):
jform = JIRAImportScanForm(request.POST, push_all=push_all_jira_issues, prefix='jiraform')
logger.debug('jform valid: %s', jform.is_valid())
logger.debug('jform errors: %s', jform.errors)
if form.is_valid() and (jform is None or jform.is_valid()):
scan = request.FILES.get('file', None)
scan_date = form.cleaned_data['scan_date']
minimum_severity = form.cleaned_data['minimum_severity']
active = form.cleaned_data['active']
verified = form.cleaned_data['verified']
scan_type = request.POST['scan_type']
tags = form.cleaned_data['tags']
version = form.cleaned_data['version']
branch_tag = form.cleaned_data.get('branch_tag', None)
build_id = form.cleaned_data.get('build_id', None)
commit_hash = form.cleaned_data.get('commit_hash', None)
api_scan_configuration = form.cleaned_data.get('api_scan_configuration', None)
service = form.cleaned_data.get('service', None)
close_old_findings = form.cleaned_data.get('close_old_findings', None)
# Will save in the provided environment or in the `Development` one if absent
environment_id = request.POST.get('environment', 'Development')
environment = Development_Environment.objects.get(id=environment_id)
group_by = form.cleaned_data.get('group_by', None)
# TODO move to form validation?
if scan and is_scan_file_too_large(scan):
messages.add_message(request,
messages.ERROR,
"Report file is too large. Maximum supported size is {} MB".format(settings.SCAN_FILE_MAX_SIZE),
extra_tags='alert-danger')
return HttpResponseRedirect(reverse('import_scan_results', args=(engagement,)))
# Allows for a test to be imported with an engagement created on the fly
if engagement is None:
engagement = Engagement()
engagement.name = "AdHoc Import - " + strftime("%a, %d %b %Y %X", timezone.now().timetuple())
engagement.threat_model = False
engagement.api_test = False
engagement.pen_test = False
engagement.check_list = False
engagement.target_start = timezone.now().date()
engagement.target_end = timezone.now().date()
engagement.product = product
engagement.active = True
engagement.status = 'In Progress'
engagement.version = version
engagement.branch_tag = branch_tag
engagement.build_id = build_id
engagement.commit_hash = commit_hash
engagement.save()
# can't use helper as when push_all_jira_issues is True, the checkbox gets disabled and is always false
# push_to_jira = jira_helper.is_push_to_jira(new_finding, jform.cleaned_data.get('push_to_jira'))
push_to_jira = push_all_jira_issues or (jform and jform.cleaned_data.get('push_to_jira'))
error = False
# Save newly added endpoints
added_endpoints = save_endpoints_to_add(form.endpoints_to_add_list, engagement.product)
try:
importer = Importer()
test, finding_count, closed_finding_count = importer.import_scan(scan, scan_type, engagement, user, environment, active=active, verified=verified, tags=tags,
minimum_severity=minimum_severity, endpoints_to_add=list(form.cleaned_data['endpoints']) + added_endpoints, scan_date=scan_date,
version=version, branch_tag=branch_tag, build_id=build_id, commit_hash=commit_hash, push_to_jira=push_to_jira,
close_old_findings=close_old_findings, group_by=group_by, api_scan_configuration=api_scan_configuration, service=service)
message = f'{scan_type} processed a total of {finding_count} findings'
if close_old_findings:
message = message + ' and closed %d findings' % (closed_finding_count)
message = message + "."
add_success_message_to_response(message)
except Exception as e:
logger.exception(e)
add_error_message_to_response('An exception occurred during the report import: %s' % str(e))
error = True
# Save the credential to the test
if cred_form.is_valid():
if cred_form.cleaned_data['cred_user']:
# Select the credential mapping object from the selected list and only allow if the credential is associated with the product
cred_user = Cred_Mapping.objects.filter(
pk=cred_form.cleaned_data['cred_user'].id,
engagement=eid).first()
new_f = cred_form.save(commit=False)
new_f.test = test
new_f.cred_id = cred_user.cred_id
new_f.save()
if not error:
return HttpResponseRedirect(
reverse('product_open_findings', args=(pid, )))
prod_id = None
custom_breadcrumb = None
title = "Import Scan Results"
if engagement:
prod_id = engagement.product.id
product_tab = Product_Tab(prod_id, title=title, tab="engagements")
product_tab.setEngagement(engagement)
else:
prod_id = pid
custom_breadcrumb = {"", ""}
product_tab = Product_Tab(prod_id, title=title, tab="findings")
if jira_helper.get_jira_project(engagement_or_product):
jform = JIRAImportScanForm(push_all=push_all_jira_issues, prefix='jiraform')
form.fields['endpoints'].queryset = Endpoint.objects.filter(product__id=product_tab.product.id)
form.fields['api_scan_configuration'].queryset = Product_API_Scan_Configuration.objects.filter(product__id=product_tab.product.id)
return render(request,
'dojo/import_scan_results.html',
{'form': form,
'product_tab': product_tab,
'engagement_or_product': engagement_or_product,
'custom_breadcrumb': custom_breadcrumb,
'title': title,
'cred_form': cred_form,
'jform': jform,
'scan_types': get_scan_types_sorted(),
})
@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
def close_eng(request, eid):
eng = Engagement.objects.get(id=eid)
close_engagement(eng)
messages.add_message(
request,
messages.SUCCESS,
'Engagement closed successfully.',
extra_tags='alert-success')
create_notification(event='close_engagement',
title='Closure of %s' % eng.name,
description='The engagement "%s" was closed' % (eng.name),
engagement=eng, url=reverse('engagement_all_findings', args=(eng.id, ))),
return HttpResponseRedirect(reverse("view_engagements", args=(eng.product.id, )))
@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
def reopen_eng(request, eid):
eng = Engagement.objects.get(id=eid)
reopen_engagement(eng)
messages.add_message(
request,
messages.SUCCESS,
'Engagement reopened successfully.',
extra_tags='alert-success')
create_notification(event='other',
title='Reopening of %s' % eng.name,
engagement=eng,
description='The engagement "%s" was reopened' % (eng.name),
url=reverse('view_engagement', args=(eng.id, ))),
return HttpResponseRedirect(reverse("view_engagements", args=(eng.product.id, )))
"""
Greg:
status: in production
method to complete checklists from the engagement view
"""
@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
def complete_checklist(request, eid):
eng = get_object_or_404(Engagement, id=eid)
try:
checklist = Check_List.objects.get(engagement=eng)
except:
checklist = None
pass
add_breadcrumb(
parent=eng,
title="Complete checklist",
top_level=False,
request=request)
if request.method == 'POST':
tests = Test.objects.filter(engagement=eng)
findings = Finding.objects.filter(test__in=tests).all()
form = CheckForm(request.POST, instance=checklist, findings=findings)
if form.is_valid():
cl = form.save(commit=False)
try:
check_l = Check_List.objects.get(engagement=eng)
cl.id = check_l.id
cl.save()
form.save_m2m()
except:
cl.engagement = eng
cl.save()
form.save_m2m()
pass
messages.add_message(
request,
messages.SUCCESS,
'Checklist saved.',
extra_tags='alert-success')
return HttpResponseRedirect(
reverse('view_engagement', args=(eid, )))
else:
tests = Test.objects.filter(engagement=eng)
findings = Finding.objects.filter(test__in=tests).all()
form = CheckForm(instance=checklist, findings=findings)
product_tab = Product_Tab(eng.product.id, title="Checklist", tab="engagements")
product_tab.setEngagement(eng)
return render(request, 'dojo/checklist.html', {
'form': form,
'product_tab': product_tab,
'eid': eng.id,
'findings': findings,
})
@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
def add_risk_acceptance(request, eid, fid=None):
eng = get_object_or_404(Engagement, id=eid)
finding = None
if fid:
finding = get_object_or_404(Finding, id=fid)
if not eng.product.enable_full_risk_acceptance:
raise PermissionDenied()
if request.method == 'POST':
form = RiskAcceptanceForm(request.POST, request.FILES)
if form.is_valid():
# first capture notes param as it cannot be saved directly as m2m
notes = None
if form.cleaned_data['notes']:
notes = Notes(
entry=form.cleaned_data['notes'],
author=request.user,
date=timezone.now())
notes.save()
del form.cleaned_data['notes']
try:
# we sometimes see a weird exception here, but are unable to reproduce.
# we add some logging in case it happens
risk_acceptance = form.save()
except Exception as e:
logger.debug(vars(request.POST))
logger.error(vars(form))
logger.exception(e)
raise
# attach note to risk acceptance object now in database
if notes:
risk_acceptance.notes.add(notes)
eng.risk_acceptance.add(risk_acceptance)
findings = form.cleaned_data['accepted_findings']
risk_acceptance = ra_helper.add_findings_to_risk_acceptance(risk_acceptance, findings)
messages.add_message(
request,
messages.SUCCESS,
'Risk acceptance saved.',
extra_tags='alert-success')
return redirect_to_return_url_or_else(request, reverse('view_engagement', args=(eid, )))
else:
risk_acceptance_title_suggestion = 'Accept: %s' % finding
form = RiskAcceptanceForm(initial={'owner': request.user, 'name': risk_acceptance_title_suggestion})
finding_choices = Finding.objects.filter(duplicate=False, test__engagement=eng).filter(NOT_ACCEPTED_FINDINGS_QUERY).order_by('title')
form.fields['accepted_findings'].queryset = finding_choices
if fid:
form.fields['accepted_findings'].initial = {fid}
product_tab = Product_Tab(eng.product.id, title="Risk Acceptance", tab="engagements")
product_tab.setEngagement(eng)
return render(request, 'dojo/add_risk_acceptance.html', {
'eng': eng,
'product_tab': product_tab,
'form': form
})
@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
def view_risk_acceptance(request, eid, raid):
return view_edit_risk_acceptance(request, eid=eid, raid=raid, edit_mode=False)
@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
def edit_risk_acceptance(request, eid, raid):
return view_edit_risk_acceptance(request, eid=eid, raid=raid, edit_mode=True)
# will only be called by view_risk_acceptance and edit_risk_acceptance
def view_edit_risk_acceptance(request, eid, raid, edit_mode=False):
risk_acceptance = get_object_or_404(Risk_Acceptance, pk=raid)
eng = get_object_or_404(Engagement, pk=eid)
if edit_mode and not eng.product.enable_full_risk_acceptance:
raise PermissionDenied()
risk_acceptance_form = None
errors = False
if request.method == 'POST':
# deleting before instantiating the form otherwise django messes up and we end up with an empty path value
if len(request.FILES) > 0:
logger.debug('new proof uploaded')
risk_acceptance.path.delete()
if 'decision' in request.POST:
old_expiration_date = risk_acceptance.expiration_date
risk_acceptance_form = EditRiskAcceptanceForm(request.POST, request.FILES, instance=risk_acceptance)
errors = errors or not risk_acceptance_form.is_valid()
if not errors:
logger.debug('path: %s', risk_acceptance_form.cleaned_data['path'])
risk_acceptance_form.save()
if risk_acceptance.expiration_date != old_expiration_date:
# risk acceptance was changed, check if risk acceptance needs to be reinstated and findings made accepted again
ra_helper.reinstate(risk_acceptance, old_expiration_date)
messages.add_message(
request,
messages.SUCCESS,
'Risk Acceptance saved successfully.',
extra_tags='alert-success')
if 'entry' in request.POST:
note_form = NoteForm(request.POST)
errors = errors or not note_form.is_valid()
if not errors:
new_note = note_form.save(commit=False)
new_note.author = request.user
new_note.date = timezone.now()
new_note.save()
risk_acceptance.notes.add(new_note)
messages.add_message(
request,
messages.SUCCESS,
'Note added successfully.',
extra_tags='alert-success')
if 'delete_note' in request.POST:
note = get_object_or_404(Notes, pk=request.POST['delete_note_id'])
if note.author.username == request.user.username:
risk_acceptance.notes.remove(note)
note.delete()
messages.add_message(
request,
messages.SUCCESS,
'Note deleted successfully.',
extra_tags='alert-success')
else:
messages.add_message(
request,
messages.ERROR,
"Since you are not the note's author, it was not deleted.",
extra_tags='alert-danger')
if 'remove_finding' in request.POST:
finding = get_object_or_404(
Finding, pk=request.POST['remove_finding_id'])
ra_helper.remove_finding_from_risk_acceptance(risk_acceptance, finding)
messages.add_message(
request,
messages.SUCCESS,
'Finding removed successfully from risk acceptance.',
extra_tags='alert-success')
if 'replace_file' in request.POST:
replace_form = ReplaceRiskAcceptanceProofForm(
request.POST, request.FILES, instance=risk_acceptance)
errors = errors or not replace_form.is_valid()
if not errors:
replace_form.save()
messages.add_message(
request,
messages.SUCCESS,
'New Proof uploaded successfully.',
extra_tags='alert-success')
else:
logger.error(replace_form.errors)
if 'add_findings' in request.POST:
add_findings_form = AddFindingsRiskAcceptanceForm(
request.POST, request.FILES, instance=risk_acceptance)
errors = errors or not add_findings_form.is_valid()
if not errors:
findings = add_findings_form.cleaned_data['accepted_findings']
ra_helper.add_findings_to_risk_acceptance(risk_acceptance, findings)
messages.add_message(
request,
messages.SUCCESS,
'Finding%s added successfully.' % ('s' if len(findings) > 1
else ''),
extra_tags='alert-success')
if not errors:
logger.debug('redirecting to return_url')
return redirect_to_return_url_or_else(request, reverse("view_risk_acceptance", args=(eid, raid)))
else:
logger.error('errors found')
else:
if edit_mode:
risk_acceptance_form = EditRiskAcceptanceForm(instance=risk_acceptance)
note_form = NoteForm()
replace_form = ReplaceRiskAcceptanceProofForm(instance=risk_acceptance)
add_findings_form = AddFindingsRiskAcceptanceForm(instance=risk_acceptance)
accepted_findings = risk_acceptance.accepted_findings.order_by('numerical_severity')
fpage = get_page_items(request, accepted_findings, 15)
unaccepted_findings = Finding.objects.filter(test__in=eng.test_set.all()) \
.exclude(id__in=accepted_findings).order_by("title")
add_fpage = get_page_items(request, unaccepted_findings, 10, 'apage')
# on this page we need to add unaccepted findings as possible findings to add as accepted
add_findings_form.fields[
"accepted_findings"].queryset = add_fpage.object_list
product_tab = Product_Tab(eng.product.id, title="Risk Acceptance", tab="engagements")
product_tab.setEngagement(eng)
return render(
request, 'dojo/view_risk_acceptance.html', {
'risk_acceptance': risk_acceptance,
'engagement': eng,
'product_tab': product_tab,
'accepted_findings': fpage,
'notes': risk_acceptance.notes.all(),
'eng': eng,
'edit_mode': edit_mode,
'risk_acceptance_form': risk_acceptance_form,
'note_form': note_form,
'replace_form': replace_form,
'add_findings_form': add_findings_form,
# 'show_add_findings_form': len(unaccepted_findings),
'request': request,
'add_findings': add_fpage,
'return_url': get_return_url(request),
})
@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
def expire_risk_acceptance(request, eid, raid):
risk_acceptance = get_object_or_404(prefetch_for_expiration(Risk_Acceptance.objects.all()), pk=raid)
eng = get_object_or_404(Engagement, pk=eid)
ra_helper.expire_now(risk_acceptance)
return redirect_to_return_url_or_else(request, reverse("view_risk_acceptance", args=(eid, raid)))
@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
def reinstate_risk_acceptance(request, eid, raid):
risk_acceptance = get_object_or_404(prefetch_for_expiration(Risk_Acceptance.objects.all()), pk=raid)
eng = get_object_or_404(Engagement, pk=eid)
if not eng.product.enable_full_risk_acceptance:
raise PermissionDenied()
ra_helper.reinstate(risk_acceptance, risk_acceptance.expiration_date)
return redirect_to_return_url_or_else(request, reverse("view_risk_acceptance", args=(eid, raid)))
@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
def delete_risk_acceptance(request, eid, raid):
risk_acceptance = get_object_or_404(Risk_Acceptance, pk=raid)
eng = get_object_or_404(Engagement, pk=eid)
ra_helper.delete(eng, risk_acceptance)
messages.add_message(
request,
messages.SUCCESS,
'Risk acceptance deleted successfully.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse("view_engagement", args=(eng.id, )))
@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
def download_risk_acceptance(request, eid, raid):
import mimetypes
mimetypes.init()
risk_acceptance = get_object_or_404(Risk_Acceptance, pk=raid)
response = StreamingHttpResponse(
FileIterWrapper(
open(settings.MEDIA_ROOT + "/" + risk_acceptance.path.name, mode='rb')))
response['Content-Disposition'] = 'attachment; filename="%s"' \
% risk_acceptance.filename()
mimetype, encoding = mimetypes.guess_type(risk_acceptance.path.name)
response['Content-Type'] = mimetype
return response
"""
Greg
status: in production
Upload a threat model at the engagement level. Threat models are stored
under the media folder.
"""
@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
def upload_threatmodel(request, eid):
eng = Engagement.objects.get(id=eid)
add_breadcrumb(
parent=eng,
title="Upload a threat model",
top_level=False,
request=request)
if request.method == 'POST':
form = UploadThreatForm(request.POST, request.FILES)
if form.is_valid():
handle_uploaded_threat(request.FILES['file'], eng)
eng.progress = 'other'
eng.threat_model = True
eng.save()
messages.add_message(
request,
messages.SUCCESS,
'Threat model saved.',
extra_tags='alert-success')
return HttpResponseRedirect(
reverse('view_engagement', args=(eid, )))
else:
form = UploadThreatForm()
product_tab = Product_Tab(eng.product.id, title="Upload Threat Model", tab="engagements")
return render(request, 'dojo/up_threat.html', {
'form': form,
'product_tab': product_tab,
'eng': eng,
})
@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
def view_threatmodel(request, eid):
eng = get_object_or_404(Engagement, pk=eid)
response = FileResponse(open(eng.tmodel_path, 'rb'))
return response
@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
def engagement_ics(request, eid):
eng = get_object_or_404(Engagement, id=eid)
start_date = datetime.combine(eng.target_start, datetime.min.time())
end_date = datetime.combine(eng.target_end, datetime.max.time())
uid = "dojo_eng_%d_%d" % (eng.id, eng.product.id)
cal = get_cal_event(
start_date, end_date,
"Engagement: %s (%s)" % (eng.name, eng.product.name),
"Set aside for engagement %s, on product %s. Additional detail can be found at %s"
% (eng.name, eng.product.name,
request.build_absolute_uri(
(reverse("view_engagement", args=(eng.id, ))))), uid)
output = cal.serialize()
response = HttpResponse(content=output)
response['Content-Type'] = 'text/calendar'
response['Content-Disposition'] = 'attachment; filename=%s.ics' % eng.name
return response
def get_list_index(list, index):
try:
element = list[index]
except Exception as e:
element = None
return element
def get_engagements(request):
url = request.META.get('QUERY_STRING')
if not url:
raise ValidationError('Please use the export button when exporting engagements')
else:
if url.startswith('url='):
url = url[4:]
path_items = list(filter(None, re.split('/|\?', url))) # noqa W605
if not path_items or path_items[0] != 'engagement':
raise ValidationError('URL is not an engagement view')
view = query = None
if get_list_index(path_items, 1) in ['active', 'all']:
view = get_list_index(path_items, 1)
query = get_list_index(path_items, 2)
else:
view = 'active'
query = get_list_index(path_items, 1)
request.GET = QueryDict(query)
engagements = get_filtered_engagements(request, view).qs
test_counts = get_test_counts(engagements)
return engagements, test_counts
def get_excludes():
return ['is_ci_cd', 'jira_issue', 'jira_project', 'objects', 'unaccepted_open_findings']
def get_foreign_keys():
return ['build_server', 'lead', 'orchestration_engine', 'preset', 'product',
'report_type', 'requester', 'source_code_management_server']
def csv_export(request):
engagements, test_counts = get_engagements(request)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=engagements.csv'
writer = csv.writer(response)
first_row = True
for engagement in engagements:
if first_row:
fields = []
for key in dir(engagement):
if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
fields.append(key)
fields.append('tests')
writer.writerow(fields)
first_row = False
if not first_row:
fields = []
for key in dir(engagement):
if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
value = engagement.__dict__.get(key)
if key in get_foreign_keys() and getattr(engagement, key):
value = str(getattr(engagement, key))
if value and isinstance(value, str):
value = value.replace('\n', ' NEWLINE ').replace('\r', '')
fields.append(value)
fields.append(test_counts.get(engagement.id, 0))
writer.writerow(fields)
return response
def excel_export(request):
engagements, test_counts = get_engagements(request)
workbook = Workbook()
workbook.iso_dates = True
worksheet = workbook.active
worksheet.title = 'Engagements'
font_bold = Font(bold=True)
row_num = 1
for engagement in engagements:
if row_num == 1:
col_num = 1
for key in dir(engagement):
if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
cell = worksheet.cell(row=row_num, column=col_num, value=key)
cell.font = font_bold
col_num += 1
cell = worksheet.cell(row=row_num, column=col_num, value='tests')
cell.font = font_bold
row_num = 2
if row_num > 1:
col_num = 1
for key in dir(engagement):
if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
value = engagement.__dict__.get(key)
if key in get_foreign_keys() and getattr(engagement, key):
value = str(getattr(engagement, key))
if value and isinstance(value, datetime):
value = value.replace(tzinfo=None)
worksheet.cell(row=row_num, column=col_num, value=value)
col_num += 1
worksheet.cell(row=row_num, column=col_num, value=test_counts.get(engagement.id, 0))
row_num += 1
with NamedTemporaryFile() as tmp:
workbook.save(tmp.name)
tmp.seek(0)
stream = tmp.read()
response = HttpResponse(
content=stream,
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
)
response['Content-Disposition'] = 'attachment; filename=engagements.xlsx'
return response | 'product_tab': product_tab,
'system_settings': system_settings,
'tests': paged_tests, |
hash.rs | /*!
# `CheckSame` - Hasher
*/
use super::CheckSameError;
use blake3::{
Hash,
Hasher,
};
use rayon::{
iter::{
IntoParallelIterator,
ParallelIterator,
},
slice::ParallelSliceMut,
};
use std::{
fmt,
fs::File,
os::unix::ffi::OsStrExt,
path::{
Path,
PathBuf,
},
};
/// # Reset First.
///
/// When set, all existing `CheckSame` cache files will be deleted. When
/// combined with [`FLAG_CACHE`], the result will always look "new".
pub(super) const FLAG_RESET: u8 = 0b0001;
/// # Cache Mode.
///
/// Print the change status rather than the hash. This is either -1, 1, or 0,
/// indicating no previous cache, something changed, or all's the same,
/// respectively.
pub(super) const FLAG_CACHE: u8 = 0b0010;
#[derive(Debug, Clone, Copy)]
/// # Status.
///
/// This is a list of cache statuses, used internally by [`CheckSame`].
enum CheckedSame {
/// # We aren't worried about caching.
Noop,
/// # No change.
Same,
/// # The cache changed.
Changed,
/// # No previous cache.
New,
}
#[derive(Debug)]
/// # `CheckSame`.
///
/// This struct holds the hash data for a set of paths. The only public-facing
/// method is [`CheckSame::new`], which does all the work.
///
/// The resulting object can be sent to any formatted writer accepting
/// `Display`. If [`FLAG_CACHE`] is set, this will print the status; otherwise
/// the hash is printed.
pub(super) struct | {
/// # Key Hash.
///
/// This hash is used to calculate a unique file path for the set. It is
/// calculated by hashing all of the file paths in order.
key: Hash,
/// # Hash.
///
/// This is the cumulative `Blake3` hash of all included files. It is
/// calculated by hashing each file individually, in order, then hashing
/// those hashes.
///
/// This avoids the overhead of having to keep all file contents in memory
/// long enough to come up with a single hash.
hash: Hash,
/// # Cache status.
///
/// This holds the cache status of the set. When not in cache mode, this is
/// always [`CheckedSame::Noop`] and serves no purpose.
status: CheckedSame,
}
impl fmt::Display for CheckSame {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.status {
CheckedSame::Noop => f.write_str(&self.hash.to_hex()),
CheckedSame::Same => f.write_str("0"),
CheckedSame::Changed => f.write_str("1"),
CheckedSame::New => f.write_str("-1"),
}
}
}
impl From<Vec<PathBuf>> for CheckSame {
fn from(paths: Vec<PathBuf>) -> Self {
// First pass, hash all the files, consuming the original vector.
let mut raw: Vec<(PathBuf, Option<[u8; 32]>)> = paths.into_par_iter()
.map(|p| {
let hash = hash_file(&p);
(p, hash)
})
.collect();
// Resort by path for consistency.
raw.par_sort_by(|(a, _), (b, _)| a.cmp(b));
// Second pass, build the cumulative file/key hashes.
let mut all_h = Hasher::new();
let mut key_h = Hasher::new();
for (p, h) in raw {
key_h.update(p.as_os_str().as_bytes());
if let Some(hash) = h.as_ref() {
all_h.update(hash);
}
}
// We're done!
Self {
key: key_h.finalize(),
hash: all_h.finalize(),
status: CheckedSame::Noop,
}
}
}
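// A short illustration of the two-level scheme above (a sketch, not literal
// code): for paths [a, b] after sorting,
//
//     key  = blake3(path_bytes(a) ++ path_bytes(b))            // paths only
//     hash = blake3(blake3(contents(a)) ++ blake3(contents(b))) // hash of hashes
//
// so only one file ever needs to be streamed through a hasher at a time.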
impl CheckSame {
/// # New Instance.
///
/// This generates a new instance from a set of paths and flags. This is
/// the only public method for the struct; after running, make use of its
/// `Display` implementation to print the results.
///
/// If [`FLAG_RESET`] is passed, all existing cache references will be
/// cleared.
///
/// If [`FLAG_CACHE`] is passed, the resulting hash will be cached to a
/// temporary file, and `Display` will print the status (compared to any
/// previous cache) rather than the hash itself.
///
/// ## Errors
///
/// This will return an error if the path list is empty or any reset/cache
/// operations fail for any reason.
pub(crate) fn new(paths: Vec<PathBuf>, flags: u8) -> Result<Self, CheckSameError> {
// If there are no paths, there's (probably) nothing for us to do.
if paths.is_empty() {
// We need to reset any other random caches before leaving.
// Assuming the reset goes all right, this is a no-op rather than
// a shameful error.
if 0 != flags & FLAG_RESET {
reset(&tmp_dir()?)?;
return Err(CheckSameError::Noop);
}
// Otherwise shame!
return Err(CheckSameError::NoFiles);
}
// Consume and build.
let mut out = Self::from(paths);
// We need to do something with the cache directory.
if 0 < flags {
let cache_dir = tmp_dir()?;
// Reset the cache?
if 0 != flags & FLAG_RESET {
reset(&cache_dir)?;
}
// Check the cache?
if 0 != flags & FLAG_CACHE {
out.check_same(cache_dir)?;
}
}
Ok(out)
}
/// # Check Sameness.
///
/// This checks the temporary file cache to see if there is a previous
/// result for the path set, and if that value was different than the new
/// hash.
///
/// If there is a change, the cache is updated accordingly.
///
/// ## Errors
///
/// This method returns an error if the cache file cannot be written to.
fn check_same(&mut self, mut path: PathBuf) -> Result<(), CheckSameError> {
use std::io::Write;
// Generate a file path for the cache.
let key: &str = &self.key.to_hex();
path.push(key);
// Get the hash as bytes.
let bytes: &[u8] = self.hash.as_bytes();
// This is already cached.
if path.is_file() {
// If it is unchanged, we're done!
if std::fs::read(&path).unwrap_or_default() == bytes {
self.status = CheckedSame::Same;
return Ok(());
}
self.status = CheckedSame::Changed;
}
// This is something new.
else {
self.status = CheckedSame::New;
}
// Save it for next time.
File::create(&path)
.and_then(|mut out| out.write_all(bytes).and_then(|_| out.flush()))
.map_err(|_| CheckSameError::Write)
}
}
/// # Hash File.
///
/// Hash the contents of a file path if possible, returning the hash bytes on
/// success.
fn hash_file(path: &Path) -> Option<[u8; 32]> {
let mut file = File::open(path).ok()?;
let mut hasher = Hasher::new();
std::io::copy(&mut file, &mut hasher).ok()?;
Some(*(hasher.finalize().as_bytes()))
}
/// # Reset Cache.
///
/// This will attempt to remove all `CheckSame` caches, which are just stored
/// as files in a temporary directory generated by the program.
///
/// Note: this does not delete the directory itself; that is preserved for
/// future use.
///
/// ## Errors
///
/// This method returns an error in cases where the temporary directory cannot
/// be read, or any files within it cannot be deleted.
fn reset(cache_dir: &Path) -> Result<(), CheckSameError> {
let entries = std::fs::read_dir(cache_dir).map_err(|_| CheckSameError::Reset)?;
entries.filter_map(Result::ok).try_for_each(|path| {
let path = path.path();
if path.is_file() {
std::fs::remove_file(path).map_err(|_| CheckSameError::Reset)?;
}
Ok(())
})
}
/// # Get/Make Temporary Directory.
///
/// This retrieves/creates a temporary directory to store `CheckSame` cache
/// files in.
///
/// ## Errors
///
/// This will return an error if the directory path is blocked or cannot be
/// created.
fn tmp_dir() -> Result<PathBuf, CheckSameError> {
let mut dir = std::env::temp_dir();
dir.push("checksame");
// The directory has to exist.
if ! dir.is_dir() && (dir.exists() || std::fs::create_dir(&dir).is_err()) {
Err(CheckSameError::Tmp)
}
else { Ok(dir) }
}
| CheckSame |
registry_test.go | package commandregistry_test
import (
"strings"
"github.com/cloudfoundry/cli/cf/commandregistry"
. "github.com/cloudfoundry/cli/cf/commandregistry/fakecommand"
. "github.com/cloudfoundry/cli/testhelpers/matchers"
. "github.com/cloudfoundry/cli/cf/i18n"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("CommandRegistry", func() {
BeforeEach(func() {
commandregistry.Commands = commandregistry.NewRegistry() // because other tests load all the commands into the registry
})
Context("i18n", func() {
It("initialize i18n T() func", func() {
Expect(T).ToNot(BeNil())
})
})
| Describe("Register()", func() {
AfterEach(func() {
commandregistry.Commands.RemoveCommand("fake-command2")
})
It("registers a command and its alias into the Command Registry map", func() {
Expect(commandregistry.Commands.CommandExists("fake-command2")).To(BeFalse())
Expect(commandregistry.Commands.CommandExists("fc2")).To(BeFalse())
commandregistry.Register(FakeCommand2{})
Expect(commandregistry.Commands.CommandExists("fake-command2")).To(BeTrue())
Expect(commandregistry.Commands.CommandExists("fc2")).To(BeTrue())
})
})
Describe("CommandExists()", func() {
Context("when the command has been registered", func() {
BeforeEach(func() {
commandregistry.Register(FakeCommand1{})
})
AfterEach(func() {
commandregistry.Commands.RemoveCommand("fake-command")
})
It("returns true the command exists in the list", func() {
Expect(commandregistry.Commands.CommandExists("fake-command")).To(BeTrue())
})
It("returns true if the alias exists", func() {
Expect(commandregistry.Commands.CommandExists("fc1")).To(BeTrue())
})
})
It("returns false when the command has not been registered", func() {
Expect(commandregistry.Commands.CommandExists("non-exist-cmd")).To(BeFalse())
})
It("returns false if the command name is an empty string", func() {
Expect(commandregistry.Commands.CommandExists("")).To(BeFalse())
})
})
Describe("FindCommand()", func() {
Context("when the command has been registered", func() {
BeforeEach(func() {
commandregistry.Register(FakeCommand1{})
})
AfterEach(func() {
commandregistry.Commands.RemoveCommand("fake-command")
})
It("returns the command when the command's name is given", func() {
cmd := commandregistry.Commands.FindCommand("fake-command")
Expect(cmd.MetaData().Usage[0]).To(ContainSubstring("Usage of fake-command"))
Expect(cmd.MetaData().Description).To(Equal("Description for fake-command"))
})
It("returns the command when the command's alias is given", func() {
cmd := commandregistry.Commands.FindCommand("fc1")
Expect(cmd.MetaData().Usage[0]).To(ContainSubstring("Usage of fake-command"))
Expect(cmd.MetaData().Description).To(Equal("Description for fake-command"))
})
})
It("returns nil when the command has not been registered", func() {
cmd := commandregistry.Commands.FindCommand("fake-command")
Expect(cmd).To(BeNil())
})
})
Describe("ShowAllCommands()", func() {
BeforeEach(func() {
commandregistry.Register(FakeCommand1{})
commandregistry.Register(FakeCommand2{})
commandregistry.Register(FakeCommand3{})
})
AfterEach(func() {
commandregistry.Commands.RemoveCommand("fake-command")
commandregistry.Commands.RemoveCommand("fake-command2")
commandregistry.Commands.RemoveCommand("this-is-a-really-long-command-name-123123123123123123123") // fake-command3
})
It("show all the commands in registry", func() {
cmds := commandregistry.Commands.ListCommands()
Expect(cmds).To(ContainElement("fake-command2"))
Expect(cmds).To(ContainElement("this-is-a-really-long-command-name-123123123123123123123"))
Expect(cmds).To(ContainElement("fake-command"))
})
})
Describe("SetCommand()", func() {
It("replaces the command in registry with command provided", func() {
updatedCmd := FakeCommand1{Data: "This is new data"}
oldCmd := commandregistry.Commands.FindCommand("fake-command")
Expect(oldCmd).ToNot(Equal(updatedCmd))
commandregistry.Commands.SetCommand(updatedCmd)
oldCmd = commandregistry.Commands.FindCommand("fake-command")
Expect(oldCmd).To(Equal(updatedCmd))
})
})
Describe("TotalCommands()", func() {
Context("when there are commands registered", func() {
BeforeEach(func() {
commandregistry.Register(FakeCommand1{})
commandregistry.Register(FakeCommand2{})
commandregistry.Register(FakeCommand3{})
})
AfterEach(func() {
commandregistry.Commands.RemoveCommand("fake-command")
commandregistry.Commands.RemoveCommand("fake-command2")
commandregistry.Commands.RemoveCommand("this-is-a-really-long-command-name-123123123123123123123") // fake-command3
})
It("returns the total number of registered commands", func() {
Expect(commandregistry.Commands.TotalCommands()).To(Equal(3))
})
})
It("returns 0 when there are no commands registered", func() {
Expect(commandregistry.Commands.TotalCommands()).To(Equal(0))
})
})
Describe("Metadatas()", func() {
BeforeEach(func() {
commandregistry.Register(FakeCommand1{})
commandregistry.Register(FakeCommand2{})
commandregistry.Register(FakeCommand3{})
})
AfterEach(func() {
commandregistry.Commands.RemoveCommand("fake-command")
commandregistry.Commands.RemoveCommand("fake-command2")
commandregistry.Commands.RemoveCommand("this-is-a-really-long-command-name-123123123123123123123") // fake-command3
})
It("returns all the metadata for all registered commands", func() {
Expect(len(commandregistry.Commands.Metadatas())).To(Equal(3))
})
})
Describe("RemoveCommand()", func() {
BeforeEach(func() {
commandregistry.Register(FakeCommand1{})
})
It("removes the command in registry with command name provided", func() {
commandregistry.Commands.RemoveCommand("fake-command")
Expect(commandregistry.Commands.CommandExists("fake-command")).To(BeFalse())
})
})
Describe("MaxCommandNameLength()", func() {
BeforeEach(func() {
commandregistry.Register(FakeCommand1{})
commandregistry.Register(FakeCommand2{})
commandregistry.Register(FakeCommand3{})
})
AfterEach(func() {
commandregistry.Commands.RemoveCommand("fake-command")
commandregistry.Commands.RemoveCommand("fake-command2")
commandregistry.Commands.RemoveCommand("this-is-a-really-long-command-name-123123123123123123123") // fake-command3
})
It("returns the length of the longest command name", func() {
maxLen := commandregistry.Commands.MaxCommandNameLength()
Expect(maxLen).To(Equal(len("this-is-a-really-long-command-name-123123123123123123123")))
})
})
Describe("CommandUsage()", func() {
BeforeEach(func() {
commandregistry.Register(FakeCommand1{})
})
AfterEach(func() {
commandregistry.Commands.RemoveCommand("fake-command")
})
It("prints the name, description and usage of a command", func() {
o := commandregistry.Commands.CommandUsage("fake-command")
outputs := strings.Split(o, "\n")
Expect(outputs).To(BeInDisplayOrder(
[]string{"NAME:"},
[]string{" fake-command", "Description"},
[]string{"USAGE:"},
))
})
Context("i18n translations", func() {
var originalT func(string, ...interface{}) string
BeforeEach(func() {
originalT = T
})
AfterEach(func() {
T = originalT
})
It("includes ':' in caption translation strings for language like French to be translated correctly", func() {
nameCaption := "NAME:"
aliasCaption := "ALIAS:"
usageCaption := "USAGE:"
optionsCaption := "OPTIONS:"
captionCheckCount := 0
T = func(translationID string, args ...interface{}) string {
if strings.HasPrefix(translationID, "NAME") {
Expect(translationID).To(Equal(nameCaption))
captionCheckCount += 1
} else if strings.HasPrefix(translationID, "ALIAS") {
Expect(translationID).To(Equal(aliasCaption))
captionCheckCount += 1
} else if strings.HasPrefix(translationID, "USAGE") {
Expect(translationID).To(Equal(usageCaption))
captionCheckCount += 1
} else if strings.HasPrefix(translationID, "OPTIONS") {
Expect(translationID).To(Equal(optionsCaption))
captionCheckCount += 1
}
return translationID
}
commandregistry.Commands.CommandUsage("fake-command")
})
})
It("prints the flag options", func() {
o := commandregistry.Commands.CommandUsage("fake-command")
outputs := strings.Split(o, "\n")
Expect(outputs).To(BeInDisplayOrder(
[]string{"NAME:"},
[]string{"USAGE:"},
[]string{"OPTIONS:"},
[]string{"intFlag", "Usage for"},
))
})
It("replaces 'CF_NAME' with executable name from os.Arg[0]", func() {
o := commandregistry.Commands.CommandUsage("fake-command")
outputs := strings.Split(o, "\n")
Expect(outputs).To(BeInDisplayOrder(
[]string{"USAGE:"},
[]string{"cf", "Usage of"},
))
Consistently(outputs).ShouldNot(ContainSubstrings([]string{"CF_NAME"}))
})
})
}) | |
update-user.dto.ts | import { Role } from '../enums/roles.enum';
export interface UpdateUserDTO {
name?: string;
surname?: string;
email?: string;
birthDate?: string;
address?: UserAddress;
role?: Role;
}
export interface UserAddress { | city?: string;
street?: string;
street_address?: string;
apt_address?: string;
} | country?: string; |
keccak.go | // Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package keccak
import (
"fmt"
"gitlab.com/nitya-sattva/go-x11/hash"
)
// HashSize holds the size of a hash in bytes.
const HashSize = int(64)
// BlockSize holds the size of a block in bytes.
const BlockSize = uintptr(72)
////////////////
type digest struct {
ptr uintptr
cnt uintptr
h [25]uint64
b [144]byte
}
// New returns a new digest to compute a KECCAK512 hash.
func New() hash.Digest |
////////////////
// Reset resets the digest to its initial state.
func (ref *digest) Reset() {
ref.ptr = 0
ref.cnt = 200 - (512 >> 2)
h := ref.h[:]
h[0] = uint64(0x0)
h[1] = uint64(0xFFFFFFFFFFFFFFFF)
h[2] = uint64(0xFFFFFFFFFFFFFFFF)
h[3] = uint64(0x0)
h[4] = uint64(0x0)
h[5] = uint64(0x0)
h[6] = uint64(0x0)
h[7] = uint64(0x0)
h[8] = uint64(0xFFFFFFFFFFFFFFFF)
h[9] = uint64(0x0)
h[10] = uint64(0x0)
h[11] = uint64(0x0)
h[12] = uint64(0xFFFFFFFFFFFFFFFF)
h[13] = uint64(0x0)
h[14] = uint64(0x0)
h[15] = uint64(0x0)
h[16] = uint64(0x0)
h[17] = uint64(0xFFFFFFFFFFFFFFFF)
h[18] = uint64(0x0)
h[19] = uint64(0x0)
h[20] = uint64(0xFFFFFFFFFFFFFFFF)
h[21] = uint64(0x0)
h[22] = uint64(0x0)
h[23] = uint64(0x0)
h[24] = uint64(0x0)
}
// Sum appends the current hash to dst and returns the result
// as a slice. It does not change the underlying hash state.
func (ref *digest) Sum(dst []byte) []byte {
dgt := *ref
hsh := [64]byte{}
dgt.Close(hsh[:], 0, 0)
return append(dst, hsh[:]...)
}
// Write more data to the running hash, never returns an error.
func (ref *digest) Write(src []byte) (int, error) {
sln := uintptr(len(src))
fln := len(src)
ptr := ref.ptr
buf := ref.b[:]
sta := ref.h[:]
if sln < (BlockSize - ptr) {
copy(ref.b[ptr:], src)
ref.ptr += sln
return int(sln), nil
}
for sln > 0 {
cln := BlockSize - ptr
if cln > sln {
cln = sln
}
sln -= cln
copy(ref.b[ptr:], src[:cln])
src = src[cln:]
ptr += cln
if ptr == BlockSize {
sta[0] ^= decUInt64le(buf[0:])
sta[1] ^= decUInt64le(buf[8:])
sta[2] ^= decUInt64le(buf[16:])
sta[3] ^= decUInt64le(buf[24:])
sta[4] ^= decUInt64le(buf[32:])
sta[5] ^= decUInt64le(buf[40:])
sta[6] ^= decUInt64le(buf[48:])
sta[7] ^= decUInt64le(buf[56:])
sta[8] ^= decUInt64le(buf[64:])
for j := uintptr(0); j < 24; j++ {
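// Each iteration below is one round of Keccak-f[1600], fully unrolled.
// Theta, part 1: compute the combined column parities t0..t4
// (t[x] = C[x-1] ^ rotl(C[x+1], 1), with lanes indexed as sta[5*y+x]).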
var t0, t1, t2, t3, t4, tp uint64
{
var tt0, tt1, tt2, tt3 uint64
tt0 = sta[1] ^ sta[6]
tt1 = sta[11] ^ sta[16]
tt0 = tt0 ^ sta[21]
tt0 = tt0 ^ tt1
tt0 = (tt0 << 1) | (tt0 >> (64 - 1))
tt2 = sta[4] ^ sta[9]
tt3 = sta[14] ^ sta[19]
tt0 = tt0 ^ sta[24]
tt2 = tt2 ^ tt3
t0 = tt0 ^ tt2
tt0 = sta[2] ^ sta[7]
tt1 = sta[12] ^ sta[17]
tt0 = tt0 ^ sta[22]
tt0 = tt0 ^ tt1
tt0 = (tt0 << 1) | (tt0 >> (64 - 1))
tt2 = sta[0] ^ sta[5]
tt3 = sta[10] ^ sta[15]
tt0 = tt0 ^ sta[20]
tt2 = tt2 ^ tt3
t1 = tt0 ^ tt2
tt0 = sta[3] ^ sta[8]
tt1 = sta[13] ^ sta[18]
tt0 = tt0 ^ sta[23]
tt0 = tt0 ^ tt1
tt0 = (tt0 << 1) | (tt0 >> (64 - 1))
tt2 = sta[1] ^ sta[6]
tt3 = sta[11] ^ sta[16]
tt0 = tt0 ^ sta[21]
tt2 = tt2 ^ tt3
t2 = tt0 ^ tt2
tt0 = sta[4] ^ sta[9]
tt1 = sta[14] ^ sta[19]
tt0 = tt0 ^ sta[24]
tt0 = tt0 ^ tt1
tt0 = (tt0 << 1) | (tt0 >> (64 - 1))
tt2 = sta[2] ^ sta[7]
tt3 = sta[12] ^ sta[17]
tt0 = tt0 ^ sta[22]
tt2 = tt2 ^ tt3
t3 = tt0 ^ tt2
tt0 = sta[0] ^ sta[5]
tt1 = sta[10] ^ sta[15]
tt0 = tt0 ^ sta[20]
tt0 = tt0 ^ tt1
tt0 = (tt0 << 1) | (tt0 >> (64 - 1))
tt2 = sta[3] ^ sta[8]
tt3 = sta[13] ^ sta[18]
tt0 = tt0 ^ sta[23]
tt2 = tt2 ^ tt3
t4 = tt0 ^ tt2
}
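// Theta, part 2: fold each parity word into every lane of its column.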
sta[0] = sta[0] ^ t0
sta[1] = sta[1] ^ t1
sta[2] = sta[2] ^ t2
sta[3] = sta[3] ^ t3
sta[4] = sta[4] ^ t4
sta[5] = sta[5] ^ t0
sta[6] = sta[6] ^ t1
sta[7] = sta[7] ^ t2
sta[8] = sta[8] ^ t3
sta[9] = sta[9] ^ t4
sta[10] = sta[10] ^ t0
sta[11] = sta[11] ^ t1
sta[12] = sta[12] ^ t2
sta[13] = sta[13] ^ t3
sta[14] = sta[14] ^ t4
sta[15] = sta[15] ^ t0
sta[16] = sta[16] ^ t1
sta[17] = sta[17] ^ t2
sta[18] = sta[18] ^ t3
sta[23] = sta[23] ^ t3
sta[19] = sta[19] ^ t4
sta[20] = sta[20] ^ t0
sta[22] = sta[22] ^ t2
sta[21] = sta[21] ^ t1
sta[24] = sta[24] ^ t4
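// Rho: rotate each lane (except sta[0]) left by its fixed per-lane offset.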
sta[1] = (sta[1] << 1) | (sta[1] >> (64 - 1))
sta[2] = (sta[2] << 62) | (sta[2] >> (64 - 62))
sta[3] = (sta[3] << 28) | (sta[3] >> (64 - 28))
sta[4] = (sta[4] << 27) | (sta[4] >> (64 - 27))
sta[5] = (sta[5] << 36) | (sta[5] >> (64 - 36))
sta[6] = (sta[6] << 44) | (sta[6] >> (64 - 44))
sta[7] = (sta[7] << 6) | (sta[7] >> (64 - 6))
sta[8] = (sta[8] << 55) | (sta[8] >> (64 - 55))
sta[9] = (sta[9] << 20) | (sta[9] >> (64 - 20))
sta[10] = (sta[10] << 3) | (sta[10] >> (64 - 3))
sta[11] = (sta[11] << 10) | (sta[11] >> (64 - 10))
sta[12] = (sta[12] << 43) | (sta[12] >> (64 - 43))
sta[13] = (sta[13] << 25) | (sta[13] >> (64 - 25))
sta[14] = (sta[14] << 39) | (sta[14] >> (64 - 39))
sta[15] = (sta[15] << 41) | (sta[15] >> (64 - 41))
sta[16] = (sta[16] << 45) | (sta[16] >> (64 - 45))
sta[17] = (sta[17] << 15) | (sta[17] >> (64 - 15))
sta[18] = (sta[18] << 21) | (sta[18] >> (64 - 21))
sta[19] = (sta[19] << 8) | (sta[19] >> (64 - 8))
sta[20] = (sta[20] << 18) | (sta[20] >> (64 - 18))
sta[21] = (sta[21] << 2) | (sta[21] >> (64 - 2))
sta[22] = (sta[22] << 61) | (sta[22] >> (64 - 61))
sta[23] = (sta[23] << 56) | (sta[23] >> (64 - 56))
sta[24] = (sta[24] << 14) | (sta[24] >> (64 - 14))
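// Chi: non-linear mixing of five-lane groups; the pi permutation is folded
// into the group selection here and the lane relabelling at the end of the round.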
tp = ^sta[12]
t0 = sta[6] | sta[12]
t0 = sta[0] ^ t0
t1 = tp | sta[18]
t1 = sta[6] ^ t1
t2 = sta[18] & sta[24]
t2 = sta[12] ^ t2
t3 = sta[24] | sta[0]
t3 = sta[18] ^ t3
t4 = sta[0] & sta[6]
t4 = sta[24] ^ t4
sta[0] = t0
sta[6] = t1
sta[12] = t2
sta[18] = t3
sta[24] = t4
tp = ^sta[22]
t0 = sta[9] | sta[10]
t0 = sta[3] ^ t0
t1 = sta[10] & sta[16]
t1 = sta[9] ^ t1
t2 = sta[16] | tp
t2 = sta[10] ^ t2
t3 = sta[22] | sta[3]
t3 = sta[16] ^ t3
t4 = sta[3] & sta[9]
t4 = sta[22] ^ t4
sta[3] = t0
sta[9] = t1
sta[10] = t2
sta[16] = t3
sta[22] = t4
tp = ^sta[19]
t0 = sta[7] | sta[13]
t0 = sta[1] ^ t0
t1 = sta[13] & sta[19]
t1 = sta[7] ^ t1
t2 = tp & sta[20]
t2 = sta[13] ^ t2
t3 = sta[20] | sta[1]
t3 = tp ^ t3
t4 = sta[1] & sta[7]
t4 = sta[20] ^ t4
sta[1] = t0
sta[7] = t1
sta[13] = t2
sta[19] = t3
sta[20] = t4
tp = ^sta[17]
t0 = sta[5] & sta[11]
t0 = sta[4] ^ t0
t1 = sta[11] | sta[17]
t1 = sta[5] ^ t1
t2 = tp | sta[23]
t2 = sta[11] ^ t2
t3 = sta[23] & sta[4]
t3 = tp ^ t3
t4 = sta[4] | sta[5]
t4 = sta[23] ^ t4
sta[4] = t0
sta[5] = t1
sta[11] = t2
sta[17] = t3
sta[23] = t4
tp = ^sta[8]
t0 = tp & sta[14]
t0 = sta[2] ^ t0
t1 = sta[14] | sta[15]
t1 = tp ^ t1
t2 = sta[15] & sta[21]
t2 = sta[14] ^ t2
t3 = sta[21] | sta[2]
t3 = sta[15] ^ t3
t4 = sta[2] & sta[8]
t4 = sta[21] ^ t4
sta[2] = t0
sta[8] = t1
sta[14] = t2
sta[15] = t3
sta[21] = t4
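// Iota: inject the round constant for round j into lane 0.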
sta[0] = sta[0] ^ kSpec[j+0]
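// Rotate lanes into the positions the next round expects (the rest of the pi step).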
t0 = sta[5]
sta[5] = sta[3]
sta[3] = sta[18]
sta[18] = sta[17]
sta[17] = sta[11]
sta[11] = sta[7]
sta[7] = sta[10]
sta[10] = sta[1]
sta[1] = sta[6]
sta[6] = sta[9]
sta[9] = sta[22]
sta[22] = sta[14]
sta[14] = sta[20]
sta[20] = sta[2]
sta[2] = sta[12]
sta[12] = sta[13]
sta[13] = sta[19]
sta[19] = sta[23]
sta[23] = sta[15]
sta[15] = sta[4]
sta[4] = sta[24]
sta[24] = sta[21]
sta[21] = sta[8]
sta[8] = sta[16]
sta[16] = t0
}
ptr = 0
}
}
ref.ptr = ptr
return fln, nil
}
// Close the digest by writing the last bits and storing the hash
// in dst. This prepares the digest for reuse by calling Reset. A call
// to Close with a dst that is smaller than HashSize will return an error.
func (ref *digest) Close(dst []byte, bits uint8, bcnt uint8) error {
if ln := len(dst); HashSize > ln {
return fmt.Errorf("Keccak Close: dst min length: %d, got %d", HashSize, ln)
}
var tln uintptr
var tmp [73]uint8
off := uint8((uint16(0x100) | uint16(bits&0xFF)) >> (8 - bcnt))
if ref.ptr == (72 - 1) {
if bcnt == 7 {
tmp[0] = off
tmp[72] = 0x80
tln = 1 + 72
} else {
tmp[0] = uint8(off | 0x80)
tln = 1
}
} else {
tln = 72 - ref.ptr
tmp[0] = off
tmp[tln-1] = 0x80
}
ref.Write(tmp[:tln])
ref.h[1] = ^ref.h[1]
ref.h[2] = ^ref.h[2]
ref.h[8] = ^ref.h[8]
ref.h[12] = ^ref.h[12]
ref.h[17] = ^ref.h[17]
ref.h[20] = ^ref.h[20]
for u := uintptr(0); u < 64; u += 8 {
encUInt64le(dst[u:], ref.h[(u>>3)])
}
ref.Reset()
return nil
}
// Size returns the number of bytes required to store the hash.
func (*digest) Size() int {
return HashSize
}
// BlockSize returns the block size of the hash.
func (*digest) BlockSize() int {
return int(BlockSize)
}
////////////////
func decUInt64le(src []byte) uint64 {
return (uint64(src[0]) |
uint64(src[1])<<8 |
uint64(src[2])<<16 |
uint64(src[3])<<24 |
uint64(src[4])<<32 |
uint64(src[5])<<40 |
uint64(src[6])<<48 |
uint64(src[7])<<56)
}
func encUInt64le(dst []byte, src uint64) {
dst[0] = uint8(src)
dst[1] = uint8(src >> 8)
dst[2] = uint8(src >> 16)
dst[3] = uint8(src >> 24)
dst[4] = uint8(src >> 32)
dst[5] = uint8(src >> 40)
dst[6] = uint8(src >> 48)
dst[7] = uint8(src >> 56)
}
////////////////
var kSpec = []uint64{
uint64(0x0000000000000001), uint64(0x0000000000008082),
uint64(0x800000000000808A), uint64(0x8000000080008000),
uint64(0x000000000000808B), uint64(0x0000000080000001),
uint64(0x8000000080008081), uint64(0x8000000000008009),
uint64(0x000000000000008A), uint64(0x0000000000000088),
uint64(0x0000000080008009), uint64(0x000000008000000A),
uint64(0x000000008000808B), uint64(0x800000000000008B),
uint64(0x8000000000008089), uint64(0x8000000000008003),
uint64(0x8000000000008002), uint64(0x8000000000000080),
uint64(0x000000000000800A), uint64(0x800000008000000A),
uint64(0x8000000080008081), uint64(0x8000000000008080),
uint64(0x0000000080000001), uint64(0x8000000080008008),
}
| {
ref := &digest{}
ref.Reset()
return ref
} |
file_test.go | package object
import (
"io"
"gopkg.in/src-d/go-git.v4/fixtures"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
. "gopkg.in/check.v1"
)
type FileSuite struct {
BaseObjectsSuite
}
var _ = Suite(&FileSuite{})
type fileIterExpectedEntry struct {
Name string
Hash string
}
var fileIterTests = []struct {
repo string // the repo name as in localRepos
commit string // the commit to search for the file
files []fileIterExpectedEntry
}{
{"https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", []fileIterExpectedEntry{
{".gitignore", "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88"},
{"CHANGELOG", "d3ff53e0564a9f87d8e84b6e28e5060e517008aa"},
{"LICENSE", "c192bd6a24ea1ab01d78686e417c8bdc7c3d197f"},
{"binary.jpg", "d5c0f4ab811897cadf03aec358ae60d21f91c50d"},
{"go/example.go", "880cd14280f4b9b6ed3986d6671f907d7cc2a198"},
{"json/long.json", "49c6bb89b17060d7b4deacb7b338fcc6ea2352a9"},
{"json/short.json", "c8f1d8c61f9da76f4cb49fd86322b6e685dba956"},
{"php/crappy.php", "9a48f23120e880dfbe41f7c9b7b708e9ee62a492"},
{"vendor/foo.go", "9dea2395f5403188298c1dabe8bdafe562c491e3"},
}},
}
func (s *FileSuite) TestIter(c *C) {
for i, t := range fileIterTests {
f := fixtures.ByURL(t.repo).One()
sto, err := filesystem.NewStorage(f.DotGit())
c.Assert(err, IsNil)
h := plumbing.NewHash(t.commit)
commit, err := GetCommit(sto, h)
c.Assert(err, IsNil, Commentf("subtest %d: %v (%s)", i, err, t.commit))
tree, err := commit.Tree()
c.Assert(err, IsNil)
iter := NewFileIter(sto, tree)
for k := 0; k < len(t.files); k++ {
exp := t.files[k]
file, err := iter.Next()
c.Assert(err, IsNil, Commentf("subtest %d, iter %d, err=%v", i, k, err))
c.Assert(file.Mode.String(), Equals, "-rw-r--r--")
c.Assert(file.Hash.IsZero(), Equals, false)
c.Assert(file.Hash, Equals, file.ID())
c.Assert(file.Name, Equals, exp.Name, Commentf("subtest %d, iter %d, name=%s, expected=%s", i, k, file.Name, exp.Hash))
c.Assert(file.Hash.String(), Equals, exp.Hash, Commentf("subtest %d, iter %d, hash=%v, expected=%s", i, k, file.Hash.String(), exp.Hash))
}
_, err = iter.Next()
c.Assert(err, Equals, io.EOF)
}
}
var contentsTests = []struct {
repo string // the repo name as in localRepos
commit string // the commit to search for the file
path string // the path of the file to find
contents string // expected contents of the file
}{
{
"https://github.com/git-fixtures/basic.git",
"b029517f6300c2da0f4b651b8642506cd6aaf45d",
".gitignore",
`*.class
# Mobile Tools for Java (J2ME)
.mtj.tmp/
# Package Files #
*.jar
*.war
*.ear
# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
hs_err_pid*
`,
},
{
"https://github.com/git-fixtures/basic.git",
"6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
"CHANGELOG",
`Initial changelog
`,
},
}
func (s *FileSuite) TestContents(c *C) {
for i, t := range contentsTests {
f := fixtures.ByURL(t.repo).One()
sto, err := filesystem.NewStorage(f.DotGit())
c.Assert(err, IsNil)
h := plumbing.NewHash(t.commit)
commit, err := GetCommit(sto, h)
c.Assert(err, IsNil, Commentf("subtest %d: %v (%s)", i, err, t.commit))
file, err := commit.File(t.path)
c.Assert(err, IsNil)
content, err := file.Contents()
c.Assert(err, IsNil)
c.Assert(content, Equals, t.contents, Commentf(
"subtest %d: commit=%s, path=%s", i, t.commit, t.path))
}
}
var linesTests = []struct {
repo string // the repo name as in localRepos
commit string // the commit to search for the file
path string // the path of the file to find
lines []string // expected lines in the file
}{
{
"https://github.com/git-fixtures/basic.git",
"b029517f6300c2da0f4b651b8642506cd6aaf45d",
".gitignore",
[]string{
"*.class",
"",
"# Mobile Tools for Java (J2ME)",
".mtj.tmp/",
"",
"# Package Files #",
"*.jar",
"*.war",
"*.ear",
"",
"# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml",
"hs_err_pid*",
},
},
{
"https://github.com/git-fixtures/basic.git",
"6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
"CHANGELOG",
[]string{
"Initial changelog",
},
},
}
func (s *FileSuite) TestLines(c *C) {
for i, t := range linesTests {
f := fixtures.ByURL(t.repo).One()
sto, err := filesystem.NewStorage(f.DotGit())
c.Assert(err, IsNil)
h := plumbing.NewHash(t.commit)
commit, err := GetCommit(sto, h)
c.Assert(err, IsNil, Commentf("subtest %d: %v (%s)", i, err, t.commit))
file, err := commit.File(t.path)
c.Assert(err, IsNil)
lines, err := file.Lines()
c.Assert(err, IsNil)
c.Assert(lines, DeepEquals, t.lines, Commentf(
"subtest %d: commit=%s, path=%s", i, t.commit, t.path))
}
}
var ignoreEmptyDirEntriesTests = []struct {
repo string // the repo name as in localRepos
commit string // the commit to search for the file
}{
{
"https://github.com/cpcs499/Final_Pres_P.git",
"70bade703ce556c2c7391a8065c45c943e8b6bc3",
// the Final dir in this commit is empty
},
}
// It is difficult to assert that we are ignoring an (empty) dir as even
// if we don't, no files will be found in it.
//
// At least this test has a high chance of panicking if
// we don't ignore empty dirs.
func (s *FileSuite) TestIgnoreEmptyDirEntries(c *C) {
for i, t := range ignoreEmptyDirEntriesTests {
f := fixtures.ByURL(t.repo).One()
sto, err := filesystem.NewStorage(f.DotGit())
c.Assert(err, IsNil)
h := plumbing.NewHash(t.commit)
commit, err := GetCommit(sto, h)
c.Assert(err, IsNil, Commentf("subtest %d: %v (%s)", i, err, t.commit))
tree, err := commit.Tree()
c.Assert(err, IsNil)
iter := tree.Files()
defer iter.Close()
for file, err := iter.Next(); err == nil; file, err = iter.Next() {
_, _ = file.Contents()
// this would probably panic if we are not ignoring empty dirs
}
} | hash := plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea")
commit, err := GetCommit(s.Storer, hash)
c.Assert(err, IsNil)
tree, err := commit.Tree()
c.Assert(err, IsNil)
expected := []string{
".gitignore",
"CHANGELOG",
"LICENSE",
"binary.jpg",
}
var count int
i := tree.Files()
i.ForEach(func(f *File) error {
c.Assert(f.Name, Equals, expected[count])
count++
return nil
})
c.Assert(count, Equals, 4)
count = 0
i = tree.Files()
i.ForEach(func(f *File) error {
count++
return storer.ErrStop
})
c.Assert(count, Equals, 1)
} | }
func (s *FileSuite) TestFileIter(c *C) { |
test_events.py | #!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Tests the framework events.
:author: Thomas Calmant
"""
# Standard library
try:
import unittest2 as unittest
except ImportError:
import unittest
# Pelix
from pelix.framework import FrameworkFactory, Bundle, BundleException, \
BundleContext, BundleEvent, ServiceEvent
from pelix.services import SERVICE_EVENT_LISTENER_HOOK
# Tests
from tests import log_on, log_off
from tests.interfaces import IEchoService
# ------------------------------------------------------------------------------
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
SERVICE_BUNDLE = "tests.framework.service_bundle"
SIMPLE_BUNDLE = "tests.framework.simple_bundle"
# ------------------------------------------------------------------------------
class BundleEventTest(unittest.TestCase):
"""
Pelix bundle event tests
"""
def setUp(self):
"""
Called before each test. Initiates a framework.
"""
self.framework = FrameworkFactory.get_framework()
self.framework.start()
self.test_bundle_name = SIMPLE_BUNDLE
self.bundle = None
self.received = []
def tearDown(self):
"""
Called after each test
"""
self.framework.stop()
FrameworkFactory.delete_framework()
def reset_state(self):
"""
Resets the flags
"""
del self.received[:]
def bundle_changed(self, event):
"""
Called by the framework when a bundle event is triggered
@param event: The BundleEvent
"""
assert isinstance(event, BundleEvent)
bundle = event.get_bundle()
kind = event.get_kind()
if self.bundle is not None \
and kind == BundleEvent.INSTALLED:
# Bundle is not yet locally known...
self.assertIs(self.bundle, bundle,
"Received an event for an other bundle.")
self.assertNotIn(kind, self.received, "Event received twice")
self.received.append(kind)
def testBundleEvents(self):
"""
Tests if the signals are correctly received
"""
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
# Register to events
self.assertTrue(context.add_bundle_listener(self),
"Can't register the bundle listener")
# Install the bundle
self.bundle = bundle = context.install_bundle(self.test_bundle_name)
assert isinstance(bundle, Bundle)
# Assert the Install event has been received
self.assertEqual([BundleEvent.INSTALLED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Start the bundle
bundle.start()
# Assert the events have been received
self.assertEqual([BundleEvent.STARTING, BundleEvent.STARTED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Stop the bundle
bundle.stop()
# Assert the events have been received
self.assertEqual([BundleEvent.STOPPING, BundleEvent.STOPPING_PRECLEAN,
BundleEvent.STOPPED], self.received,
"Received {0}".format(self.received))
self.reset_state()
# Uninstall the bundle
bundle.uninstall()
# Assert the events have been received
self.assertEqual([BundleEvent.UNINSTALLED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Unregister from events
context.remove_bundle_listener(self)
# ------------------------------------------------------------------------------
class ServiceEventTest(unittest.TestCase):
"""
Pelix service event tests
"""
def setUp(self):
"""
Called before each test. Initiates a framework.
"""
self.framework = FrameworkFactory.get_framework()
self.framework.start()
self.test_bundle_name = SERVICE_BUNDLE
self.bundle = None
self.received = []
def tearDown(self):
"""
Called after each test
"""
self.framework.stop()
FrameworkFactory.delete_framework()
def reset_state(self):
"""
Resets the flags
"""
del self.received[:]
def service_changed(self, event):
"""
Called by the framework when a service event is triggered
@param event: The ServiceEvent
"""
assert isinstance(event, ServiceEvent)
ref = event.get_service_reference()
self.assertIsNotNone(ref, "Invalid service reference in the event")
kind = event.get_kind()
if kind == ServiceEvent.MODIFIED \
or kind == ServiceEvent.MODIFIED_ENDMATCH:
# Properties have been modified
self.assertNotEqual(ref.get_properties(),
event.get_previous_properties(),
"Modified event for unchanged properties")
self.assertNotIn(kind, self.received, "Event received twice")
self.received.append(kind)
def testDoubleListener(self):
"""
Tests double registration / unregistration
"""
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
# Double registration
self.assertTrue(context.add_service_listener(self),
"Can't register the service listener")
log_off()
self.assertFalse(context.add_service_listener(self),
"Service listener registered twice")
log_on()
# Double unregistration
self.assertTrue(context.remove_service_listener(self),
"Can't unregister the service listener")
log_off()
self.assertFalse(context.remove_service_listener(self),
"Service listener unregistered twice")
log_on()
def testInvalidFilterListener(self):
"""
Tests invalid filter listener registration
"""
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
log_off()
self.assertRaises(BundleException, context.add_service_listener, self,
"Invalid")
log_on()
self.assertFalse(context.remove_service_listener(self),
"Invalid filter was registered anyway")
def testServiceEventsNormal(self):
|
def testServiceEventsNoStop(self):
"""
Tests if the signals are correctly received, even if the service is not
correctly removed
"""
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
# Register to events
self.assertTrue(context.add_service_listener(self),
"Can't register the service listener")
# Install the bundle
self.bundle = bundle = context.install_bundle(self.test_bundle_name)
assert isinstance(bundle, Bundle)
# Assert the Install event has been received
self.assertEqual(
[], self.received, "Received {0}".format(self.received))
self.reset_state()
# Start the bundle
bundle.start()
# Assert the events have been received
self.assertEqual([ServiceEvent.REGISTERED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Uninstall the bundle, without unregistering the service
module_ = bundle.get_module()
module_.unregister = False
bundle.uninstall()
# Assert the events have been received
self.assertEqual([ServiceEvent.UNREGISTERING],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Unregister from events
context.remove_service_listener(self)
def testServiceModified(self):
"""
Tests the service modified event
"""
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
# Register to events
self.assertTrue(context.add_service_listener(self, "(test=True)"),
"Can't register the service listener")
# Install the bundle
self.bundle = bundle = context.install_bundle(self.test_bundle_name)
assert isinstance(bundle, Bundle)
# Start the bundle
bundle.start()
# Assert the events have been received
self.assertEqual([ServiceEvent.REGISTERED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Get the service
ref = context.get_service_reference(IEchoService)
self.assertIsNotNone(ref, "ServiceReference not found")
svc = context.get_service(ref)
self.assertIsNotNone(ref, "Invalid service instance")
# Modify the service => Simple modification
svc.modify({"answer": 42})
self.assertEqual([ServiceEvent.MODIFIED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Set the same value => No event should be sent
svc.modify({"answer": 42})
self.assertEqual([], self.received,
"Received {0}".format(self.received))
self.reset_state()
# Modify the service => Ends the filter match
svc.modify({"test": False})
# Assert the events have been received
self.assertEqual([ServiceEvent.MODIFIED_ENDMATCH],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Modify the service => the filter matches again
svc.modify({"test": True})
# Assert the events have been received
self.assertEqual([ServiceEvent.MODIFIED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Stop the bundle
bundle.stop()
# Assert the events have been received
self.assertEqual([ServiceEvent.UNREGISTERING],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Uninstall the bundle
bundle.uninstall()
# Unregister from events
context.remove_service_listener(self)
# ------------------------------------------------------------------------------
class EventListenerHookTest(unittest.TestCase):
"""
Event Listener Hook tests
"""
def setUp(self):
"""
Called before each test. Initiates a framework.
"""
self.framework = FrameworkFactory.get_framework()
self.framework.start()
self.test_bundle_name = SERVICE_BUNDLE
self.bundle = None
self.received = []
def tearDown(self):
"""
Called after each test
"""
self.framework.stop()
self.framework.delete()
def test_normal_behaviour(self):
"""
Checks if event listener hooks are registered correctly
"""
# Test implementation
events = []
class Hook(object):
@staticmethod
def event(svc_event, listeners_dict):
events.append((svc_event, listeners_dict))
# Register the hook
ctx = self.framework.get_bundle_context()
reg = ctx.register_service(SERVICE_EVENT_LISTENER_HOOK, Hook(), {})
# Hooks shouldn't be aware of themselves
self.assertFalse(events)
# Register a dummy service
dummy_reg = ctx.register_service("dummy", object(), {})
# Pop information
event, listeners = events.pop(0)
# Check event
assert isinstance(event, ServiceEvent)
self.assertEqual(event.get_kind(), ServiceEvent.REGISTERED)
self.assertIs(event.get_service_reference(), dummy_reg.get_reference())
# No listeners are registered
self.assertFalse(listeners)
# Update the service
dummy_reg.set_properties({"hello": "world"})
# Pop information
event, listeners = events.pop(0)
# Check event
assert isinstance(event, ServiceEvent)
self.assertEqual(event.get_kind(), ServiceEvent.MODIFIED)
self.assertIs(event.get_service_reference(), dummy_reg.get_reference())
# Unregister the service
dummy_reg.unregister()
# Pop information
event, listeners = events.pop(0)
# Check event
assert isinstance(event, ServiceEvent)
self.assertEqual(event.get_kind(), ServiceEvent.UNREGISTERING)
self.assertIs(event.get_service_reference(), dummy_reg.get_reference())
# Unregister the hook
reg.unregister()
# Register a new service
ctx.register_service("dummy", object(), {})
# Hook must not be notified
self.assertFalse(events)
def test_hook(self):
"""
Tests the hook filtering behaviour
"""
# Add a bundle to have two contexts in the test
fw_ctx = self.framework.get_bundle_context()
bnd = fw_ctx.install_bundle("tests.dummy_1")
bnd.start()
bnd_ctx = bnd.get_bundle_context()
# Setup a hook
class Hook(object):
@staticmethod
def event(svc_event, listeners_dict):
to_remove = svc_event.get_service_reference() \
.get_property("to.remove")
info_to_remove = []
for listener_bc, listeners_info in listeners_dict.items():
# Check the dictionary content
for listener_info in listeners_info:
self.assertIs(listener_bc, listener_info.bundle_context)
self.assertIs(
listener_bc, listener_info.listener.context)
self.assertIs(
listener_bc, listener_info.get_bundle_context())
if listener_info.listener in to_remove:
info_to_remove.append(listener_info)
# Remove the requested listeners
for listener_info in info_to_remove:
listeners_dict[listener_info.bundle_context] \
.remove(listener_info)
fw_ctx.register_service(SERVICE_EVENT_LISTENER_HOOK, Hook(), {})
# Register multiple listeners
class Listener(object):
def __init__(self, bc):
self.context = bc
self.storage = []
bc.add_service_listener(self)
def service_changed(self, event):
self.storage.append(event)
listener_referee = Listener(fw_ctx)
listener_1 = Listener(fw_ctx)
listener_2 = Listener(bnd_ctx)
# Register a service that only the referee will get
reg = fw_ctx.register_service(
"dummy", object(), {"to.remove": [listener_1, listener_2]})
evt = listener_referee.storage.pop(0)
self.assertIs(evt.get_service_reference(), reg.get_reference())
self.assertEqual(evt.get_kind(), ServiceEvent.REGISTERED)
self.assertFalse(listener_1.storage)
self.assertFalse(listener_2.storage)
# Modify it so that listener_1 gets it
reg.set_properties({"to.remove": [listener_2]})
self.assertFalse(listener_2.storage)
evt = listener_referee.storage.pop(0)
self.assertIs(evt.get_service_reference(), reg.get_reference())
self.assertEqual(evt.get_kind(), ServiceEvent.MODIFIED)
evt1 = listener_1.storage.pop(0)
self.assertIs(evt1, evt)
# Modify it so that listener_2, but not listener_1 gets it
reg.set_properties({"to.remove": [listener_1]})
self.assertFalse(listener_1.storage)
evt = listener_referee.storage.pop(0)
self.assertIs(evt.get_service_reference(), reg.get_reference())
self.assertEqual(evt.get_kind(), ServiceEvent.MODIFIED)
evt2 = listener_2.storage.pop(0)
self.assertIs(evt2, evt)
# ------------------------------------------------------------------------------
if __name__ == "__main__":
# Set logging level
import logging
logging.basicConfig(level=logging.DEBUG)
unittest.main()
| """
Tests if the signals are correctly received
"""
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
# Register to events
self.assertTrue(context.add_service_listener(self),
"Can't register the service listener")
# Install the bundle
self.bundle = bundle = context.install_bundle(self.test_bundle_name)
assert isinstance(bundle, Bundle)
# Assert the Install event has been received
self.assertEqual(
[], self.received, "Received {0}".format(self.received))
self.reset_state()
# Start the bundle
bundle.start()
# Assert the events have been received
self.assertEqual([ServiceEvent.REGISTERED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Stop the bundle
bundle.stop()
# Assert the events have been received
self.assertEqual([ServiceEvent.UNREGISTERING],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Uninstall the bundle
bundle.uninstall()
# Assert the events have been received
self.assertEqual(
[], self.received, "Received {0}".format(self.received))
self.reset_state()
# Unregister from events
context.remove_service_listener(self) |
script.js | ;(function()
{
if (window.BXMainMailConfirm)
return;
var options = {};
var mailboxes = [];
var listParams = {};
var action;
var BXMainMailConfirm = {
init: function(params)
{
mailboxes = params.mailboxes;
action = params.action;
delete params.mailboxes;
options = params;
},
getMailboxes: function()
{
return mailboxes;
},
showList: function(id, bind, params)
{
if (!BX.type.isNotEmptyString(params.placeholder)) | }
if (!(params.settings && params.settings.length))
{
params.settings = [];
}
if (!BX.type.isFunction(params.callback))
{
params.callback = function() {};
}
if((typeof params.popupSettings) != "object")
{
params.popupSettings = {};
}
params.popupSettings.className = 'main-mail-confirm-menu-content';
params.popupSettings.offsetLeft = 40;
params.popupSettings.angle = true;
params.popupSettings.closeByEsc = true;
listParams[id] = params;
var items = [];
var handler = function(event, item)
{
var action = 'apply';
if (event && event.target)
{
var deleteIconClass = 'main-mail-confirm-menu-delete-icon';
if (BX.hasClass(event.target, deleteIconClass) || BX.findParent(event.target, { class: deleteIconClass }, item.layout.item))
{
action = 'delete';
}
if (BX.hasClass(event.target, "sender-hint") || BX.findParent(event.target, { class: "sender-hint" }, item.layout.item))
{
action = 'edit';
}
}
if ('delete' == action)
{
BXMainMailConfirm.deleteSender(
item.id, function()
{
mailboxes = mailboxes.filter(function(value, index)
{
return item.id !== value.id
});
item.menuWindow.removeMenuItem(item.id);
if (listParams[id].selected == item.title)
{
listParams[id].callback('', listParams[id].placeholder);
}
}
);
}
else if ('edit' === action)
{
BXMainMailConfirm.showEditForm(item.id);
}
else
{
listParams[id].callback(item.title, item.text);
item.menuWindow.close();
}
};
if (!params.required)
{
items.push({
text: BX.util.htmlspecialchars(params.placeholder),
title: '',
onclick: handler
});
items.push({ delimiter: true });
}
if (mailboxes && mailboxes.length > 0)
{
var itemText, itemClass;
for (var i in mailboxes)
{
itemClass = 'menu-popup-no-icon';
itemText = BX.util.htmlspecialchars(mailboxes[i].formated);
if (mailboxes[i]['can_delete'] && mailboxes[i].id > 0)
{
itemText += '<span class="main-mail-confirm-menu-delete-icon popup-window-close-icon popup-window-titlebar-close-icon"\
title="' + BX.util.htmlspecialchars(BX.message('MAIN_MAIL_CONFIRM_DELETE')) + '"></span>';
itemClass = 'menu-popup-no-icon menu-popup-right-icon';
itemText += '<span data-role="sender-hint" class="sender-hint main-mail-edit-icon"\
title="' + BX.util.htmlspecialchars(BX.message('MAIN_MAIL_CONFIRM_EDIT')) + '"></span>';
itemClass = 'menu-popup-no-icon menu-popup-right-icon';
}
items.push({
html: itemText,
title: mailboxes[i].formated,
mailbox: mailboxes[i],
onclick: handler,
className: itemClass,
id: mailboxes[i].id
});
}
items.push({ delimiter: true });
}
items.push({
text: BX.util.htmlspecialchars(BX.message('MAIN_MAIL_CONFIRM_MENU')),
onclick: function(event, item)
{
item.menuWindow.close();
BXMainMailConfirm.showForm(function(mailbox, formated)
{
mailboxes.push({
email: mailbox.email,
name: mailbox.name,
id: mailbox.id,
formated: formated,
can_delete: true
});
listParams[id].callback(formated, BX.util.htmlspecialchars(formated));
BX.PopupMenu.destroy(id + '-menu');
});
}
});
//additional settings
if (params.settings.length > 0)
{
items = items.concat(params.settings);
}
BX.PopupMenu.show(
id + '-menu',
bind,
items,
params.popupSettings
);
},
showForm: function(callback, params)
{
window.step = 'email';
var senderId;
window.mode = params && params.mode ? params.mode : 'add';
var dlg = new BX.PopupWindow('add_from_email', null, {
titleBar: BX.message('MAIN_MAIL_CONFIRM_TITLE'),
draggable: true,
closeIcon: true,
lightShadow: true,
contentColor: 'white',
contentNoPaddings: true,
cacheable: false,
content: BX('new_from_email_dialog_content').innerHTML,
buttons: this.prepareDialogButtons(null, 'add', params, callback)
});
this.prepareDialog(dlg);
},
prepareDialog: function(dlg)
{
dlg.formFieldHint = function(field, type, text)
{
if (!field)
{
return;
}
var container = BX.findParent(field, { 'class': 'new-from-email-dialog-cell' });
var hint = BX.findChildByClassName(container, 'new-from-email-dialog-field-hint', true);
BX.removeClass(container, 'new-from-email-dialog-field-error');
BX.removeClass(container, 'new-from-email-dialog-field-warning');
switch (type)
{
case 'error':
BX.addClass(container, 'new-from-email-dialog-field-error');
break;
case 'warning':
BX.addClass(container, 'new-from-email-dialog-field-warning');
break;
}
if (typeof text != 'undefined' && text.length > 0)
{
BX.adjust(hint, { 'html': text });
BX.show(hint, 'block');
}
else
{
BX.hide(hint, 'block');
}
};
dlg.hideNotify = function()
{
var error = BX.findChild(dlg.contentContainer, { class: 'new-from-email-dialog-error' }, true);
if (error)
{
BX.hide(error, 'block');
}
};
dlg.showNotify = function(text)
{
var error = BX.findChild(dlg.contentContainer, { class: 'new-from-email-dialog-error' }, true);
if (error)
{
error.innerHTML = text;
BX.show(error, 'block');
}
};
dlg.switchBlock = function(block, immediately)
{
var emailBlock = BX.findChildByClassName(dlg.contentContainer, 'new-from-email-dialog-email-block', true);
var codeBlock = BX.findChildByClassName(dlg.contentContainer, 'new-from-email-dialog-code-block', true);
var hideBlock, showBlock;
if ('code' != step && 'code' == block)
{
hideBlock = emailBlock;
showBlock = codeBlock;
dlg.buttons[0].setName(BX.message('MAIN_MAIL_CONFIRM_SAVE'));
dlg.buttons[1].setName(BX.message('MAIN_MAIL_CONFIRM_BACK'));
}
else if ('code' == step && 'code' != block)
{
hideBlock = codeBlock;
showBlock = emailBlock;
dlg.buttons[0].setName(BX.message(
'smtp' == block && options.canCheckSmtp
? 'MAIN_MAIL_CONFIRM_SAVE'
: 'MAIN_MAIL_CONFIRM_GET_CODE'
));
dlg.buttons[1].setName(BX.message('MAIN_MAIL_CONFIRM_CANCEL'));
}
step = block;
if (hideBlock && showBlock)
{
if (immediately)
{
showBlock.style.position = '';
showBlock.style.height = '';
showBlock.style.display = '';
hideBlock.style.display = 'none';
}
else
{
hideBlock.style.height = hideBlock.offsetHeight + 'px';
hideBlock.offsetHeight;
hideBlock.style.height = '0px';
showBlock.style.position = 'absolute';
showBlock.style.height = '';
showBlock.style.display = '';
var showBlockHeight = showBlock.offsetHeight;
showBlock.style.height = '0px';
showBlock.style.position = '';
showBlock.offsetHeight;
showBlock.style.height = showBlockHeight + 'px';
}
}
};
var smtpLink = BX.findChildByClassName(dlg.contentContainer, 'new-from-email-dialog-smtp-link', true);
var smtpBlock = BX.findChildByClassName(dlg.contentContainer, 'new-from-email-dialog-smtp-block', true);
var useLimitCheckbox = BX.findChildByClassName(dlg.contentContainer, 'new-from-email-smtp-use-limit', true);
if (useLimitCheckbox)
{
BX.bind(
useLimitCheckbox,
'click',
function()
{
var useLimitCheckbox = BX.findChildByClassName(dlg.contentContainer, 'new-from-email-smtp-use-limit', true);
var emailBlock = BX.findChildByClassName(dlg.contentContainer, 'new-from-email-dialog-email-block', true);
var smtpLimitField = BX.findChild(emailBlock, { attr: { 'data-name': 'smtp-limit' } }, true);
smtpLimitField.disabled = !useLimitCheckbox.checked;
}
)
}
if (smtpLink && smtpBlock)
{
BX.bind(
smtpLink,
'click',
function(event)
{
var emailBlock = BX.findChildByClassName(dlg.contentContainer, 'new-from-email-dialog-email-block', true);
emailBlock.style.height = '';
if ('smtp' == step)
{
step = 'email';
BX.hide(smtpBlock, 'table-row-group');
dlg.buttons[0].setName(BX.message('MAIN_MAIL_CONFIRM_GET_CODE'));
}
else
{
step = 'smtp';
BX.show(smtpBlock, 'table-row-group');
dlg.buttons[0].setName(BX.message(
options.canCheckSmtp ? 'MAIN_MAIL_CONFIRM_SAVE' : 'MAIN_MAIL_CONFIRM_GET_CODE'
));
}
event.preventDefault();
}
);
}
if ('confirm' == window.mode)
{
dlg.switchBlock('code', true);
dlg.setOverlay(true);
}
dlg.show();
var emailBlock = BX.findChildByClassName(dlg.contentContainer, 'new-from-email-dialog-email-block', true);
var nameField = BX.findChild(emailBlock, { attr: { 'data-name': 'name' } }, true);
var emailField = BX.findChild(emailBlock, { attr: { 'data-name': 'email' } }, true);
if (nameField.value.length > 0)
{
emailField.focus();
}
else
{
nameField.focus();
}
},
showEditForm: function(senderId)
{
window.step = 'email';
window.mode = 'edit';
var dlg = new BX.PopupWindow('edit_from_email', null, {
titleBar: BX.message('MAIN_MAIL_CONFIRM_EDIT_TITLE'),
draggable: true,
closeIcon: true,
lightShadow: true,
contentColor: 'white',
contentNoPaddings: true,
cacheable: false,
content: BX('new_from_email_dialog_content').innerHTML,
events: {
onPopupShow: function () {
BX.ajax({
'url': BX.util.add_url_param(action, {
'act': 'info',
senderId: senderId,
}),
'method': 'GET',
'dataType': 'json',
onsuccess: function (data)
{
var emailBlock = BX.findChildByClassName(dlg.contentContainer, 'new-from-email-dialog-email-block', true);
var smtpLink = BX.findChildByClassName(dlg.contentContainer, 'new-from-email-dialog-smtp-link', true);
var useLimitCheckbox = BX.findChildByClassName(dlg.contentContainer, 'new-from-email-smtp-use-limit', true);
var nameField = BX.findChild(emailBlock, { attr: { 'data-name': 'name' } }, true);
var emailField = BX.findChild(emailBlock, { attr: { 'data-name': 'email' } }, true);
var smtpServerField = BX.findChild(emailBlock, { attr: { 'data-name': 'smtp-server' } }, true);
var smtpPortField = BX.findChild(emailBlock, { attr: { 'data-name': 'smtp-port' } }, true);
var smtpSslField = BX.findChild(emailBlock, { attr: { 'data-name': 'smtp-ssl' } }, true);
var smtpLoginField = BX.findChild(emailBlock, { attr: { 'data-name': 'smtp-login' } }, true);
var smtpLimitField = BX.findChild(emailBlock, { attr: { 'data-name': 'smtp-limit' } }, true);
nameField.value = data.name || '';
emailField.value = BX.util.htmlspecialchars(data.email);
smtpServerField.value = BX.util.htmlspecialchars(data.server || '');
smtpPortField.value = BX.util.htmlspecialchars(data.port || '');
smtpLoginField.value = BX.util.htmlspecialchars(data.login || '');
var hasNoLimit = typeof data.limit === 'undefined' || data.limit === null;
smtpLimitField.value = hasNoLimit ? smtpLimitField.value : data.limit;
if (!hasNoLimit)
{
useLimitCheckbox.checked = true;
smtpLimitField.disabled = false;
}
if (data.protocol === 'smtps')
{
smtpSslField.checked = true;
}
if (data.server)
{
BX.fireEvent(smtpLink, 'click');
}
},
onfailure: function (data)
{
},
})
}
},
buttons: this.prepareDialogButtons(senderId, 'edit')
});
this.prepareDialog(dlg);
},
prepareDialogButtons: function(senderId, act, params, callback)
{
return [
new BX.PopupWindowButton({
text: BX.message('MAIN_MAIL_CONFIRM_GET_CODE'),
className: 'popup-window-button-create',
events: {
click: function(event, popup)
{
var btn = this;
var dlg = btn.popupWindow;
if (BX.hasClass(btn.buttonNode, 'popup-window-button-wait'))
return;
var emailBlock = BX.findChildByClassName(dlg.contentContainer, 'new-from-email-dialog-email-block', true);
var codeBlock = BX.findChildByClassName(dlg.contentContainer, 'new-from-email-dialog-code-block', true);
var nameField = BX.findChild(emailBlock, { attr: { 'data-name': 'name' } }, true);
var emailField = BX.findChild(emailBlock, { attr: { 'data-name': 'email' } }, true);
var codeField = BX.findChild(codeBlock, { attr: { 'data-name': 'code' } }, true);
var publicField = BX.findChild(dlg.contentContainer, { attr: { 'data-name': 'public' } }, true);
var smtpServerField = BX.findChild(emailBlock, { attr: { 'data-name': 'smtp-server' } }, true);
var smtpPortField = BX.findChild(emailBlock, { attr: { 'data-name': 'smtp-port' } }, true);
var smtpSslField = BX.findChild(emailBlock, { attr: { 'data-name': 'smtp-ssl' } }, true);
var smtpLoginField = BX.findChild(emailBlock, { attr: { 'data-name': 'smtp-login' } }, true);
var smtpPassField = BX.findChild(emailBlock, { attr: { 'data-name': 'smtp-password' } }, true);
var smtpLimit = BX.findChild(emailBlock, { attr: { 'data-name': 'smtp-limit' } }, true);
dlg.formFieldHint(smtpPassField);
if ('email' == window.step || 'smtp' == window.step)
{
codeField.value = '';
var atom = "[=a-z0-9_+~'!$&*^`|#%/?{}-]";
var pattern = new RegExp('^' + atom + '+(\\.' + atom + '+)*@([a-z0-9-]+\\.)+[a-z0-9-]{2,20}$', 'i');
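// Rough validation only: one or more "atom" characters, optional dot-separated
// atom groups, an "@", then dot-separated domain labels and a 2-20 character TLD.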
if (!emailField.value.match(pattern))
{
dlg.showNotify(BX.message(emailField.value.length > 0
? 'MAIN_MAIL_CONFIRM_INVALID_EMAIL'
: 'MAIN_MAIL_CONFIRM_EMPTY_EMAIL'
));
return;
}
}
if ('smtp' == window.step)
{
if (!smtpServerField.value.match(/^([a-z0-9-]+\.)+[a-z0-9-]{2,20}$/))
{
dlg.showNotify(BX.message(smtpServerField.value.length > 0
? 'MAIN_MAIL_CONFIRM_INVALID_SMTP_SERVER'
: 'MAIN_MAIL_CONFIRM_EMPTY_SMTP_SERVER'
));
return;
}
if (!smtpPortField.value.match(/^[0-9]+$/) || smtpPortField.value < 1 || smtpPortField.value > 65535)
{
dlg.showNotify(BX.message(smtpPortField.value.length > 0
? 'MAIN_MAIL_CONFIRM_INVALID_SMTP_PORT'
: 'MAIN_MAIL_CONFIRM_EMPTY_SMTP_PORT'
));
return;
}
if (!(smtpLoginField.value.length > 0))
{
dlg.showNotify(BX.message('MAIN_MAIL_CONFIRM_EMPTY_SMTP_LOGIN'));
return;
}
if (!senderId && smtpPassField.value.length > 0)
{
if (smtpPassField.value.match(/^\^/))
{
dlg.showNotify(BX.message('MAIN_MAIL_CONFIRM_INVALID_SMTP_PASSWORD_CARET'));
return;
}
else if (smtpPassField.value.match(/\x00/))
{
dlg.showNotify(BX.message('MAIN_MAIL_CONFIRM_INVALID_SMTP_PASSWORD_NULL'));
return;
}
else if (smtpPassField.value.match(/^\s|\s$/))
{
dlg.formFieldHint(smtpPassField, 'warning', BX.message('MAIN_MAIL_CONFIRM_SPACE_SMTP_PASSWORD'));
}
}
else if (!senderId)
{
dlg.showNotify(BX.message('MAIN_MAIL_CONFIRM_EMPTY_SMTP_PASSWORD'));
return;
}
}
if ('code' == window.step)
{
if (codeField.value.length == 0)
{
dlg.showNotify(BX.message('MAIN_MAIL_CONFIRM_EMPTY_CODE'));
return;
}
}
dlg.hideNotify();
BX.addClass(btn.buttonNode, 'popup-window-button-wait');
var data = {
id: senderId,
name: nameField.value,
email: emailField.value,
smtp: {},
code: '',
public: publicField.checked ? publicField.value : ''
};
if ('smtp' == window.step)
{
data.smtp = {
server: smtpServerField.value,
port: smtpPortField.value,
ssl: smtpSslField.checked ? smtpSslField.value : '',
login: smtpLoginField.value,
password: smtpPassField.value,
limit: smtpLimit.disabled ? null : smtpLimit.value
};
}
if ('code' == window.step)
{
data.code = codeField.value;
}
if (params && params.data)
{
for (var i in params.data)
{
if (params.data.hasOwnProperty(i))
{
data[i] = params.data[i];
}
}
}
BX.ajax({
'url': BX.util.add_url_param(action, {
'act': act
}),
'method': 'POST',
'dataType': 'json',
'data': data,
onsuccess: function(data)
{
BX.removeClass(btn.buttonNode, 'popup-window-button-wait');
if (data.senderId)
{
senderId = data.senderId;
}
if (data.result == 'error')
{
dlg.showNotify(data.error);
}
else if (('email' == window.step || 'smtp' == window.step) && !data.confirmed)
{
dlg.formFieldHint(smtpPassField);
dlg.switchBlock('code');
}
else
{
btn.popupWindow.close();
if (callback && BX.type.isFunction(callback))
{
var mailboxName = nameField.value.length > 0
? nameField.value
: BX.message('MAIN_MAIL_CONFIRM_USER_FULL_NAME');
callback(
{
name: mailboxName,
email: emailField.value,
id: senderId
},
mailboxName.length > 0 ? mailboxName + ' <' + emailField.value + '>' : emailField.value
);
}
}
},
onfailure: function()
{
BX.removeClass(btn.buttonNode, 'popup-window-button-wait');
dlg.showNotify(BX.message('MAIN_MAIL_CONFIRM_AJAX_ERROR'));
}
});
}
}
}),
new BX.PopupWindowButton({
text: BX.message('MAIN_MAIL_CONFIRM_CANCEL'),
className: 'popup-window-button-link',
events: {
click: function()
{
var dlg = this.popupWindow;
if ('code' == window.step && 'confirm' != window.mode)
{
var smtpBlock = BX.findChildByClassName(dlg.contentContainer, 'new-from-email-dialog-smtp-block', true);
dlg.switchBlock(smtpBlock && smtpBlock.offsetHeight > 0 ? 'smtp' : 'email');
}
else
{
this.popupWindow.close();
}
}
}
})
]
},
	// Re-queries which senders may be deleted and drops stale deletable entries from the cached mailboxes list.
	updateListCanDel: function(id)
{
BX.ajax({
			'url': BX.util.add_url_param(action, {
'act': 'sendersListCanDel',
}),
'method': 'POST',
'dataType': 'json',
'data': {},
onsuccess: function(data)
{
if (data.result == 'error')
{
BX.UI.Notification.Center.notify({
content: BX.message('MAIN_MAIL_DELETE_SENDER_ERROR')
});
}
else
{
mailboxes = mailboxes.filter(function(value, index)
{
if (!value.can_delete)
{
return true;
}
for (var i in data.mailboxes)
{
if (data.mailboxes[i].id == value.id)
{
return true;
}
}
return false;
});
BX.PopupMenu.destroy(id + '-menu');
}
},
onfailure: function(data)
{
BX.UI.Notification.Center.notify({
content: BX.message('MAIN_MAIL_DELETE_SENDER_ERROR')
});
}
});
},
	// Asks for confirmation, deletes the sender on the server, and invokes the callback on success.
	deleteSender: function(senderId, callback)
{
BX.UI.Dialogs.MessageBox.show({
message: BX.message('MAIN_MAIL_CONFIRM_DELETE_SENDER_CONFIRM'),
modal: true,
buttons: BX.UI.Dialogs.MessageBoxButtons.OK_CANCEL,
onOk: function(messageBox)
{
return new Promise(
function(resolve,reject)
{
BX.ajax({
'url': BX.util.add_url_param(action, {
'act': 'delete',
}),
'method': 'POST',
'dataType': 'json',
'data': {
senderId: senderId
},
onsuccess: function(data)
{
if (data.result == 'error')
{
BX.UI.Notification.Center.notify({
content: BX.message('MAIN_MAIL_DELETE_SENDER_ERROR')
});
reject(data);
}
else
{
if (BX.type.isFunction(callback))
{
callback();
}
resolve(data);
}
},
onfailure: function(data)
{
BX.UI.Notification.Center.notify({
content: BX.message('MAIN_MAIL_DELETE_SENDER_ERROR')
});
reject(data);
}
});
}
);
},
onCancel: function(messageBox)
{
messageBox.close();
}
});
}
};
window.BXMainMailConfirm = BXMainMailConfirm;
})(); | {
params.placeholder = BX.message(params.required ? 'MAIN_MAIL_CONFIRM_MENU_UNKNOWN' : 'MAIN_MAIL_CONFIRM_MENU_PLACEHOLDER'); |
serialize_b2_rope_joint.rs | use serde::{ser::SerializeStruct, Deserialize, Serialize, Serializer};
use serde::de::DeserializeSeed;
use serde::de::{self, Deserializer, MapAccess, SeqAccess, Visitor};
use std::fmt;
use std::cell::RefCell;
use std::rc::Rc;
use crate::b2_body::*;
use crate::b2_settings::*;
use crate::joints::b2_rope_joint::*;
use crate::serialize_b2_joint::*;
use strum::VariantNames;
use strum_macros::EnumVariantNames;
pub(crate) trait B2ropeJoinToDef<D: UserDataType> {
fn get_def(&self) -> B2ropeJointDef<D>;
}
impl<D: UserDataType> B2ropeJoinToDef<D> for B2ropeJoint<D> {
fn get_def(&self) -> B2ropeJointDef<D> {
return B2ropeJointDef {
base: self.base.get_def(),
local_anchor_a: self.m_local_anchor_a,
local_anchor_b: self.m_local_anchor_b,
max_length: self.m_max_length,
};
}
}
impl<D: UserDataType> Serialize for B2ropeJointDef<D> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut state = serializer.serialize_struct("B2ropeJointDef", 4)?;
state.serialize_field("base", &self.base)?;
state.serialize_field("local_anchor_a", &self.local_anchor_a)?;
state.serialize_field("local_anchor_b", &self.local_anchor_b)?;
state.serialize_field("max_length", &self.max_length)?;
state.end()
}
}
#[derive(Clone)]
pub(crate) struct B2ropeJointDefContext<D: UserDataType> {
pub(crate) m_body_array: Rc<RefCell<Vec<BodyPtr<D>>>>,
}
impl<'de, U: UserDataType> DeserializeSeed<'de> for B2ropeJointDefContext<U> {
type Value = B2ropeJointDef<U>;
fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(field_identifier, rename_all = "lowercase")]
#[derive(EnumVariantNames)]
#[allow(non_camel_case_types)]
enum Field {
base,
local_anchor_a,
local_anchor_b,
max_length,
}
struct B2ropeJointDefVisitor<D: UserDataType>(B2ropeJointDefContext<D>);
impl<'de, U: UserDataType> Visitor<'de> for B2ropeJointDefVisitor<U> {
type Value = B2ropeJointDef<U>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("struct B2ropeJointDef")
}
fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error>
where
V: SeqAccess<'de>,
{
let joint_def = B2ropeJointDef {
base: seq
.next_element_seed(B2jointDefVisitorContext {
m_body_array: self.0.m_body_array.clone(),
})?
.ok_or_else(|| de::Error::invalid_length(0, &self))?,
local_anchor_a: seq
.next_element()?
						.ok_or_else(|| de::Error::invalid_length(1, &self))?,
local_anchor_b: seq
.next_element()?
						.ok_or_else(|| de::Error::invalid_length(2, &self))?,
max_length: seq
.next_element()?
						.ok_or_else(|| de::Error::invalid_length(3, &self))?,
};
Ok(joint_def)
}
fn visit_map<V>(self, mut map: V) -> Result<Self::Value, V::Error>
where
V: MapAccess<'de>,
{
let mut joint_def = B2ropeJointDef::default();
while let Some(key) = map.next_key()? {
match key {
Field::base => {
joint_def.base = map.next_value_seed(B2jointDefVisitorContext {
m_body_array: self.0.m_body_array.clone(),
})?;
}
Field::local_anchor_a => {
joint_def.local_anchor_a = map.next_value()?;
}
Field::local_anchor_b => {
joint_def.local_anchor_b = map.next_value()?;
}
Field::max_length => {
joint_def.max_length = map.next_value()?;
}
}
}
Ok(joint_def)
}
}
deserializer.deserialize_struct(
"B2ropeJointDef",
Field::VARIANTS,
B2ropeJointDefVisitor(self),
)
}
} | use serde::de::{self, Deserializer, MapAccess, SeqAccess, Visitor}; |
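// --- Editor's note: hedged sketch, not part of the original crate. ---
// The file above threads shared context (the body list) through deserialization
// with serde's DeserializeSeed. A minimal, self-contained illustration of that
// stateful-deserialization pattern follows; every identifier here is invented
// for the example.
#[allow(dead_code)]
mod deserialize_seed_demo {
    use serde::de::{Deserialize, DeserializeSeed, Deserializer};
    use std::rc::Rc;

    pub struct NameTableSeed {
        // Shared context, playing the role of m_body_array above.
        pub names: Rc<Vec<String>>,
    }

    impl<'de> DeserializeSeed<'de> for NameTableSeed {
        type Value = String;

        fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
        where
            D: Deserializer<'de>,
        {
            // Deserialize a plain index, then resolve it against the shared context.
            let idx = usize::deserialize(deserializer)?;
            Ok(self.names.get(idx).cloned().unwrap_or_default())
        }
    }
}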
|
hook_runner.rs | //! Runs a service lifecycle hook on a separate thread, and wraps the
//! whole execution in a future.
//!
//! Ideally, we'd want to use something like
//! [tokio_process](https://github.com/alexcrichton/tokio-process),
//! but we're not able to use that based on how our Windows hooks get
//! executed. If that were to be re-cast in terms of Rust's
//! `std::process::Command`, we could consider it. In the meantime,
//! this seems to do the trick.
use super::{hook_timer,
Pkg};
use habitat_common::templating::hooks::Hook;
use habitat_core::service::ServiceGroup;
use std::{clone::Clone,
sync::Arc,
time::{Duration,
Instant}};
use tokio::{task,
task::JoinError};
pub struct HookRunner<H: Hook + Sync> {
hook: Arc<H>,
service_group: ServiceGroup,
pkg: Pkg,
passwd: Option<String>,
}
// We cannot use `#[derive(Clone)]` here because it unnecessarily requires `H` to be
// `Clone`. See https://github.com/rust-lang/rust/issues/44151.
impl<H: Hook + Sync> Clone for HookRunner<H> {
fn clone(&self) -> Self {
Self { hook: self.hook.clone(),
service_group: self.service_group.clone(),
pkg: self.pkg.clone(),
passwd: self.passwd.clone(), }
}
}
impl<H> HookRunner<H> where H: Hook + Sync + 'static
{
pub fn new(hook: Arc<H>,
service_group: ServiceGroup,
pkg: Pkg,
passwd: Option<String>)
-> HookRunner<H> {
HookRunner { hook,
service_group, | pub async fn retryable_future(self) {
loop {
match self.clone().into_future().await {
Ok((maybe_exit_value, _duration)) => {
// If we did not get an exit value always retry
if maybe_exit_value.as_ref().map_or(true, H::should_retry) {
debug!("Retrying the '{}' hook", H::file_name());
} else {
break;
}
}
Err(e) => error!("Error running the '{}' hook: {:?}", H::file_name(), e),
}
}
}
pub async fn into_future(self) -> Result<(Option<H::ExitValue>, Duration), JoinError> {
// TODO (CM): May want to consider adding a configurable
// timeout to how long this hook is allowed to run.
task::spawn_blocking(move || {
// _timer is for Prometheus metrics, but we also want
// the runtime for other purposes. Unfortunately,
// we're not able to use the same timer for both :(
let _timer = hook_timer(H::file_name());
let start = Instant::now();
let exit_value = self.hook
.run(&self.service_group, &self.pkg, self.passwd.as_ref())
.ok();
let run_time = start.elapsed();
(exit_value, run_time)
}).await
}
} | pkg,
passwd }
}
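// --- Editor's note: hedged sketch, not part of the original crate. ---
// The module above wraps a blocking hook run in a future via
// tokio::task::spawn_blocking and retries until an exit value says to stop.
// A stripped-down version of that shape, with invented stand-in types:
#[allow(dead_code)]
mod hook_retry_demo {
    use std::time::{Duration, Instant};
    use tokio::task;

    // Stand-in for a hook's blocking work; None would mean "no exit value yet".
    fn run_blocking_hook() -> Option<i32> {
        Some(0)
    }

    pub async fn run_with_timing() -> Result<(Option<i32>, Duration), task::JoinError> {
        task::spawn_blocking(|| {
            let start = Instant::now();
            let exit = run_blocking_hook();
            (exit, start.elapsed())
        })
        .await
    }

    pub async fn retry_until_done() {
        loop {
            match run_with_timing().await {
                // Stop once an exit value arrives; a real implementation would
                // also back off between attempts.
                Ok((Some(_exit), _elapsed)) => break,
                Ok((None, _elapsed)) => continue,
                Err(_join_error) => break,
            }
        }
    }
}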
|
rule.go | // Copyright 2019 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package placement
import (
"encoding/hex"
"encoding/json"
"sort"
)
// PeerRoleType is the expected peer type of the placement rule.
type PeerRoleType string
const (
// Voter can either match a leader peer or follower peer
Voter PeerRoleType = "voter"
// Leader matches a leader.
Leader PeerRoleType = "leader"
// Follower matches a follower.
Follower PeerRoleType = "follower"
// Learner matches a learner.
Learner PeerRoleType = "learner"
)
func validateRole(s PeerRoleType) bool {
return s == Voter || s == Leader || s == Follower || s == Learner
}
// Rule is the placement rule that can be checked against a region. When
// applying rules (apply means schedule regions to match selected rules), the
// apply order is defined by the tuple [GroupID, Index, ID].
type Rule struct {
GroupID string `json:"group_id"` // mark the source that add the rule
ID string `json:"id"` // unique ID within a group
Index int `json:"index,omitempty"` // rule apply order in a group, rule with less ID is applied first when indexes are equal
Override bool `json:"override,omitempty"` // when it is true, all rules with less indexes are disabled
StartKey []byte `json:"-"` // range start key
StartKeyHex string `json:"start_key"` // hex format start key, for marshal/unmarshal
EndKey []byte `json:"-"` // range end key
EndKeyHex string `json:"end_key"` // hex format end key, for marshal/unmarshal
Role PeerRoleType `json:"role"` // expected role of the peers
Count int `json:"count"` // expected count of the peers
LabelConstraints []LabelConstraint `json:"label_constraints,omitempty"` // used to select stores to place peers
LocationLabels []string `json:"location_labels,omitempty"` // used to make peers isolated physically
}
func (r Rule) String() string {
b, _ := json.Marshal(r)
return string(b)
}
// Key returns (groupID, ID) as the global unique key of a rule.
func (r Rule) Key() [2]string {
return [2]string{r.GroupID, r.ID}
}
// StoreKey returns the rule's key for persistent store.
func (r Rule) StoreKey() string {
return hex.EncodeToString([]byte(r.GroupID)) + "-" + hex.EncodeToString([]byte(r.ID))
}
// Rules are ordered by (GroupID, Index, ID).
func compareRule(a, b *Rule) int {
switch {
case a.GroupID < b.GroupID:
return -1
case a.GroupID > b.GroupID:
return 1
case a.Index < b.Index:
return -1
case a.Index > b.Index:
return 1
case a.ID < b.ID:
return -1
case a.ID > b.ID:
return 1
default:
return 0
}
}
func sortRules(rules []*Rule) {
sort.Slice(rules, func(i, j int) bool { return compareRule(rules[i], rules[j]) < 0 })
}
// Sort Rules, trim concealed rules.
func prepareRulesForApply(rules []*Rule) []*Rule {
var res []*Rule
var i, j int
for i = 1; i < len(rules); i++ {
if rules[j].GroupID != rules[i].GroupID {
res = append(res, rules[j:i]...)
j = i
}
if rules[i].Override {
j = i
}
}
return append(res, rules[j:]...)
}
| prepareRulesForApply |
main.rs | use ethereum::{BlockIngestor as EthereumBlockIngestor, EthereumAdapterTrait, EthereumNetworks};
use git_testament::{git_testament, render_testament};
use graph::blockchain::firehose_block_ingestor::FirehoseBlockIngestor;
use graph::blockchain::{Block as BlockchainBlock, Blockchain, BlockchainKind, BlockchainMap};
use graph::components::store::BlockStore;
use graph::data::graphql::effort::LoadManager;
use graph::firehose::{FirehoseEndpoints, FirehoseNetworks};
use graph::log::logger;
use graph::prelude::{IndexNodeServer as _, JsonRpcServer as _, *};
use graph::prometheus::Registry;
use graph::url::Url;
use graph_chain_ethereum as ethereum;
use graph_chain_near::{self as near, HeaderOnlyBlock as NearFirehoseHeaderOnlyBlock};
use graph_chain_tendermint::{self as tendermint, EventList as TendermintFirehoseEventList};
use graph_core::{
LinkResolver, MetricsRegistry, SubgraphAssignmentProvider as IpfsSubgraphAssignmentProvider,
SubgraphInstanceManager, SubgraphRegistrar as IpfsSubgraphRegistrar,
};
use graph_graphql::prelude::GraphQlRunner;
use graph_node::chain::{
connect_ethereum_networks, connect_firehose_networks, create_ethereum_networks,
create_firehose_networks, create_ipfs_clients, REORG_THRESHOLD,
};
use graph_node::config::Config;
use graph_node::opt;
use graph_node::store_builder::StoreBuilder;
use graph_server_http::GraphQLServer as GraphQLQueryServer;
use graph_server_index_node::IndexNodeServer;
use graph_server_json_rpc::JsonRpcServer;
use graph_server_metrics::PrometheusMetricsServer;
use graph_server_websocket::SubscriptionServer as GraphQLSubscriptionServer;
use graph_store_postgres::{register_jobs as register_store_jobs, ChainHeadUpdateListener, Store};
use std::collections::BTreeMap;
use std::io::{BufRead, BufReader};
use std::path::Path;
use std::sync::atomic;
use std::time::Duration;
use std::{collections::HashMap, env};
use structopt::StructOpt;
use tokio::sync::mpsc;
git_testament!(TESTAMENT);
fn read_expensive_queries() -> Result<Vec<Arc<q::Document>>, std::io::Error> {
// A file with a list of expensive queries, one query per line
// Attempts to run these queries will return a
// QueryExecutionError::TooExpensive to clients
const EXPENSIVE_QUERIES: &str = "/etc/graph-node/expensive-queries.txt";
let path = Path::new(EXPENSIVE_QUERIES);
let mut queries = Vec::new();
if path.exists() {
let file = std::fs::File::open(path)?;
let reader = BufReader::new(file);
for line in reader.lines() {
let line = line?;
let query = graphql_parser::parse_query(&line)
.map_err(|e| {
let msg = format!(
"invalid GraphQL query in {}: {}\n{}",
EXPENSIVE_QUERIES,
e.to_string(),
line
);
std::io::Error::new(std::io::ErrorKind::InvalidData, msg)
})?
.into_static();
queries.push(Arc::new(query));
}
}
Ok(queries)
}
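// --- Editor's note: hedged sketch, not part of the original binary. ---
// read_expensive_queries() above expects one GraphQL query per line of the
// file. A single line is checked roughly like this (the query text is invented):
#[allow(dead_code)]
fn parse_expensive_query_line_demo() {
    let line = "{ tokens(first: 1000) { id } }";
    // The original code additionally calls .into_static() on the parsed
    // document so it can be stored in an Arc beyond the lifetime of `line`.
    if let Err(e) = graphql_parser::parse_query::<String>(line) {
        eprintln!("invalid GraphQL query: {}", e);
    }
}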
#[tokio::main]
async fn main() {
env_logger::init();
// Allow configuring fail points on debug builds. Used for integration tests.
#[cfg(debug_assertions)]
std::mem::forget(fail::FailScenario::setup());
let opt = opt::Opt::from_args();
// Set up logger
let logger = logger(opt.debug);
// Log version information
info!(
logger,
"Graph Node version: {}",
render_testament!(TESTAMENT)
);
if opt.unsafe_config {
warn!(logger, "allowing unsafe configurations");
graph::env::UNSAFE_CONFIG.store(true, atomic::Ordering::SeqCst);
}
if !graph_server_index_node::POI_PROTECTION.is_active() {
warn!(
logger,
"GRAPH_POI_ACCESS_TOKEN not set; might leak POIs to the public via GraphQL"
);
}
let config = match Config::load(&logger, &opt.clone().into()) {
Err(e) => {
eprintln!("configuration error: {}", e);
std::process::exit(1);
}
Ok(config) => config,
};
if opt.check_config {
match config.to_json() {
Ok(txt) => println!("{}", txt),
Err(e) => eprintln!("error serializing config: {}", e),
}
eprintln!("Successfully validated configuration");
std::process::exit(0);
}
let node_id =
NodeId::new(opt.node_id.clone()).expect("Node ID must contain only a-z, A-Z, 0-9, and '_'");
let query_only = config.query_only(&node_id);
// Obtain subgraph related command-line arguments
let subgraph = opt.subgraph.clone();
// Obtain ports to use for the GraphQL server(s)
let http_port = opt.http_port;
let ws_port = opt.ws_port;
// Obtain JSON-RPC server port
let json_rpc_port = opt.admin_port;
// Obtain index node server port
let index_node_port = opt.index_node_port;
// Obtain metrics server port
let metrics_port = opt.metrics_port;
// Obtain the fork base URL
let fork_base = match &opt.fork_base {
Some(url) => {
// Make sure the endpoint ends with a terminating slash.
let url = if !url.ends_with("/") {
let mut url = url.clone();
url.push('/');
Url::parse(&url)
} else {
Url::parse(url)
};
Some(url.expect("Failed to parse the fork base URL"))
}
None => {
warn!(
logger,
"No fork base URL specified, subgraph forking is disabled"
);
None
}
};
info!(logger, "Starting up");
// Optionally, identify the Elasticsearch logging configuration
let elastic_config = opt
.elasticsearch_url
.clone()
.map(|endpoint| ElasticLoggingConfig {
endpoint: endpoint.clone(),
username: opt.elasticsearch_user.clone(),
password: opt.elasticsearch_password.clone(),
});
// Create a component and subgraph logger factory
let logger_factory = LoggerFactory::new(logger.clone(), elastic_config);
// Try to create IPFS clients for each URL specified in `--ipfs`
let ipfs_clients: Vec<_> = create_ipfs_clients(&logger, &opt.ipfs);
// Convert the clients into a link resolver. Since we want to get past
// possible temporary DNS failures, make the resolver retry
let link_resolver = Arc::new(LinkResolver::from(ipfs_clients));
// Set up Prometheus registry
let prometheus_registry = Arc::new(Registry::new());
let metrics_registry = Arc::new(MetricsRegistry::new(
logger.clone(),
prometheus_registry.clone(),
));
let mut metrics_server =
PrometheusMetricsServer::new(&logger_factory, prometheus_registry.clone());
// Ethereum clients; query nodes ignore all ethereum clients and never
// connect to them directly
let eth_networks = if query_only {
EthereumNetworks::new()
} else {
create_ethereum_networks(logger.clone(), metrics_registry.clone(), &config)
.await
.expect("Failed to parse Ethereum networks")
};
let mut firehose_networks_by_kind = if query_only {
BTreeMap::new()
} else {
create_firehose_networks(logger.clone(), metrics_registry.clone(), &config)
.await
.expect("Failed to parse Firehose networks")
};
let graphql_metrics_registry = metrics_registry.clone();
let contention_logger = logger.clone();
let expensive_queries = read_expensive_queries().unwrap();
let store_builder = StoreBuilder::new(
&logger,
&node_id,
&config,
fork_base,
metrics_registry.cheap_clone(),
)
.await;
let launch_services = |logger: Logger| async move {
let subscription_manager = store_builder.subscription_manager();
let chain_head_update_listener = store_builder.chain_head_update_listener();
let primary_pool = store_builder.primary_pool();
// To support the ethereum block ingestor, ethereum networks are referenced both by the
// `blockchain_map` and `ethereum_chains`. Future chains should be referred to only in
// `blockchain_map`.
let mut blockchain_map = BlockchainMap::new();
let (eth_networks, ethereum_idents) =
connect_ethereum_networks(&logger, eth_networks).await;
let (near_networks, near_idents) =
connect_firehose_networks::<NearFirehoseHeaderOnlyBlock>(
&logger,
firehose_networks_by_kind
.remove(&BlockchainKind::Near)
.unwrap_or_else(|| FirehoseNetworks::new()),
)
.await;
let (tendermint_networks, tendermint_idents) =
connect_firehose_networks::<TendermintFirehoseEventList>(
&logger,
firehose_networks_by_kind
.remove(&BlockchainKind::Tendermint)
.unwrap_or_else(|| FirehoseNetworks::new()),
)
.await;
let network_identifiers = ethereum_idents
.into_iter()
.chain(near_idents)
.chain(tendermint_idents)
.collect();
let network_store = store_builder.network_store(network_identifiers);
let ethereum_chains = ethereum_networks_as_chains(
&mut blockchain_map,
&logger,
node_id.clone(),
metrics_registry.clone(),
firehose_networks_by_kind.get(&BlockchainKind::Ethereum),
ð_networks,
network_store.as_ref(),
chain_head_update_listener,
&logger_factory,
);
let near_chains = near_networks_as_chains(
&mut blockchain_map,
&logger,
&near_networks,
network_store.as_ref(),
&logger_factory,
);
let tendermint_chains = tendermint_networks_as_chains(
&mut blockchain_map,
&logger,
&tendermint_networks,
network_store.as_ref(),
&logger_factory,
);
let blockchain_map = Arc::new(blockchain_map);
let load_manager = Arc::new(LoadManager::new(
&logger,
expensive_queries,
metrics_registry.clone(),
));
let graphql_runner = Arc::new(GraphQlRunner::new(
&logger,
network_store.clone(),
subscription_manager.clone(),
load_manager,
metrics_registry.clone(),
));
let mut graphql_server = GraphQLQueryServer::new(
&logger_factory,
graphql_metrics_registry,
graphql_runner.clone(),
node_id.clone(),
);
let subscription_server =
GraphQLSubscriptionServer::new(&logger, graphql_runner.clone(), network_store.clone());
let mut index_node_server = IndexNodeServer::new(
&logger_factory,
graphql_runner.clone(),
network_store.clone(), | );
if !opt.disable_block_ingestor {
if ethereum_chains.len() > 0 {
let block_polling_interval = Duration::from_millis(opt.ethereum_polling_interval);
start_block_ingestor(
&logger,
&logger_factory,
block_polling_interval,
ethereum_chains,
);
}
start_firehose_block_ingestor::<_, NearFirehoseHeaderOnlyBlock>(
&logger,
&network_store,
near_chains,
);
start_firehose_block_ingestor::<_, TendermintFirehoseEventList>(
&logger,
&network_store,
tendermint_chains,
);
// Start a task runner
let mut job_runner = graph::util::jobs::Runner::new(&logger);
register_store_jobs(
&mut job_runner,
network_store.clone(),
primary_pool,
metrics_registry.clone(),
);
graph::spawn_blocking(job_runner.start());
}
let static_filters = env::var_os("EXPERIMENTAL_STATIC_FILTERS").is_some();
let subgraph_instance_manager = SubgraphInstanceManager::new(
&logger_factory,
network_store.subgraph_store(),
blockchain_map.cheap_clone(),
metrics_registry.clone(),
link_resolver.cheap_clone(),
static_filters,
);
// Create IPFS-based subgraph provider
let subgraph_provider = IpfsSubgraphAssignmentProvider::new(
&logger_factory,
link_resolver.cheap_clone(),
subgraph_instance_manager,
);
// Check version switching mode environment variable
let version_switching_mode = SubgraphVersionSwitchingMode::parse(
env::var_os("EXPERIMENTAL_SUBGRAPH_VERSION_SWITCHING_MODE")
.unwrap_or_else(|| "instant".into())
.to_str()
.expect("invalid version switching mode"),
);
// Create named subgraph provider for resolving subgraph name->ID mappings
let subgraph_registrar = Arc::new(IpfsSubgraphRegistrar::new(
&logger_factory,
link_resolver.cheap_clone(),
Arc::new(subgraph_provider),
network_store.subgraph_store(),
subscription_manager,
blockchain_map,
node_id.clone(),
version_switching_mode,
));
graph::spawn(
subgraph_registrar
.start()
.map_err(|e| panic!("failed to initialize subgraph provider {}", e))
.compat(),
);
// Start admin JSON-RPC server.
let json_rpc_server = JsonRpcServer::serve(
json_rpc_port,
http_port,
ws_port,
subgraph_registrar.clone(),
node_id.clone(),
logger.clone(),
)
.expect("failed to start JSON-RPC admin server");
// Let the server run forever.
std::mem::forget(json_rpc_server);
// Add the CLI subgraph with a REST request to the admin server.
if let Some(subgraph) = subgraph {
let (name, hash) = if subgraph.contains(':') {
let mut split = subgraph.split(':');
(split.next().unwrap(), split.next().unwrap().to_owned())
} else {
("cli", subgraph)
};
let name = SubgraphName::new(name)
.expect("Subgraph name must contain only a-z, A-Z, 0-9, '-' and '_'");
let subgraph_id =
DeploymentHash::new(hash).expect("Subgraph hash must be a valid IPFS hash");
let debug_fork = opt
.debug_fork
.map(DeploymentHash::new)
.map(|h| h.expect("Debug fork hash must be a valid IPFS hash"));
let start_block = opt
.start_block
.map(|block| {
let mut split = block.split(":");
(
// BlockHash
split.next().unwrap().to_owned(),
// BlockNumber
split.next().unwrap().parse::<i64>().unwrap(),
)
})
.map(|(hash, number)| BlockPtr::try_from((hash.as_str(), number)))
.map(Result::unwrap);
graph::spawn(
async move {
subgraph_registrar.create_subgraph(name.clone()).await?;
subgraph_registrar
.create_subgraph_version(
name,
subgraph_id,
node_id,
debug_fork,
start_block,
)
.await
}
.map_err(|e| panic!("Failed to deploy subgraph from `--subgraph` flag: {}", e)),
);
}
// Serve GraphQL queries over HTTP
graph::spawn(
graphql_server
.serve(http_port, ws_port)
.expect("Failed to start GraphQL query server")
.compat(),
);
// Serve GraphQL subscriptions over WebSockets
graph::spawn(subscription_server.serve(ws_port));
// Run the index node server
graph::spawn(
index_node_server
.serve(index_node_port)
.expect("Failed to start index node server")
.compat(),
);
graph::spawn(
metrics_server
.serve(metrics_port)
.expect("Failed to start metrics server")
.compat(),
);
};
graph::spawn(launch_services(logger.clone()));
// Periodically check for contention in the tokio threadpool. First spawn a
// task that simply responds to "ping" requests. Then spawn a separate
// thread to periodically ping it and check responsiveness.
let (ping_send, mut ping_receive) = mpsc::channel::<crossbeam_channel::Sender<()>>(1);
graph::spawn(async move {
while let Some(pong_send) = ping_receive.recv().await {
let _ = pong_send.clone().send(());
}
panic!("ping sender dropped");
});
std::thread::spawn(move || loop {
std::thread::sleep(Duration::from_secs(1));
let (pong_send, pong_receive) = crossbeam_channel::bounded(1);
if futures::executor::block_on(ping_send.clone().send(pong_send)).is_err() {
debug!(contention_logger, "Shutting down contention checker thread");
break;
}
let mut timeout = Duration::from_millis(10);
while pong_receive.recv_timeout(timeout)
== Err(crossbeam_channel::RecvTimeoutError::Timeout)
{
debug!(contention_logger, "Possible contention in tokio threadpool";
"timeout_ms" => timeout.as_millis(),
"code" => LogCode::TokioContention);
if timeout < Duration::from_secs(10) {
timeout *= 10;
} else if std::env::var_os("GRAPH_KILL_IF_UNRESPONSIVE").is_some() {
// The node is unresponsive, kill it in hopes it will be restarted.
crit!(contention_logger, "Node is unresponsive, killing process");
std::process::abort()
}
}
});
futures::future::pending::<()>().await;
}
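// --- Editor's note: hedged sketch, not part of the original binary. ---
// The contention check at the end of main() above works as follows: an async
// task answers "pings", while a plain OS thread measures how long an answer
// takes; a slow answer suggests the tokio threadpool is saturated. A
// self-contained miniature of that pattern (all names invented):
#[allow(dead_code)]
mod contention_check_demo {
    use std::time::Duration;
    use tokio::sync::mpsc;

    pub fn spawn_demo(runtime: &tokio::runtime::Handle) {
        let (ping_send, mut ping_receive) = mpsc::channel::<crossbeam_channel::Sender<()>>(1);

        // Responder task: answers every ping as soon as it gets scheduled.
        let _ = runtime.spawn(async move {
            while let Some(pong_send) = ping_receive.recv().await {
                let _ = pong_send.send(());
            }
        });

        // Watchdog thread: pings once per second and times the answer.
        let _ = std::thread::spawn(move || loop {
            std::thread::sleep(Duration::from_secs(1));
            let (pong_send, pong_receive) = crossbeam_channel::bounded(1);
            if futures::executor::block_on(ping_send.clone().send(pong_send)).is_err() {
                break; // responder task is gone, stop checking
            }
            if pong_receive.recv_timeout(Duration::from_millis(10)).is_err() {
                eprintln!("possible contention in the async threadpool");
            }
        });
    }
}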
/// Return the hashmap of ethereum chains and also add them to `blockchain_map`.
fn ethereum_networks_as_chains(
blockchain_map: &mut BlockchainMap,
logger: &Logger,
node_id: NodeId,
registry: Arc<MetricsRegistry>,
firehose_networks: Option<&FirehoseNetworks>,
eth_networks: &EthereumNetworks,
store: &Store,
chain_head_update_listener: Arc<ChainHeadUpdateListener>,
logger_factory: &LoggerFactory,
) -> HashMap<String, Arc<ethereum::Chain>> {
let chains: Vec<_> = eth_networks
.networks
.iter()
.filter_map(|(network_name, eth_adapters)| {
store
.block_store()
.chain_store(network_name)
.map(|chain_store| {
let is_ingestible = chain_store.is_ingestible();
(network_name, eth_adapters, chain_store, is_ingestible)
})
.or_else(|| {
error!(
logger,
"No store configured for Ethereum chain {}; ignoring this chain",
network_name
);
None
})
})
.map(|(network_name, eth_adapters, chain_store, is_ingestible)| {
let firehose_endpoints = firehose_networks.and_then(|v| v.networks.get(network_name));
let chain = ethereum::Chain::new(
logger_factory.clone(),
network_name.clone(),
node_id.clone(),
registry.clone(),
chain_store.cheap_clone(),
chain_store,
firehose_endpoints.map_or_else(|| FirehoseEndpoints::new(), |v| v.clone()),
eth_adapters.clone(),
chain_head_update_listener.clone(),
*REORG_THRESHOLD,
is_ingestible,
);
(network_name.clone(), Arc::new(chain))
})
.collect();
for (network_name, chain) in chains.iter().cloned() {
blockchain_map.insert::<graph_chain_ethereum::Chain>(network_name, chain)
}
HashMap::from_iter(chains)
}
fn tendermint_networks_as_chains(
blockchain_map: &mut BlockchainMap,
logger: &Logger,
firehose_networks: &FirehoseNetworks,
store: &Store,
logger_factory: &LoggerFactory,
) -> HashMap<String, FirehoseChain<tendermint::Chain>> {
let chains: Vec<_> = firehose_networks
.networks
.iter()
.filter_map(|(network_name, firehose_endpoints)| {
store
.block_store()
.chain_store(network_name)
.map(|chain_store| (network_name, chain_store, firehose_endpoints))
.or_else(|| {
error!(
logger,
"No store configured for Tendermint chain {}; ignoring this chain",
network_name
);
None
})
})
.map(|(network_name, chain_store, firehose_endpoints)| {
(
network_name.clone(),
FirehoseChain {
chain: Arc::new(tendermint::Chain::new(
logger_factory.clone(),
network_name.clone(),
chain_store,
firehose_endpoints.clone(),
)),
firehose_endpoints: firehose_endpoints.clone(),
},
)
})
.collect();
for (network_name, firehose_chain) in chains.iter() {
blockchain_map
.insert::<tendermint::Chain>(network_name.clone(), firehose_chain.chain.clone())
}
HashMap::from_iter(chains)
}
/// Return the hashmap of NEAR chains and also add them to `blockchain_map`.
fn near_networks_as_chains(
blockchain_map: &mut BlockchainMap,
logger: &Logger,
firehose_networks: &FirehoseNetworks,
store: &Store,
logger_factory: &LoggerFactory,
) -> HashMap<String, FirehoseChain<near::Chain>> {
let chains: Vec<_> = firehose_networks
.networks
.iter()
.filter_map(|(chain_id, endpoints)| {
store
.block_store()
.chain_store(chain_id)
.map(|chain_store| (chain_id, chain_store, endpoints))
.or_else(|| {
error!(
logger,
"No store configured for NEAR chain {}; ignoring this chain", chain_id
);
None
})
})
.map(|(chain_id, chain_store, endpoints)| {
(
chain_id.clone(),
FirehoseChain {
chain: Arc::new(near::Chain::new(
logger_factory.clone(),
chain_id.clone(),
chain_store,
endpoints.clone(),
)),
firehose_endpoints: endpoints.clone(),
},
)
})
.collect();
for (chain_id, firehose_chain) in chains.iter() {
blockchain_map
.insert::<graph_chain_near::Chain>(chain_id.clone(), firehose_chain.chain.clone())
}
HashMap::from_iter(chains)
}
fn start_block_ingestor(
logger: &Logger,
logger_factory: &LoggerFactory,
block_polling_interval: Duration,
chains: HashMap<String, Arc<ethereum::Chain>>,
) {
info!(
logger,
"Starting block ingestors with {} chains [{}]",
chains.len(),
chains
.keys()
.map(|v| v.clone())
.collect::<Vec<String>>()
.join(", ")
);
// Create Ethereum block ingestors and spawn a thread to run each
chains
.iter()
.filter(|(network_name, chain)| {
if !chain.is_ingestible {
error!(logger, "Not starting block ingestor (chain is defective)"; "network_name" => &network_name);
}
chain.is_ingestible
})
.for_each(|(network_name, chain)| {
info!(
logger,
"Starting block ingestor for network";
"network_name" => &network_name
);
let eth_adapter = chain.cheapest_adapter();
let logger = logger_factory
.component_logger(
"BlockIngestor",
Some(ComponentLoggerConfig {
elastic: Some(ElasticComponentLoggerConfig {
index: String::from("block-ingestor-logs"),
}),
}),
)
.new(o!("provider" => eth_adapter.provider().to_string()));
// The block ingestor must be configured to keep at least REORG_THRESHOLD ancestors,
// because the json-rpc BlockStream expects blocks after the reorg threshold to be
// present in the DB.
let block_ingestor = EthereumBlockIngestor::new(
logger,
*REORG_THRESHOLD,
eth_adapter,
chain.chain_store(),
block_polling_interval,
)
.expect("failed to create Ethereum block ingestor");
// Run the Ethereum block ingestor in the background
graph::spawn(block_ingestor.into_polling_stream());
});
}
#[derive(Clone)]
struct FirehoseChain<C: Blockchain> {
chain: Arc<C>,
firehose_endpoints: FirehoseEndpoints,
}
fn start_firehose_block_ingestor<C, M>(
logger: &Logger,
store: &Store,
chains: HashMap<String, FirehoseChain<C>>,
) where
C: Blockchain,
M: prost::Message + BlockchainBlock + Default + 'static,
{
info!(
logger,
"Starting firehose block ingestors with {} chains [{}]",
chains.len(),
chains
.keys()
.map(|v| v.clone())
.collect::<Vec<String>>()
.join(", ")
);
// Create Firehose block ingestors and spawn a thread to run each
chains
.iter()
.for_each(|(network_name, chain)| {
info!(
logger,
"Starting firehose block ingestor for network";
"network_name" => &network_name
);
let endpoint = chain
.firehose_endpoints
.random()
.expect("One Firehose endpoint should exist at that execution point");
match store.block_store().chain_store(network_name.as_ref()) {
Some(s) => {
let block_ingestor = FirehoseBlockIngestor::<M>::new(
s,
endpoint.clone(),
logger.new(o!("component" => "FirehoseBlockIngestor", "provider" => endpoint.provider.clone())),
);
// Run the Firehose block ingestor in the background
graph::spawn(block_ingestor.run());
},
None => {
error!(logger, "Not starting firehose block ingestor (no chain store available)"; "network_name" => &network_name);
}
}
});
} | link_resolver.clone(), |
render_app.py | import numpy as np
from prnet.utils.render import vis_of_vertices, render_texture
from scipy import ndimage
def get_visibility(vertices, triangles, h, w):
triangles = triangles.T
vertices_vis = vis_of_vertices(vertices.T, triangles, h, w)
vertices_vis = vertices_vis.astype(bool)
for k in range(2):
tri_vis = vertices_vis[triangles[0,:]] | vertices_vis[triangles[1,:]] | vertices_vis[triangles[2,:]]
ind = triangles[:, tri_vis]
vertices_vis[ind] = True
# for k in range(2):
# tri_vis = vertices_vis[triangles[0,:]] & vertices_vis[triangles[1,:]] & vertices_vis[triangles[2,:]]
# ind = triangles[:, tri_vis]
# vertices_vis[ind] = True
vertices_vis = vertices_vis.astype(np.float32) #1 for visible and 0 for non-visible
return vertices_vis
def get_uv_mask(vertices_vis, triangles, uv_coords, h, w, resolution):
triangles = triangles.T
vertices_vis = vertices_vis.astype(np.float32)
uv_mask = render_texture(uv_coords.T, vertices_vis[np.newaxis, :], triangles, resolution, resolution, 1)
uv_mask = np.squeeze(uv_mask > 0)
uv_mask = ndimage.binary_closing(uv_mask)
uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))
uv_mask = ndimage.binary_closing(uv_mask)
uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))
uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))
uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))
uv_mask = uv_mask.astype(np.float32)
return np.squeeze(uv_mask)
def get_depth_image(vertices, triangles, h, w, isShow = False):
z = vertices[:, 2:]
if isShow:
z = z/max(z)
depth_image = render_texture(vertices.T, z.T, triangles.T, h, w, 1)
return np.squeeze(depth_image) | get_uv_mask |
read_auxiliary.py | """This module provides auxiliary functions for the import process of the init file."""
import numpy as np
def create_attr_dict_est(init_dict, semipar=False, include_constant=False):
"""This function processes the imported initialization file so that it fulfills the
requirements for the subsequent estimation process.
"""
init_dict["AUX"] = {"init_values"}
init_values = []
if semipar is True:
if include_constant is True:
init_dict = add_constant(init_dict, semipar)
else:
pass
init_dict = read_keys_semipar(init_dict, init_values)
# semipar is False
else:
if include_constant is True:
init_dict = add_constant(init_dict, semipar)
else:
pass
init_dict = read_keys_par(init_dict, init_values)
init_dict = provide_auxiliary_information(init_dict, init_values)
return init_dict
def create_attr_dict_sim(init_dict):
"""This function processes the imported initialization file so that it fulfills the
requirements for the following simulation and estimation process.
"""
init_dict["AUX"] = {"init_values"}
init_values = []
init_dict = read_keys_par(init_dict, init_values)
init_dict = provide_auxiliary_information(init_dict, init_values)
return init_dict
def add_constant(init_dict, semipar=False):
"""The function checks if the user has provided a constant
for the relevant subsections:
["TREATED", "UNTREATED", "CHOICE"] for the parametric, and
["CHOICE"] for the semiparamteric estimation, respectively.
"""
if semipar is True:
if "const" not in init_dict["CHOICE"]["order"]:
init_dict["CHOICE"]["order"].insert(0, "const")
init_dict["CHOICE"]["params"] = np.array([1.0])
else:
pass
# semipar is False
else:
for key in ["TREATED", "UNTREATED", "CHOICE"]:
if "const" not in init_dict[key]["order"]:
init_dict[key]["order"].insert(0, "const")
init_dict[key]["params"] = np.array([1.0])
else:
pass
return init_dict
def read_keys_par(init_dict, init_values):
"""This function reads the information provided by the
["TREATED", "UNTREATED", "CHOICE", "DIST"] keys for
the simulation and parametric estimation.
"""
for key in ["TREATED", "UNTREATED", "CHOICE", "DIST"]:
if "params" in init_dict[key].keys():
init_dict[key]["params"] = np.array(init_dict[key]["params"])
init_values += list(init_dict[key]["params"])
else:
init_values += [0.0] * len(init_dict[key]["order"])
if np.all(init_dict["DIST"]["params"] == 0):
init_dict["DETERMINISTIC"] = True
else:
init_dict["DETERMINISTIC"] = False
return init_dict
def read_keys_semipar(init_dict, init_values):
"""This function reads the information provided by the
["TREATED", "UNTREATED", "CHOICE"] keys for
semiparametric estimation.
"""
for key in ["TREATED", "UNTREATED", "CHOICE"]:
if "params" in init_dict[key].keys():
init_dict[key]["params"] = np.array(init_dict[key]["params"])
init_values += list(init_dict[key]["params"])
else:
init_values += [0.0] * len(init_dict[key]["order"])
return init_dict
def provide_auxiliary_information(init_dict, init_values):
| """This function generates auxiliary information
given the parameters in the initialization dictionary
"""
num_covars = len(
set(
init_dict["TREATED"]["order"]
+ init_dict["UNTREATED"]["order"]
+ init_dict["CHOICE"]["order"]
)
)
covar_label = []
for section in ["TREATED", "UNTREATED", "CHOICE"]:
covar_label += [i for i in init_dict[section]["order"] if i not in covar_label]
    # Generate the AUX section that includes some additional auxiliary information
init_dict["AUX"] = {
"init_values": np.array(init_values),
"num_covars_choice": len(init_dict["CHOICE"]["order"]),
"num_covars_treated": len(init_dict["TREATED"]["order"]),
"num_covars_untreated": len(init_dict["UNTREATED"]["order"]),
"num_paras": len(init_values) + 1,
"num_covars": num_covars,
"labels": covar_label,
}
return init_dict |
|
router_test.go | package main
import (
"fmt"
"net/http"
"net/http/httptest"
"os"
"strconv"
"testing"
"github.com/astaxie/beego"
beegocontext "github.com/astaxie/beego/context"
"github.com/gin-gonic/gin"
"github.com/ihornet/gorouter"
"github.com/julienschmidt/httprouter"
"github.com/labstack/echo"
)
type (
Route struct {
Method string
Path string
}
)
var (
num = 0
static = []*Route{
{"GET", "/"},
{"GET", "/cmd.html"},
{"GET", "/code.html"},
{"GET", "/contrib.html"},
{"GET", "/contribute.html"},
{"GET", "/debugging_with_gdb.html"},
{"GET", "/docs.html"},
{"GET", "/effective_go.html"},
{"GET", "/files.log"},
{"GET", "/gccgo_contribute.html"},
{"GET", "/gccgo_install.html"},
{"GET", "/go-logo-black.png"},
{"GET", "/go-logo-blue.png"},
{"GET", "/go-logo-white.png"},
{"GET", "/go1.1.html"},
{"GET", "/go1.2.html"},
{"GET", "/go1.html"},
{"GET", "/go1compat.html"},
{"GET", "/go_faq.html"},
{"GET", "/go_mem.html"},
{"GET", "/go_spec.html"},
{"GET", "/help.html"},
{"GET", "/ie.css"},
{"GET", "/install-source.html"},
{"GET", "/install.html"},
{"GET", "/logo-153x55.png"},
{"GET", "/Makefile"},
{"GET", "/root.html"},
{"GET", "/share.png"},
{"GET", "/sieve.gif"},
{"GET", "/tos.html"},
{"GET", "/articles/"},
{"GET", "/articles/go_command.html"},
{"GET", "/articles/index.html"},
{"GET", "/articles/wiki/"},
{"GET", "/articles/wiki/edit.html"},
{"GET", "/articles/wiki/final-noclosure.go"},
{"GET", "/articles/wiki/final-noerror.go"},
{"GET", "/articles/wiki/final-parsetemplate.go"},
{"GET", "/articles/wiki/final-template.go"},
{"GET", "/articles/wiki/final.go"},
{"GET", "/articles/wiki/get.go"},
{"GET", "/articles/wiki/http-sample.go"},
{"GET", "/articles/wiki/index.html"},
{"GET", "/articles/wiki/Makefile"},
{"GET", "/articles/wiki/notemplate.go"},
{"GET", "/articles/wiki/part1-noerror.go"},
{"GET", "/articles/wiki/part1.go"},
{"GET", "/articles/wiki/part2.go"},
{"GET", "/articles/wiki/part3-errorhandling.go"},
{"GET", "/articles/wiki/part3.go"},
{"GET", "/articles/wiki/test.bash"},
{"GET", "/articles/wiki/test_edit.good"},
{"GET", "/articles/wiki/test_Test.txt.good"},
{"GET", "/articles/wiki/test_view.good"},
{"GET", "/articles/wiki/view.html"},
{"GET", "/codewalk/"},
{"GET", "/codewalk/codewalk.css"},
{"GET", "/codewalk/codewalk.js"},
{"GET", "/codewalk/codewalk.xml"},
{"GET", "/codewalk/functions.xml"},
{"GET", "/codewalk/markov.go"},
{"GET", "/codewalk/markov.xml"},
{"GET", "/codewalk/pig.go"},
{"GET", "/codewalk/popout.png"},
{"GET", "/codewalk/run"},
{"GET", "/codewalk/sharemem.xml"},
{"GET", "/codewalk/urlpoll.go"},
{"GET", "/devel/"},
{"GET", "/devel/release.html"},
{"GET", "/devel/weekly.html"},
{"GET", "/gopher/"},
{"GET", "/gopher/appenginegopher.jpg"},
{"GET", "/gopher/appenginegophercolor.jpg"},
{"GET", "/gopher/appenginelogo.gif"},
{"GET", "/gopher/bumper.png"},
{"GET", "/gopher/bumper192x108.png"},
{"GET", "/gopher/bumper320x180.png"},
{"GET", "/gopher/bumper480x270.png"},
{"GET", "/gopher/bumper640x360.png"},
{"GET", "/gopher/doc.png"},
{"GET", "/gopher/frontpage.png"},
{"GET", "/gopher/gopherbw.png"},
{"GET", "/gopher/gophercolor.png"},
{"GET", "/gopher/gophercolor16x16.png"},
{"GET", "/gopher/help.png"},
{"GET", "/gopher/pkg.png"},
{"GET", "/gopher/project.png"},
{"GET", "/gopher/ref.png"},
{"GET", "/gopher/run.png"},
{"GET", "/gopher/talks.png"},
{"GET", "/gopher/pencil/"},
{"GET", "/gopher/pencil/gopherhat.jpg"},
{"GET", "/gopher/pencil/gopherhelmet.jpg"},
{"GET", "/gopher/pencil/gophermega.jpg"},
{"GET", "/gopher/pencil/gopherrunning.jpg"},
{"GET", "/gopher/pencil/gopherswim.jpg"},
{"GET", "/gopher/pencil/gopherswrench.jpg"},
{"GET", "/play/"},
{"GET", "/play/fib.go"},
{"GET", "/play/hello.go"},
{"GET", "/play/life.go"},
{"GET", "/play/peano.go"},
{"GET", "/play/pi.go"},
{"GET", "/play/sieve.go"},
{"GET", "/play/solitaire.go"},
{"GET", "/play/tree.go"},
{"GET", "/progs/"},
{"GET", "/progs/cgo1.go"},
{"GET", "/progs/cgo2.go"},
{"GET", "/progs/cgo3.go"},
{"GET", "/progs/cgo4.go"},
{"GET", "/progs/defer.go"},
{"GET", "/progs/defer.out"},
{"GET", "/progs/defer2.go"},
{"GET", "/progs/defer2.out"},
{"GET", "/progs/eff_bytesize.go"},
{"GET", "/progs/eff_bytesize.out"},
{"GET", "/progs/eff_qr.go"},
{"GET", "/progs/eff_sequence.go"},
{"GET", "/progs/eff_sequence.out"},
{"GET", "/progs/eff_unused1.go"},
{"GET", "/progs/eff_unused2.go"},
{"GET", "/progs/error.go"},
{"GET", "/progs/error2.go"},
{"GET", "/progs/error3.go"},
{"GET", "/progs/error4.go"},
{"GET", "/progs/go1.go"},
{"GET", "/progs/gobs1.go"},
{"GET", "/progs/gobs2.go"},
{"GET", "/progs/image_draw.go"},
{"GET", "/progs/image_package1.go"},
{"GET", "/progs/image_package1.out"},
{"GET", "/progs/image_package2.go"},
{"GET", "/progs/image_package2.out"},
{"GET", "/progs/image_package3.go"},
{"GET", "/progs/image_package3.out"},
{"GET", "/progs/image_package4.go"},
{"GET", "/progs/image_package4.out"},
{"GET", "/progs/image_package5.go"},
{"GET", "/progs/image_package5.out"},
{"GET", "/progs/image_package6.go"},
{"GET", "/progs/image_package6.out"},
{"GET", "/progs/interface.go"},
{"GET", "/progs/interface2.go"},
{"GET", "/progs/interface2.out"},
{"GET", "/progs/json1.go"},
{"GET", "/progs/json2.go"},
{"GET", "/progs/json2.out"},
{"GET", "/progs/json3.go"},
{"GET", "/progs/json4.go"},
{"GET", "/progs/json5.go"},
{"GET", "/progs/run"},
{"GET", "/progs/slices.go"},
{"GET", "/progs/timeout1.go"},
{"GET", "/progs/timeout2.go"},
{"GET", "/progs/update.bash"},
}
githubAPI = []*Route{
// OAuth Authorizations
{"GET", "/authorizations"},
{"GET", "/authorizations/:id"},
{"POST", "/authorizations"},
//{"PUT", "/authorizations/clients/:client_id"},
//{"PATCH", "/authorizations/:id"},
{"DELETE", "/authorizations/:id"},
{"GET", "/applications/:client_id/tokens/:access_token"},
{"DELETE", "/applications/:client_id/tokens"},
{"DELETE", "/applications/:client_id/tokens/:access_token"},
// Activity
{"GET", "/events"},
{"GET", "/repos/:owner/:repo/events"},
{"GET", "/networks/:owner/:repo/events"},
{"GET", "/orgs/:org/events"},
{"GET", "/users/:user/received_events"},
{"GET", "/users/:user/received_events/public"},
{"GET", "/users/:user/events"},
{"GET", "/users/:user/events/public"},
{"GET", "/users/:user/events/orgs/:org"},
{"GET", "/feeds"},
{"GET", "/notifications"},
{"GET", "/repos/:owner/:repo/notifications"},
{"PUT", "/notifications"},
{"PUT", "/repos/:owner/:repo/notifications"},
{"GET", "/notifications/threads/:id"},
//{"PATCH", "/notifications/threads/:id"},
{"GET", "/notifications/threads/:id/subscription"},
{"PUT", "/notifications/threads/:id/subscription"},
{"DELETE", "/notifications/threads/:id/subscription"},
{"GET", "/repos/:owner/:repo/stargazers"},
{"GET", "/users/:user/starred"},
{"GET", "/user/starred"},
{"GET", "/user/starred/:owner/:repo"},
{"PUT", "/user/starred/:owner/:repo"},
{"DELETE", "/user/starred/:owner/:repo"},
{"GET", "/repos/:owner/:repo/subscribers"},
{"GET", "/users/:user/subscriptions"},
{"GET", "/user/subscriptions"},
{"GET", "/repos/:owner/:repo/subscription"},
{"PUT", "/repos/:owner/:repo/subscription"},
{"DELETE", "/repos/:owner/:repo/subscription"},
{"GET", "/user/subscriptions/:owner/:repo"},
{"PUT", "/user/subscriptions/:owner/:repo"},
{"DELETE", "/user/subscriptions/:owner/:repo"},
// Gists
{"GET", "/users/:user/gists"},
{"GET", "/gists"},
//{"GET", "/gists/public"},
//{"GET", "/gists/starred"},
{"GET", "/gists/:id"},
{"POST", "/gists"},
//{"PATCH", "/gists/:id"},
{"PUT", "/gists/:id/star"},
{"DELETE", "/gists/:id/star"},
{"GET", "/gists/:id/star"},
{"POST", "/gists/:id/forks"},
{"DELETE", "/gists/:id"},
// Git Data
{"GET", "/repos/:owner/:repo/git/blobs/:sha"},
{"POST", "/repos/:owner/:repo/git/blobs"},
{"GET", "/repos/:owner/:repo/git/commits/:sha"},
{"POST", "/repos/:owner/:repo/git/commits"},
//{"GET", "/repos/:owner/:repo/git/refs/*ref"},
{"GET", "/repos/:owner/:repo/git/refs"},
{"POST", "/repos/:owner/:repo/git/refs"},
//{"PATCH", "/repos/:owner/:repo/git/refs/*ref"},
//{"DELETE", "/repos/:owner/:repo/git/refs/*ref"},
{"GET", "/repos/:owner/:repo/git/tags/:sha"},
{"POST", "/repos/:owner/:repo/git/tags"},
{"GET", "/repos/:owner/:repo/git/trees/:sha"},
{"POST", "/repos/:owner/:repo/git/trees"},
// Issues
{"GET", "/issues"},
{"GET", "/user/issues"},
{"GET", "/orgs/:org/issues"},
{"GET", "/repos/:owner/:repo/issues"},
{"GET", "/repos/:owner/:repo/issues/:number"},
{"POST", "/repos/:owner/:repo/issues"},
//{"PATCH", "/repos/:owner/:repo/issues/:number"},
{"GET", "/repos/:owner/:repo/assignees"},
{"GET", "/repos/:owner/:repo/assignees/:assignee"},
{"GET", "/repos/:owner/:repo/issues/:number/comments"},
//{"GET", "/repos/:owner/:repo/issues/comments"},
//{"GET", "/repos/:owner/:repo/issues/comments/:id"},
{"POST", "/repos/:owner/:repo/issues/:number/comments"},
//{"PATCH", "/repos/:owner/:repo/issues/comments/:id"},
//{"DELETE", "/repos/:owner/:repo/issues/comments/:id"},
{"GET", "/repos/:owner/:repo/issues/:number/events"},
//{"GET", "/repos/:owner/:repo/issues/events"},
//{"GET", "/repos/:owner/:repo/issues/events/:id"},
{"GET", "/repos/:owner/:repo/labels"},
{"GET", "/repos/:owner/:repo/labels/:name"},
{"POST", "/repos/:owner/:repo/labels"},
//{"PATCH", "/repos/:owner/:repo/labels/:name"},
{"DELETE", "/repos/:owner/:repo/labels/:name"},
{"GET", "/repos/:owner/:repo/issues/:number/labels"},
{"POST", "/repos/:owner/:repo/issues/:number/labels"},
{"DELETE", "/repos/:owner/:repo/issues/:number/labels/:name"},
{"PUT", "/repos/:owner/:repo/issues/:number/labels"},
{"DELETE", "/repos/:owner/:repo/issues/:number/labels"},
{"GET", "/repos/:owner/:repo/milestones/:number/labels"},
{"GET", "/repos/:owner/:repo/milestones"},
{"GET", "/repos/:owner/:repo/milestones/:number"},
{"POST", "/repos/:owner/:repo/milestones"},
//{"PATCH", "/repos/:owner/:repo/milestones/:number"},
{"DELETE", "/repos/:owner/:repo/milestones/:number"},
// Miscellaneous
{"GET", "/emojis"},
{"GET", "/gitignore/templates"},
{"GET", "/gitignore/templates/:name"},
{"POST", "/markdown"},
{"POST", "/markdown/raw"},
{"GET", "/meta"},
{"GET", "/rate_limit"},
// Organizations
{"GET", "/users/:user/orgs"},
{"GET", "/user/orgs"},
{"GET", "/orgs/:org"},
//{"PATCH", "/orgs/:org"},
{"GET", "/orgs/:org/members"},
{"GET", "/orgs/:org/members/:user"},
{"DELETE", "/orgs/:org/members/:user"},
{"GET", "/orgs/:org/public_members"},
{"GET", "/orgs/:org/public_members/:user"},
{"PUT", "/orgs/:org/public_members/:user"},
{"DELETE", "/orgs/:org/public_members/:user"},
{"GET", "/orgs/:org/teams"},
{"GET", "/teams/:id"},
{"POST", "/orgs/:org/teams"},
//{"PATCH", "/teams/:id"},
{"DELETE", "/teams/:id"},
{"GET", "/teams/:id/members"},
{"GET", "/teams/:id/members/:user"},
{"PUT", "/teams/:id/members/:user"},
{"DELETE", "/teams/:id/members/:user"},
{"GET", "/teams/:id/repos"},
{"GET", "/teams/:id/repos/:owner/:repo"},
{"PUT", "/teams/:id/repos/:owner/:repo"},
{"DELETE", "/teams/:id/repos/:owner/:repo"},
{"GET", "/user/teams"},
// Pull Requests
{"GET", "/repos/:owner/:repo/pulls"},
{"GET", "/repos/:owner/:repo/pulls/:number"},
{"POST", "/repos/:owner/:repo/pulls"},
//{"PATCH", "/repos/:owner/:repo/pulls/:number"},
{"GET", "/repos/:owner/:repo/pulls/:number/commits"},
{"GET", "/repos/:owner/:repo/pulls/:number/files"},
{"GET", "/repos/:owner/:repo/pulls/:number/merge"},
{"PUT", "/repos/:owner/:repo/pulls/:number/merge"},
{"GET", "/repos/:owner/:repo/pulls/:number/comments"},
//{"GET", "/repos/:owner/:repo/pulls/comments"},
//{"GET", "/repos/:owner/:repo/pulls/comments/:number"},
{"PUT", "/repos/:owner/:repo/pulls/:number/comments"},
//{"PATCH", "/repos/:owner/:repo/pulls/comments/:number"},
//{"DELETE", "/repos/:owner/:repo/pulls/comments/:number"},
// Repositories
{"GET", "/user/repos"},
{"GET", "/users/:user/repos"},
{"GET", "/orgs/:org/repos"},
{"GET", "/repositories"},
{"POST", "/user/repos"},
{"POST", "/orgs/:org/repos"},
{"GET", "/repos/:owner/:repo"},
//{"PATCH", "/repos/:owner/:repo"},
{"GET", "/repos/:owner/:repo/contributors"},
{"GET", "/repos/:owner/:repo/languages"},
{"GET", "/repos/:owner/:repo/teams"},
{"GET", "/repos/:owner/:repo/tags"},
{"GET", "/repos/:owner/:repo/branches"},
{"GET", "/repos/:owner/:repo/branches/:branch"},
{"DELETE", "/repos/:owner/:repo"},
{"GET", "/repos/:owner/:repo/collaborators"},
{"GET", "/repos/:owner/:repo/collaborators/:user"},
{"PUT", "/repos/:owner/:repo/collaborators/:user"},
{"DELETE", "/repos/:owner/:repo/collaborators/:user"},
{"GET", "/repos/:owner/:repo/comments"}, | {"POST", "/repos/:owner/:repo/commits/:sha/comments"},
{"GET", "/repos/:owner/:repo/comments/:id"},
//{"PATCH", "/repos/:owner/:repo/comments/:id"},
{"DELETE", "/repos/:owner/:repo/comments/:id"},
{"GET", "/repos/:owner/:repo/commits"},
{"GET", "/repos/:owner/:repo/commits/:sha"},
{"GET", "/repos/:owner/:repo/readme"},
//{"GET", "/repos/:owner/:repo/contents/*path"},
//{"PUT", "/repos/:owner/:repo/contents/*path"},
//{"DELETE", "/repos/:owner/:repo/contents/*path"},
//{"GET", "/repos/:owner/:repo/:archive_format/:ref"},
{"GET", "/repos/:owner/:repo/keys"},
{"GET", "/repos/:owner/:repo/keys/:id"},
{"POST", "/repos/:owner/:repo/keys"},
//{"PATCH", "/repos/:owner/:repo/keys/:id"},
{"DELETE", "/repos/:owner/:repo/keys/:id"},
{"GET", "/repos/:owner/:repo/downloads"},
{"GET", "/repos/:owner/:repo/downloads/:id"},
{"DELETE", "/repos/:owner/:repo/downloads/:id"},
{"GET", "/repos/:owner/:repo/forks"},
{"POST", "/repos/:owner/:repo/forks"},
{"GET", "/repos/:owner/:repo/hooks"},
{"GET", "/repos/:owner/:repo/hooks/:id"},
{"POST", "/repos/:owner/:repo/hooks"},
//{"PATCH", "/repos/:owner/:repo/hooks/:id"},
{"POST", "/repos/:owner/:repo/hooks/:id/tests"},
{"DELETE", "/repos/:owner/:repo/hooks/:id"},
{"POST", "/repos/:owner/:repo/merges"},
{"GET", "/repos/:owner/:repo/releases"},
{"GET", "/repos/:owner/:repo/releases/:id"},
{"POST", "/repos/:owner/:repo/releases"},
//{"PATCH", "/repos/:owner/:repo/releases/:id"},
{"DELETE", "/repos/:owner/:repo/releases/:id"},
{"GET", "/repos/:owner/:repo/releases/:id/assets"},
{"GET", "/repos/:owner/:repo/stats/contributors"},
{"GET", "/repos/:owner/:repo/stats/commit_activity"},
{"GET", "/repos/:owner/:repo/stats/code_frequency"},
{"GET", "/repos/:owner/:repo/stats/participation"},
{"GET", "/repos/:owner/:repo/stats/punch_card"},
{"GET", "/repos/:owner/:repo/statuses/:ref"},
{"POST", "/repos/:owner/:repo/statuses/:ref"},
// Search
{"GET", "/search/repositories"},
{"GET", "/search/code"},
{"GET", "/search/issues"},
{"GET", "/search/users"},
{"GET", "/legacy/issues/search/:owner/:repository/:state/:keyword"},
{"GET", "/legacy/repos/search/:keyword"},
{"GET", "/legacy/user/search/:keyword"},
{"GET", "/legacy/user/email/:email"},
// Users
{"GET", "/users/:user"},
{"GET", "/user"},
//{"PATCH", "/user"},
{"GET", "/users"},
{"GET", "/user/emails"},
{"POST", "/user/emails"},
{"DELETE", "/user/emails"},
{"GET", "/users/:user/followers"},
{"GET", "/user/followers"},
{"GET", "/users/:user/following"},
{"GET", "/user/following"},
{"GET", "/user/following/:user"},
{"GET", "/users/:user/following/:target_user"},
{"PUT", "/user/following/:user"},
{"DELETE", "/user/following/:user"},
{"GET", "/users/:user/keys"},
{"GET", "/user/keys"},
{"GET", "/user/keys/:id"},
{"POST", "/user/keys"},
//{"PATCH", "/user/keys/:id"},
{"DELETE", "/user/keys/:id"},
}
gplusAPI = []*Route{
// People
{"GET", "/people/:userId"},
{"GET", "/people"},
{"GET", "/activities/:activityId/people/:collection"},
{"GET", "/people/:userId/people/:collection"},
{"GET", "/people/:userId/openIdConnect"},
// Activities
{"GET", "/people/:userId/activities/:collection"},
{"GET", "/activities/:activityId"},
{"GET", "/activities"},
// Comments
{"GET", "/activities/:activityId/comments"},
{"GET", "/comments/:commentId"},
// Moments
{"POST", "/people/:userId/moments/:collection"},
{"GET", "/people/:userId/moments/:collection"},
{"DELETE", "/moments/:id"},
}
parseAPI = []*Route{
// Objects
{"POST", "/1/classes/:className"},
{"GET", "/1/classes/:className/:objectId"},
{"PUT", "/1/classes/:className/:objectId"},
{"GET", "/1/classes/:className"},
{"DELETE", "/1/classes/:className/:objectId"},
// Users
{"POST", "/1/users"},
{"GET", "/1/login"},
{"GET", "/1/users/:objectId"},
{"PUT", "/1/users/:objectId"},
{"GET", "/1/users"},
{"DELETE", "/1/users/:objectId"},
{"POST", "/1/requestPasswordReset"},
// Roles
{"POST", "/1/roles"},
{"GET", "/1/roles/:objectId"},
{"PUT", "/1/roles/:objectId"},
{"GET", "/1/roles"},
{"DELETE", "/1/roles/:objectId"},
// Files
{"POST", "/1/files/:fileName"},
// Analytics
{"POST", "/1/events/:eventName"},
// Push Notifications
{"POST", "/1/push"},
// Installations
{"POST", "/1/installations"},
{"GET", "/1/installations/:objectId"},
{"PUT", "/1/installations/:objectId"},
{"GET", "/1/installations"},
{"DELETE", "/1/installations/:objectId"},
// Cloud Functions
{"POST", "/1/functions"},
}
apis = [][]*Route{githubAPI, gplusAPI, parseAPI}
)
func init() {
str := os.Getenv("NUM")
if len(str) > 0 {
n, err := strconv.Atoi(str)
if err == nil {
num = n
}
}
}
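// benchmarkRoutes replays every route in the list against the router b.N times,
// reusing a single request and response recorder.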
func benchmarkRoutes(b *testing.B, router http.Handler, routes []*Route) {
b.ReportAllocs()
r := httptest.NewRequest("GET", "/", nil)
u := r.URL
w := httptest.NewRecorder()
for i := 0; i < b.N; i++ {
for _, route := range routes {
r.Method = route.Method
u.Path = route.Path
r.URL.Path = route.Path
router.ServeHTTP(w, r)
}
}
}
// echo
func loadEchoRoutes(e *echo.Echo, routes []*Route) {
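// stack NUM pass-through middlewares so middleware overhead shows up in the results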
for i := 0; i < num; i++ {
e.Use(func(h echo.HandlerFunc) echo.HandlerFunc {
return func(ctx echo.Context) error {
return h(ctx)
}
})
}
for _, r := range routes {
switch r.Method {
case "GET":
e.GET(r.Path, echoHandler(r.Method, r.Path))
case "POST":
e.POST(r.Path, echoHandler(r.Method, r.Path))
case "PATCH":
e.PATCH(r.Path, echoHandler(r.Method, r.Path))
case "PUT":
e.PUT(r.Path, echoHandler(r.Method, r.Path))
case "DELETE":
e.DELETE(r.Path, echoHandler(r.Method, r.Path))
}
}
}
func echoHandler(method, path string) echo.HandlerFunc {
return func(c echo.Context) error {
return c.String(http.StatusOK, "OK")
}
}
func Benchmark_Echo_Static(b *testing.B) {
e := echo.New()
loadEchoRoutes(e, static)
benchmarkRoutes(b, e, static)
}
func Benchmark_Echo_GitHubAPI(b *testing.B) {
e := echo.New()
loadEchoRoutes(e, githubAPI)
benchmarkRoutes(b, e, githubAPI)
}
func Benchmark_Echo_GplusAPI(b *testing.B) {
e := echo.New()
loadEchoRoutes(e, gplusAPI)
benchmarkRoutes(b, e, gplusAPI)
}
func Benchmark_Echo_ParseAPI(b *testing.B) {
e := echo.New()
loadEchoRoutes(e, parseAPI)
benchmarkRoutes(b, e, parseAPI)
}
// gorouter
func loadGorouterRoutes(e *gorouter.Router, routes []*Route) {
for _, r := range routes {
switch r.Method {
case "GET":
e.GET(r.Path, goHandler(r.Method, r.Path))
case "POST":
e.POST(r.Path, goHandler(r.Method, r.Path))
case "PUT":
e.PUT(r.Path, goHandler(r.Method, r.Path))
case "DELETE":
e.DELETE(r.Path, goHandler(r.Method, r.Path))
}
}
}
func goHandler(method, path string) gorouter.Handle {
return func(w http.ResponseWriter, req *http.Request, p *gorouter.Param) {
w.WriteHeader(http.StatusOK)
w.Write([]byte("OK"))
}
}
func Benchmark_Gorouter_Static(b *testing.B) {
e := gorouter.New()
loadGorouterRoutes(e, static)
benchmarkRoutes(b, e, static)
}
func Benchmark_Gorouter_GitHubAPI(b *testing.B) {
e := gorouter.New()
loadGorouterRoutes(e, githubAPI)
benchmarkRoutes(b, e, githubAPI)
}
func Benchmark_Gorouter_GplusAPI(b *testing.B) {
e := gorouter.New()
loadGorouterRoutes(e, gplusAPI)
benchmarkRoutes(b, e, gplusAPI)
}
func Benchmark_Gorouter_ParseAPI(b *testing.B) {
e := gorouter.New()
loadGorouterRoutes(e, parseAPI)
benchmarkRoutes(b, e, parseAPI)
}
// gin
func loadGinRoutes(g *gin.Engine, routes []*Route) {
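// stack NUM empty handlers so gin's middleware overhead is measured as well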
for i := 0; i < num; i++ {
g.Use(func(*gin.Context) {})
}
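// SP, when set, adds a middleware that prints the matched route template on every request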
if os.Getenv("SP") != "" {
g.Use(func(ctx *gin.Context) {
fmt.Println("gin path", ctx.FullPath())
})
}
for _, r := range routes {
switch r.Method {
case "GET":
g.GET(r.Path, ginHandler(r.Method, r.Path))
case "POST":
g.POST(r.Path, ginHandler(r.Method, r.Path))
case "PATCH":
g.PATCH(r.Path, ginHandler(r.Method, r.Path))
case "PUT":
g.PUT(r.Path, ginHandler(r.Method, r.Path))
case "DELETE":
g.DELETE(r.Path, ginHandler(r.Method, r.Path))
}
}
}
func ginHandler(method, path string) gin.HandlerFunc {
return func(c *gin.Context) {
c.String(http.StatusOK, "OK")
}
}
func Benchmark_Gin_Static(b *testing.B) {
gin.SetMode(gin.ReleaseMode)
g := gin.New()
loadGinRoutes(g, static)
benchmarkRoutes(b, g, static)
}
func Benchmark_Gin_GitHubAPI(b *testing.B) {
gin.SetMode(gin.ReleaseMode)
g := gin.New()
loadGinRoutes(g, githubAPI)
benchmarkRoutes(b, g, githubAPI)
}
func Benchmark_Gin_GplusAPI(b *testing.B) {
gin.SetMode(gin.ReleaseMode)
g := gin.New()
loadGinRoutes(g, gplusAPI)
benchmarkRoutes(b, g, gplusAPI)
}
func Benchmark_Gin_ParseAPI(b *testing.B) {
gin.SetMode(gin.ReleaseMode)
g := gin.New()
loadGinRoutes(g, parseAPI)
benchmarkRoutes(b, g, parseAPI)
}
// beego
func loadBeegoRoutes(app *beego.App, routes []*Route) {
for _, r := range routes {
switch r.Method {
case "GET":
app.Handlers.Get(r.Path, beegoHandler(r.Method, r.Path))
case "POST":
app.Handlers.Post(r.Path, beegoHandler(r.Method, r.Path))
case "PATCH":
app.Handlers.Patch(r.Path, beegoHandler(r.Method, r.Path))
case "PUT":
app.Handlers.Put(r.Path, beegoHandler(r.Method, r.Path))
case "DELETE":
app.Handlers.Delete(r.Path, beegoHandler(r.Method, r.Path))
}
}
}
func beegoHandler(method, path string) beego.FilterFunc {
return func(ctx *beegocontext.Context) {
ctx.Output.Body([]byte("OK"))
}
}
func Benchmark_Beego_Static(b *testing.B) {
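// skip when extra middlewares are requested via NUM; beego is not wired for them in this benchmark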
if num > 0 {
return
}
beego.SetLevel(beego.LevelEmergency)
app := beego.NewApp()
loadBeegoRoutes(app, static)
benchmarkRoutes(b, app.Handlers, static)
}
func Benchmark_Beego_GitHubAPI(b *testing.B) {
if num > 0 {
return
}
app := beego.NewApp()
loadBeegoRoutes(app, githubAPI)
benchmarkRoutes(b, app.Handlers, githubAPI)
}
func Benchmark_Beego_GplusAPI(b *testing.B) {
if num > 0 {
return
}
app := beego.NewApp()
loadBeegoRoutes(app, gplusAPI)
benchmarkRoutes(b, app.Handlers, gplusAPI)
}
func Benchmark_Beego_ParseAPI(b *testing.B) {
if num > 0 {
return
}
app := beego.NewApp()
loadBeegoRoutes(app, parseAPI)
benchmarkRoutes(b, app.Handlers, parseAPI)
}
// httprouter
func loadHttprouterRoutes(g *httprouter.Router, routes []*Route) {
for _, r := range routes {
switch r.Method {
case "GET":
g.GET(r.Path, httprouterHandler(r.Method, r.Path))
case "POST":
g.POST(r.Path, httprouterHandler(r.Method, r.Path))
case "PATCH":
g.PATCH(r.Path, httprouterHandler(r.Method, r.Path))
case "PUT":
g.PUT(r.Path, httprouterHandler(r.Method, r.Path))
case "DELETE":
g.DELETE(r.Path, httprouterHandler(r.Method, r.Path))
}
}
}
func httprouterHandler(method, path string) httprouter.Handle {
return func(w http.ResponseWriter, req *http.Request, p httprouter.Params) {
w.WriteHeader(http.StatusOK)
w.Write([]byte("OK"))
}
}
func Benchmark_Httprouter_Static(b *testing.B) {
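// httprouter gets no extra middlewares either, so skip when NUM is set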
if num > 0 {
return
}
router := httprouter.New()
loadHttprouterRoutes(router, static)
benchmarkRoutes(b, router, static)
}
func Benchmark_Httprouter_GitHubAPI(b *testing.B) {
if num > 0 {
return
}
router := httprouter.New()
loadHttprouterRoutes(router, githubAPI)
benchmarkRoutes(b, router, githubAPI)
}
func Benchmark_Httprouter_GplusAPI(b *testing.B) {
if num > 0 {
return
}
router := httprouter.New()
loadHttprouterRoutes(router, gplusAPI)
benchmarkRoutes(b, router, gplusAPI)
}
func Benchmark_Httprouter_ParseAPI(b *testing.B) {
if num > 0 {
return
}
router := httprouter.New()
loadHttprouterRoutes(router, parseAPI)
benchmarkRoutes(b, router, parseAPI)
} | {"GET", "/repos/:owner/:repo/commits/:sha/comments"}, |
walletTransaction.routes.js | // 1. import module Router
import { Router } from 'express'; |
const router = Router();
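// CRUD endpoints backed by the walletTransaction controller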
router.get('/', indexCtrl.walletTransaction.readWalletTransaction);
router.post('/insertWalletTransaction',indexCtrl.walletTransaction.addWalletTransaction);
router.put('/updateWalletTransaction/:watr_id',indexCtrl.walletTransaction.editWalletTransaction);
router.delete('/deleteWalletTransaction/:watr_id',indexCtrl.walletTransaction.deleteWalletTransaction);
export default router; | import indexCtrl from '../controllers/IndexController' |
json.rs | //! A JSON emitter for errors.
//!
//! This works by converting errors to a simplified structural format (see the
//! structs at the start of the file) and then serializing them. These should
//! contain as much information about the error as possible.
//!
//! The format of the JSON output should be considered *unstable*. For now the
//! structs at the end of this file (Diagnostic*) specify the error format.
// FIXME: spec the JSON output properly.
use rustc_span::source_map::{FilePathMapping, SourceMap};
use crate::emitter::{Emitter, HumanReadableErrorType};
use crate::registry::Registry;
use crate::DiagnosticId;
use crate::ToolMetadata;
use crate::{CodeSuggestion, FluentBundle, MultiSpan, SpanLabel, SubDiagnostic};
use rustc_lint_defs::Applicability;
use rustc_data_structures::sync::Lrc;
use rustc_error_messages::FluentArgs;
use rustc_span::hygiene::ExpnData;
use rustc_span::Span;
use std::io::{self, Write};
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::vec;
use rustc_serialize::json::{as_json, as_pretty_json};
use rustc_serialize::{Encodable, Encoder};
#[cfg(test)]
mod tests;
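/// Emitter that writes each diagnostic as JSON (compact or pretty-printed) to `dst`.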
pub struct JsonEmitter {
dst: Box<dyn Write + Send>,
registry: Option<Registry>,
sm: Lrc<SourceMap>,
fluent_bundle: Option<Lrc<FluentBundle>>,
fallback_bundle: Lrc<FluentBundle>,
pretty: bool,
ui_testing: bool,
json_rendered: HumanReadableErrorType,
terminal_width: Option<usize>,
macro_backtrace: bool,
}
impl JsonEmitter {
pub fn stderr(
registry: Option<Registry>,
source_map: Lrc<SourceMap>,
fluent_bundle: Option<Lrc<FluentBundle>>,
fallback_bundle: Lrc<FluentBundle>,
pretty: bool,
json_rendered: HumanReadableErrorType,
terminal_width: Option<usize>,
macro_backtrace: bool,
) -> JsonEmitter {
JsonEmitter {
dst: Box::new(io::BufWriter::new(io::stderr())),
registry,
sm: source_map,
fluent_bundle,
fallback_bundle,
pretty,
ui_testing: false,
json_rendered,
terminal_width,
macro_backtrace,
}
}
pub fn basic(
pretty: bool,
json_rendered: HumanReadableErrorType,
fluent_bundle: Option<Lrc<FluentBundle>>,
fallback_bundle: Lrc<FluentBundle>,
terminal_width: Option<usize>,
macro_backtrace: bool,
) -> JsonEmitter {
let file_path_mapping = FilePathMapping::empty();
JsonEmitter::stderr(
None,
Lrc::new(SourceMap::new(file_path_mapping)),
fluent_bundle,
fallback_bundle,
pretty,
json_rendered,
terminal_width,
macro_backtrace,
)
}
pub fn new(
dst: Box<dyn Write + Send>,
registry: Option<Registry>,
source_map: Lrc<SourceMap>,
fluent_bundle: Option<Lrc<FluentBundle>>,
fallback_bundle: Lrc<FluentBundle>,
pretty: bool,
json_rendered: HumanReadableErrorType,
terminal_width: Option<usize>,
macro_backtrace: bool,
) -> JsonEmitter {
JsonEmitter {
dst,
registry,
sm: source_map,
fluent_bundle,
fallback_bundle,
pretty,
ui_testing: false,
json_rendered,
terminal_width,
macro_backtrace,
}
}
pub fn ui_testing(self, ui_testing: bool) -> Self {
Self { ui_testing, ..self }
}
}
impl Emitter for JsonEmitter {
fn emit_diagnostic(&mut self, diag: &crate::Diagnostic) {
let data = Diagnostic::from_errors_diagnostic(diag, self);
let result = if self.pretty {
writeln!(&mut self.dst, "{}", as_pretty_json(&data))
} else {
writeln!(&mut self.dst, "{}", as_json(&data))
}
.and_then(|_| self.dst.flush());
if let Err(e) = result {
panic!("failed to print diagnostics: {:?}", e);
}
}
fn emit_artifact_notification(&mut self, path: &Path, artifact_type: &str) {
let data = ArtifactNotification { artifact: path, emit: artifact_type };
let result = if self.pretty {
writeln!(&mut self.dst, "{}", as_pretty_json(&data))
} else {
writeln!(&mut self.dst, "{}", as_json(&data))
}
.and_then(|_| self.dst.flush());
if let Err(e) = result {
panic!("failed to print notification: {:?}", e);
}
}
fn emit_future_breakage_report(&mut self, diags: Vec<crate::Diagnostic>) {
let data: Vec<FutureBreakageItem> = diags
.into_iter()
.map(|mut diag| {
if diag.level == crate::Level::Allow {
diag.level = crate::Level::Warning;
}
FutureBreakageItem { diagnostic: Diagnostic::from_errors_diagnostic(&diag, self) }
})
.collect();
let report = FutureIncompatReport { future_incompat_report: data };
let result = if self.pretty {
writeln!(&mut self.dst, "{}", as_pretty_json(&report))
} else {
writeln!(&mut self.dst, "{}", as_json(&report))
}
.and_then(|_| self.dst.flush());
if let Err(e) = result {
panic!("failed to print future breakage report: {:?}", e);
}
}
fn emit_unused_externs(&mut self, lint_level: &str, unused_externs: &[&str]) {
let data = UnusedExterns { lint_level, unused_extern_names: unused_externs };
let result = if self.pretty {
writeln!(&mut self.dst, "{}", as_pretty_json(&data))
} else {
writeln!(&mut self.dst, "{}", as_json(&data))
}
.and_then(|_| self.dst.flush());
if let Err(e) = result {
panic!("failed to print unused externs: {:?}", e);
}
}
fn source_map(&self) -> Option<&Lrc<SourceMap>> {
Some(&self.sm)
}
fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>> {
self.fluent_bundle.as_ref()
}
fn fallback_fluent_bundle(&self) -> &Lrc<FluentBundle> {
&self.fallback_bundle
}
fn should_show_explain(&self) -> bool {
!matches!(self.json_rendered, HumanReadableErrorType::Short(_))
}
}
// The following data types are provided just for serialisation.
// NOTE: this has a manual implementation of Encodable which needs to be updated in
// parallel.
struct Diagnostic {
/// The primary error message.
message: String,
code: Option<DiagnosticCode>,
/// "error: internal compiler error", "error", "warning", "note", "help".
level: &'static str,
spans: Vec<DiagnosticSpan>,
/// Associated diagnostic messages.
children: Vec<Diagnostic>,
/// The message as rustc would render it.
rendered: Option<String>,
/// Extra tool metadata
tool_metadata: ToolMetadata,
}
macro_rules! encode_fields {
(
$enc:expr, // encoder
$idx:expr, // starting field index
$struct:expr, // struct we're serializing
$struct_name:ident, // struct name
[ $($name:ident),+$(,)? ], // fields to encode
[ $($ignore:ident),+$(,)? ] // fields we're skipping
) => {
{
// Pattern match to make sure all fields are accounted for
let $struct_name { $($name,)+ $($ignore: _,)+ } = $struct;
let mut idx = $idx;
$(
$enc.emit_struct_field(
stringify!($name),
idx == 0,
|enc| $name.encode(enc),
)?;
idx += 1;
)+
idx
}
};
}
// Special-case encoder to skip tool_metadata if not set
impl<E: Encoder> Encodable<E> for Diagnostic {
fn encode(&self, s: &mut E) -> Result<(), E::Error> {
s.emit_struct(false, |s| {
let mut idx = 0;
idx = encode_fields!(
s,
idx,
self,
Self,
[message, code, level, spans, children, rendered],
[tool_metadata]
);
if self.tool_metadata.is_set() {
idx = encode_fields!(
s,
idx,
self,
Self,
[tool_metadata],
[message, code, level, spans, children, rendered]
);
}
let _ = idx;
Ok(())
})
}
}
#[derive(Encodable)]
struct DiagnosticSpan {
file_name: String,
byte_start: u32,
byte_end: u32,
/// 1-based.
line_start: usize,
line_end: usize,
/// 1-based, character offset.
column_start: usize,
column_end: usize,
/// Is this a "primary" span -- meaning the point, or one of the points,
/// where the error occurred?
is_primary: bool,
/// Source text from the start of line_start to the end of line_end.
text: Vec<DiagnosticSpanLine>,
/// Label that should be placed at this location (if any)
label: Option<String>,
/// If we are suggesting a replacement, this will contain text
/// that should be sliced in atop this span.
suggested_replacement: Option<String>,
/// If the suggestion is approximate
suggestion_applicability: Option<Applicability>,
/// Macro invocations that created the code at this span, if any.
expansion: Option<Box<DiagnosticSpanMacroExpansion>>,
}
#[derive(Encodable)]
struct DiagnosticSpanLine {
text: String,
/// 1-based, character offset in self.text.
highlight_start: usize,
highlight_end: usize,
}
#[derive(Encodable)]
struct DiagnosticSpanMacroExpansion {
/// span where macro was applied to generate this code; note that
/// this may itself derive from a macro (if
/// `span.expansion.is_some()`)
span: DiagnosticSpan,
/// name of macro that was applied (e.g., "foo!" or "#[derive(Eq)]")
macro_decl_name: String,
/// span where macro was defined (if known)
def_site_span: DiagnosticSpan,
}
#[derive(Encodable)]
struct DiagnosticCode {
/// The code itself.
code: String,
/// An explanation for the code.
explanation: Option<&'static str>,
}
#[derive(Encodable)]
struct ArtifactNotification<'a> {
/// The path of the artifact.
artifact: &'a Path,
/// What kind of artifact we're emitting.
emit: &'a str,
}
#[derive(Encodable)]
struct FutureBreakageItem {
diagnostic: Diagnostic,
}
#[derive(Encodable)]
struct FutureIncompatReport {
future_incompat_report: Vec<FutureBreakageItem>,
}
// NOTE: Keep this in sync with the equivalent structs in rustdoc's
// doctest component (as well as cargo).
// We could unify this struct with the one in rustdoc but they have different
// ownership semantics, so doing so would create wasteful allocations.
#[derive(Encodable)]
struct | <'a, 'b, 'c> {
/// The severity level of the unused dependencies lint
lint_level: &'a str,
/// List of unused externs by their names.
unused_extern_names: &'b [&'c str],
}
impl Diagnostic {
fn from_errors_diagnostic(diag: &crate::Diagnostic, je: &JsonEmitter) -> Diagnostic {
let args = je.to_fluent_args(diag.args());
let sugg = diag.suggestions.iter().flatten().map(|sugg| {
let translated_message = je.translate_message(&sugg.msg, &args);
Diagnostic {
message: translated_message.to_string(),
code: None,
level: "help",
spans: DiagnosticSpan::from_suggestion(sugg, &args, je),
children: vec![],
rendered: None,
tool_metadata: sugg.tool_metadata.clone(),
}
});
// generate regular command line output and store it in the json
// A threadsafe buffer for writing.
#[derive(Default, Clone)]
struct BufWriter(Arc<Mutex<Vec<u8>>>);
impl Write for BufWriter {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.lock().unwrap().write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.0.lock().unwrap().flush()
}
}
let buf = BufWriter::default();
let output = buf.clone();
je.json_rendered
.new_emitter(
Box::new(buf),
Some(je.sm.clone()),
je.fluent_bundle.clone(),
je.fallback_bundle.clone(),
false,
je.terminal_width,
je.macro_backtrace,
)
.ui_testing(je.ui_testing)
.emit_diagnostic(diag);
let output = Arc::try_unwrap(output.0).unwrap().into_inner().unwrap();
let output = String::from_utf8(output).unwrap();
let translated_message = je.translate_messages(&diag.message, &args);
Diagnostic {
message: translated_message.to_string(),
code: DiagnosticCode::map_opt_string(diag.code.clone(), je),
level: diag.level.to_str(),
spans: DiagnosticSpan::from_multispan(&diag.span, &args, je),
children: diag
.children
.iter()
.map(|c| Diagnostic::from_sub_diagnostic(c, &args, je))
.chain(sugg)
.collect(),
rendered: Some(output),
tool_metadata: ToolMetadata::default(),
}
}
fn from_sub_diagnostic(
diag: &SubDiagnostic,
args: &FluentArgs<'_>,
je: &JsonEmitter,
) -> Diagnostic {
let translated_message = je.translate_messages(&diag.message, args);
Diagnostic {
message: translated_message.to_string(),
code: None,
level: diag.level.to_str(),
spans: diag
.render_span
.as_ref()
.map(|sp| DiagnosticSpan::from_multispan(sp, args, je))
.unwrap_or_else(|| DiagnosticSpan::from_multispan(&diag.span, args, je)),
children: vec![],
rendered: None,
tool_metadata: ToolMetadata::default(),
}
}
}
impl DiagnosticSpan {
fn from_span_label(
span: SpanLabel,
suggestion: Option<(&String, Applicability)>,
args: &FluentArgs<'_>,
je: &JsonEmitter,
) -> DiagnosticSpan {
Self::from_span_etc(
span.span,
span.is_primary,
span.label.as_ref().map(|m| je.translate_message(m, args)).map(|m| m.to_string()),
suggestion,
je,
)
}
fn from_span_etc(
span: Span,
is_primary: bool,
label: Option<String>,
suggestion: Option<(&String, Applicability)>,
je: &JsonEmitter,
) -> DiagnosticSpan {
// obtain the full backtrace from the `macro_backtrace`
// helper; in some ways, it'd be better to expand the
// backtrace ourselves, but the `macro_backtrace` helper makes
// some decisions, such as dropping some frames, and I don't
// want to duplicate that logic here.
let backtrace = span.macro_backtrace();
DiagnosticSpan::from_span_full(span, is_primary, label, suggestion, backtrace, je)
}
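// Converts the span to file/line/column form and, if it originated in a macro,
// records the expansion chain as nested `DiagnosticSpanMacroExpansion`s.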
fn from_span_full(
span: Span,
is_primary: bool,
label: Option<String>,
suggestion: Option<(&String, Applicability)>,
mut backtrace: impl Iterator<Item = ExpnData>,
je: &JsonEmitter,
) -> DiagnosticSpan {
let start = je.sm.lookup_char_pos(span.lo());
let end = je.sm.lookup_char_pos(span.hi());
let backtrace_step = backtrace.next().map(|bt| {
let call_site = Self::from_span_full(bt.call_site, false, None, None, backtrace, je);
let def_site_span = Self::from_span_full(
je.sm.guess_head_span(bt.def_site),
false,
None,
None,
[].into_iter(),
je,
);
Box::new(DiagnosticSpanMacroExpansion {
span: call_site,
macro_decl_name: bt.kind.descr(),
def_site_span,
})
});
DiagnosticSpan {
file_name: je.sm.filename_for_diagnostics(&start.file.name).to_string(),
byte_start: start.file.original_relative_byte_pos(span.lo()).0,
byte_end: start.file.original_relative_byte_pos(span.hi()).0,
line_start: start.line,
line_end: end.line,
column_start: start.col.0 + 1,
column_end: end.col.0 + 1,
is_primary,
text: DiagnosticSpanLine::from_span(span, je),
suggested_replacement: suggestion.map(|x| x.0.clone()),
suggestion_applicability: suggestion.map(|x| x.1),
expansion: backtrace_step,
label,
}
}
fn from_multispan(
msp: &MultiSpan,
args: &FluentArgs<'_>,
je: &JsonEmitter,
) -> Vec<DiagnosticSpan> {
msp.span_labels()
.into_iter()
.map(|span_str| Self::from_span_label(span_str, None, args, je))
.collect()
}
fn from_suggestion(
suggestion: &CodeSuggestion,
args: &FluentArgs<'_>,
je: &JsonEmitter,
) -> Vec<DiagnosticSpan> {
suggestion
.substitutions
.iter()
.flat_map(|substitution| {
substitution.parts.iter().map(move |suggestion_inner| {
let span_label =
SpanLabel { span: suggestion_inner.span, is_primary: true, label: None };
DiagnosticSpan::from_span_label(
span_label,
Some((&suggestion_inner.snippet, suggestion.applicability)),
args,
je,
)
})
})
.collect()
}
}
impl DiagnosticSpanLine {
fn line_from_source_file(
sf: &rustc_span::SourceFile,
index: usize,
h_start: usize,
h_end: usize,
) -> DiagnosticSpanLine {
DiagnosticSpanLine {
text: sf.get_line(index).map_or_else(String::new, |l| l.into_owned()),
highlight_start: h_start,
highlight_end: h_end,
}
}
/// Creates a list of DiagnosticSpanLines from span - each line with any part
/// of `span` gets a DiagnosticSpanLine, with the highlight indicating the
/// `span` within the line.
fn from_span(span: Span, je: &JsonEmitter) -> Vec<DiagnosticSpanLine> {
je.sm
.span_to_lines(span)
.map(|lines| {
// We can't get any lines if the source is unavailable.
if !je.sm.ensure_source_file_source_present(lines.file.clone()) {
return vec![];
}
let sf = &*lines.file;
lines
.lines
.iter()
.map(|line| {
DiagnosticSpanLine::line_from_source_file(
sf,
line.line_index,
line.start_col.0 + 1,
line.end_col.0 + 1,
)
})
.collect()
})
.unwrap_or_else(|_| vec![])
}
}
impl DiagnosticCode {
fn map_opt_string(s: Option<DiagnosticId>, je: &JsonEmitter) -> Option<DiagnosticCode> {
s.map(|s| {
let s = match s {
DiagnosticId::Error(s) => s,
DiagnosticId::Lint { name, .. } => name,
};
let je_result =
je.registry.as_ref().map(|registry| registry.try_find_description(&s)).unwrap();
DiagnosticCode { code: s, explanation: je_result.unwrap_or(None) }
})
}
}
| UnusedExterns |
model.go | package agent
// RespGetAgent is the response for querying an agent (application).
type RespGetAgent struct {
RespCommon
AgentID int64 `json:"agentid"`
Name string `json:"name"`
SquareLogoURL string `json:"square_logo_url"`
Description string `json:"description"`
AllowUserinfos struct {
User []struct {
Userid string `json:"userid"`
} `json:"user"`
} `json:"allow_userinfos"`
AllowPartys struct {
Partyid []int `json:"partyid"`
} `json:"allow_partys"`
AllowTags struct {
Tagid []int `json:"tagid"`
} `json:"allow_tags"`
Close int `json:"close"`
RedirectDomain string `json:"redirect_domain"`
ReportLocationFlag int `json:"report_location_flag"`
Isreportenter int `json:"isreportenter"`
HomeURL string `json:"home_url"`
}
// ReqSetAgent is the request for updating an agent's (application's) settings.
type ReqSetAgent struct {
AgentID int64 `json:"agentid"`
ReportLocationFlag int `json:"report_location_flag,omitempty"`
LogoMediaID string `json:"logo_mediaid,omitempty"`
Name string `json:"name,omitempty"`
Description string `json:"description,omitempty"`
RedirectDomain string `json:"redirect_domain,omitempty"`
IsReportEnter int `json:"isreportenter,omitempty"`
HomeURL string `json:"home_url,omitempty"`
}
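// RespListAgents is the response for listing agents (applications).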
type RespListAgents struct {
*RespCommon
AgentList []AgentItem `json:"agentlist"`
}
type AgentItem struct { | Agentid int `json:"agentid"`
Name string `json:"name"`
SquareLogoURL string `json:"square_logo_url"`
}
// RespCommon Comman Response Struct
type RespCommon struct {
ErrCode int `json:"errcode"`
ErrMsg string `json:"errmsg"`
} | |
truffle.js | var HDWalletProvider = require("truffle-hdwallet-provider");
// Be sure to match this mnemonic with that in Ganache!
var mnemonic = "candy maple cake sugar pudding cream honey rich smooth crumble sweet treat";
module.exports = {
networks: {
development: {
provider: function() {
return new HDWalletProvider(mnemonic, "http://127.0.0.1:8545/", 0, 10);
},
network_id: '*',
gas: 9999999
}
},
compilers: {
solc: {
version: "0.4.24", // Fetch exact version from solc-bin (default: truffle's version)
// docker: true, // Use "0.5.1" you've installed locally with docker (default: false)
// settings: { // See the solidity docs for advice about optimization and evmVersion
// optimizer: {
// enabled: false,
// runs: 200 | }
}; | // },
// evmVersion: "byzantium"
// }
} |
admin.guard.ts | import { Injectable } from '@angular/core';
import {
CanActivate,
ActivatedRouteSnapshot,
RouterStateSnapshot,
Router
} from '@angular/router';
import { Observable } from 'rxjs';
import { AuthService } from '../services/auth.service';
@Injectable({
providedIn: 'root'
})
export class | implements CanActivate {
constructor(private authService: AuthService, private router: Router) { }
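// allow navigation only when the current user is an admin; everyone else is sent to /home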
canActivate(
next: ActivatedRouteSnapshot,
state: RouterStateSnapshot): Observable<boolean> | Promise<boolean> | boolean {
if (this.authService.isAdmin()) {
return true;
}
this.router.navigate(['/home']);
return false;
}
}
| AdminGuard |
slack.rs | use crate::{alerts::Alert, probes::Notification};
use anyhow::Result;
use async_trait::async_trait;
use lazy_static::lazy_static;
use prometheus::{register_counter_vec, CounterVec};
use serde::Serialize;
use serde_derive::Deserialize;
#[derive(Debug, Clone, Default, Deserialize)]
pub struct Slack {
namepass: Option<Vec<String>>,
webhook_url: String,
}
lazy_static! {
static ref RUNS_TOTAL: CounterVec = register_counter_vec!(
"alert_slack_runs_total",
"run counter for slack alert plugin",
&["plugin", "webhook_url"]
)
.unwrap();
}
#[async_trait]
impl Alert for Slack {
fn new(namepass: Vec<&str>) -> Self {
Slack {
namepass: Some(namepass.into_iter().map(String::from).collect()),
..Default::default()
}
}
fn namepass(&self) -> Option<Vec<String>> {
self.namepass.clone()
}
| if !self.should_fire(&notif.name) {
log::info!("should not fire slack alert for {}", ¬if.name);
return Ok(());
}
RUNS_TOTAL
.with_label_values(&["alert.slack", "https://hooks.slack.com/services/[redacted]"])
.inc();
log::info!("sending slack alert to webhook url {}", self.webhook_url);
log::debug!("NOTIFICATION: {:?}", notif);
let pretext = format!("*TRIGGERED `{}`:* {}", notif.from, notif.title);
let mut payload = Payload {
username: "Otto".to_string(),
icon_emoji: ":robot_face:".to_string(),
text: pretext.clone(),
attachments: vec![],
};
match notif.message_entries.as_ref() {
Some(message_entries) => {
let entries_length = message_entries.len();
for (i, entry) in message_entries.iter() {
payload.attachments.push(Attachment {
title: format!("[{} of {}] {}", i + 1, entries_length, entry.title),
text: entry.description.replace("**", "*"),
color: "#ede542".to_string(),
})
}
}
None => payload.attachments.push(Attachment {
title: notif.check.clone(),
text: notif.message.replace("**", "*"),
color: "#ede542".to_string(),
}),
}
let client = reqwest::Client::new();
let result = client.post(&self.webhook_url).json(&payload).send().await;
match result {
Ok(_) => Ok(()),
Err(err) => anyhow::bail!(
"failed to post message to slack webhook url {}: {}",
self.webhook_url,
err
),
}
}
}
// slack webhook payload structs
#[derive(Debug, Serialize)]
struct Payload {
username: String,
icon_emoji: String,
text: String,
attachments: Vec<Attachment>,
}
#[derive(Debug, Serialize)]
struct Attachment {
title: String,
text: String,
color: String,
} | async fn notify(&self, notif: &Notification) -> Result<()> { |
struct_error_msg_params_in_describe_data_import_pre_check_result.go | package drds
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License. | //
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// ErrorMsgParamsInDescribeDataImportPreCheckResult is a nested struct in drds response
type ErrorMsgParamsInDescribeDataImportPreCheckResult struct {
ErrorMsgParams []string `json:"ErrorMsgParams" xml:"ErrorMsgParams"`
} | //You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0 |
networkInterface.go | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
// nolint: lll
package network
import (
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/go/pulumi"
)
// Manages a Network Interface located in a Virtual Network, usually attached to a Virtual Machine.
//
// > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/network_interface.html.markdown.
type NetworkInterface struct {
pulumi.CustomResourceState
// If the VM that uses this NIC is part of an Availability Set, then this list will have the union of all DNS servers from all NICs that are part of the Availability Set
AppliedDnsServers pulumi.StringArrayOutput `pulumi:"appliedDnsServers"`
// List of DNS servers IP addresses to use for this NIC, overrides the VNet-level server list
DnsServers pulumi.StringArrayOutput `pulumi:"dnsServers"`
// Enables Azure Accelerated Networking using SR-IOV. Only certain VM instance sizes are supported. Refer to [Create a Virtual Machine with Accelerated Networking](https://docs.microsoft.com/en-us/azure/virtual-network/create-vm-accelerated-networking-cli). Defaults to `false`.
EnableAcceleratedNetworking pulumi.BoolPtrOutput `pulumi:"enableAcceleratedNetworking"`
// Enables IP Forwarding on the NIC. Defaults to `false`.
EnableIpForwarding pulumi.BoolPtrOutput `pulumi:"enableIpForwarding"`
// Relative DNS name for this NIC used for internal communications between VMs in the same VNet
InternalDnsNameLabel pulumi.StringOutput `pulumi:"internalDnsNameLabel"`
InternalFqdn pulumi.StringOutput `pulumi:"internalFqdn"`
// One or more `ipConfiguration` associated with this NIC as documented below.
IpConfigurations NetworkInterfaceIpConfigurationArrayOutput `pulumi:"ipConfigurations"`
// The location/region where the network interface is created. Changing this forces a new resource to be created.
Location pulumi.StringOutput `pulumi:"location"`
// The media access control (MAC) address of the network interface.
MacAddress pulumi.StringOutput `pulumi:"macAddress"`
// The name of the network interface. Changing this forces a new resource to be created.
Name pulumi.StringOutput `pulumi:"name"`
// The ID of the Network Security Group to associate with the network interface.
NetworkSecurityGroupId pulumi.StringPtrOutput `pulumi:"networkSecurityGroupId"`
// The first private IP address of the network interface.
PrivateIpAddress pulumi.StringOutput `pulumi:"privateIpAddress"`
// The private IP addresses of the network interface.
PrivateIpAddresses pulumi.StringArrayOutput `pulumi:"privateIpAddresses"`
// The name of the resource group in which to create the network interface. Changing this forces a new resource to be created.
ResourceGroupName pulumi.StringOutput `pulumi:"resourceGroupName"`
// A mapping of tags to assign to the resource.
Tags pulumi.StringMapOutput `pulumi:"tags"`
// Reference to a VM with which this NIC has been associated.
VirtualMachineId pulumi.StringOutput `pulumi:"virtualMachineId"`
}
// NewNetworkInterface registers a new resource with the given unique name, arguments, and options.
func NewNetworkInterface(ctx *pulumi.Context,
name string, args *NetworkInterfaceArgs, opts ...pulumi.ResourceOption) (*NetworkInterface, error) {
if args == nil || args.IpConfigurations == nil {
return nil, errors.New("missing required argument 'IpConfigurations'")
}
if args == nil || args.ResourceGroupName == nil {
return nil, errors.New("missing required argument 'ResourceGroupName'")
}
if args == nil {
args = &NetworkInterfaceArgs{}
}
var resource NetworkInterface
err := ctx.RegisterResource("azure:network/networkInterface:NetworkInterface", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// GetNetworkInterface gets an existing NetworkInterface resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetNetworkInterface(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *NetworkInterfaceState, opts ...pulumi.ResourceOption) (*NetworkInterface, error) {
var resource NetworkInterface
err := ctx.ReadResource("azure:network/networkInterface:NetworkInterface", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering NetworkInterface resources.
type networkInterfaceState struct {
// If the VM that uses this NIC is part of an Availability Set, then this list will have the union of all DNS servers from all NICs that are part of the Availability Set
AppliedDnsServers []string `pulumi:"appliedDnsServers"`
// List of DNS servers IP addresses to use for this NIC, overrides the VNet-level server list
DnsServers []string `pulumi:"dnsServers"`
// Enables Azure Accelerated Networking using SR-IOV. Only certain VM instance sizes are supported. Refer to [Create a Virtual Machine with Accelerated Networking](https://docs.microsoft.com/en-us/azure/virtual-network/create-vm-accelerated-networking-cli). Defaults to `false`.
EnableAcceleratedNetworking *bool `pulumi:"enableAcceleratedNetworking"`
// Enables IP Forwarding on the NIC. Defaults to `false`.
EnableIpForwarding *bool `pulumi:"enableIpForwarding"`
// Relative DNS name for this NIC used for internal communications between VMs in the same VNet
InternalDnsNameLabel *string `pulumi:"internalDnsNameLabel"`
InternalFqdn *string `pulumi:"internalFqdn"`
// One or more `ipConfiguration` associated with this NIC as documented below.
IpConfigurations []NetworkInterfaceIpConfiguration `pulumi:"ipConfigurations"`
// The location/region where the network interface is created. Changing this forces a new resource to be created.
Location *string `pulumi:"location"`
// The media access control (MAC) address of the network interface.
MacAddress *string `pulumi:"macAddress"`
// The name of the network interface. Changing this forces a new resource to be created.
Name *string `pulumi:"name"`
// The ID of the Network Security Group to associate with the network interface.
NetworkSecurityGroupId *string `pulumi:"networkSecurityGroupId"`
// The first private IP address of the network interface.
PrivateIpAddress *string `pulumi:"privateIpAddress"`
// The private IP addresses of the network interface.
PrivateIpAddresses []string `pulumi:"privateIpAddresses"`
// The name of the resource group in which to create the network interface. Changing this forces a new resource to be created.
ResourceGroupName *string `pulumi:"resourceGroupName"`
// A mapping of tags to assign to the resource.
Tags map[string]string `pulumi:"tags"`
// Reference to a VM with which this NIC has been associated.
VirtualMachineId *string `pulumi:"virtualMachineId"`
}
type NetworkInterfaceState struct {
// If the VM that uses this NIC is part of an Availability Set, then this list will have the union of all DNS servers from all NICs that are part of the Availability Set
AppliedDnsServers pulumi.StringArrayInput
// List of DNS servers IP addresses to use for this NIC, overrides the VNet-level server list
DnsServers pulumi.StringArrayInput
// Enables Azure Accelerated Networking using SR-IOV. Only certain VM instance sizes are supported. Refer to [Create a Virtual Machine with Accelerated Networking](https://docs.microsoft.com/en-us/azure/virtual-network/create-vm-accelerated-networking-cli). Defaults to `false`.
EnableAcceleratedNetworking pulumi.BoolPtrInput
// Enables IP Forwarding on the NIC. Defaults to `false`.
EnableIpForwarding pulumi.BoolPtrInput
// Relative DNS name for this NIC used for internal communications between VMs in the same VNet
InternalDnsNameLabel pulumi.StringPtrInput
InternalFqdn pulumi.StringPtrInput
// One or more `ipConfiguration` associated with this NIC as documented below.
IpConfigurations NetworkInterfaceIpConfigurationArrayInput
// The location/region where the network interface is created. Changing this forces a new resource to be created.
Location pulumi.StringPtrInput
// The media access control (MAC) address of the network interface.
MacAddress pulumi.StringPtrInput
// The name of the network interface. Changing this forces a new resource to be created.
Name pulumi.StringPtrInput
// The ID of the Network Security Group to associate with the network interface.
NetworkSecurityGroupId pulumi.StringPtrInput
// The first private IP address of the network interface.
PrivateIpAddress pulumi.StringPtrInput
// The private IP addresses of the network interface.
PrivateIpAddresses pulumi.StringArrayInput
// The name of the resource group in which to create the network interface. Changing this forces a new resource to be created.
ResourceGroupName pulumi.StringPtrInput
// A mapping of tags to assign to the resource.
Tags pulumi.StringMapInput
// Reference to a VM with which this NIC has been associated.
VirtualMachineId pulumi.StringPtrInput
}
func (NetworkInterfaceState) ElementType() reflect.Type {
return reflect.TypeOf((*networkInterfaceState)(nil)).Elem()
}
type networkInterfaceArgs struct {
// If the VM that uses this NIC is part of an Availability Set, then this list will have the union of all DNS servers from all NICs that are part of the Availability Set
AppliedDnsServers []string `pulumi:"appliedDnsServers"`
// List of DNS servers IP addresses to use for this NIC, overrides the VNet-level server list
DnsServers []string `pulumi:"dnsServers"`
// Enables Azure Accelerated Networking using SR-IOV. Only certain VM instance sizes are supported. Refer to [Create a Virtual Machine with Accelerated Networking](https://docs.microsoft.com/en-us/azure/virtual-network/create-vm-accelerated-networking-cli). Defaults to `false`.
EnableAcceleratedNetworking *bool `pulumi:"enableAcceleratedNetworking"`
// Enables IP Forwarding on the NIC. Defaults to `false`.
EnableIpForwarding *bool `pulumi:"enableIpForwarding"`
// Relative DNS name for this NIC used for internal communications between VMs in the same VNet
InternalDnsNameLabel *string `pulumi:"internalDnsNameLabel"`
InternalFqdn *string `pulumi:"internalFqdn"`
// One or more `ipConfiguration` associated with this NIC as documented below.
IpConfigurations []NetworkInterfaceIpConfiguration `pulumi:"ipConfigurations"`
// The location/region where the network interface is created. Changing this forces a new resource to be created.
Location *string `pulumi:"location"`
// The media access control (MAC) address of the network interface.
MacAddress *string `pulumi:"macAddress"`
// The name of the network interface. Changing this forces a new resource to be created.
Name *string `pulumi:"name"`
// The ID of the Network Security Group to associate with the network interface.
NetworkSecurityGroupId *string `pulumi:"networkSecurityGroupId"`
// The name of the resource group in which to create the network interface. Changing this forces a new resource to be created. | // Reference to a VM with which this NIC has been associated.
VirtualMachineId *string `pulumi:"virtualMachineId"`
}
// The set of arguments for constructing a NetworkInterface resource.
type NetworkInterfaceArgs struct {
// If the VM that uses this NIC is part of an Availability Set, then this list will have the union of all DNS servers from all NICs that are part of the Availability Set
AppliedDnsServers pulumi.StringArrayInput
// List of DNS servers IP addresses to use for this NIC, overrides the VNet-level server list
DnsServers pulumi.StringArrayInput
// Enables Azure Accelerated Networking using SR-IOV. Only certain VM instance sizes are supported. Refer to [Create a Virtual Machine with Accelerated Networking](https://docs.microsoft.com/en-us/azure/virtual-network/create-vm-accelerated-networking-cli). Defaults to `false`.
EnableAcceleratedNetworking pulumi.BoolPtrInput
// Enables IP Forwarding on the NIC. Defaults to `false`.
EnableIpForwarding pulumi.BoolPtrInput
// Relative DNS name for this NIC used for internal communications between VMs in the same VNet
InternalDnsNameLabel pulumi.StringPtrInput
InternalFqdn pulumi.StringPtrInput
// One or more `ipConfiguration` associated with this NIC as documented below.
IpConfigurations NetworkInterfaceIpConfigurationArrayInput
// The location/region where the network interface is created. Changing this forces a new resource to be created.
Location pulumi.StringPtrInput
// The media access control (MAC) address of the network interface.
MacAddress pulumi.StringPtrInput
// The name of the network interface. Changing this forces a new resource to be created.
Name pulumi.StringPtrInput
// The ID of the Network Security Group to associate with the network interface.
NetworkSecurityGroupId pulumi.StringPtrInput
// The name of the resource group in which to create the network interface. Changing this forces a new resource to be created.
ResourceGroupName pulumi.StringInput
// A mapping of tags to assign to the resource.
Tags pulumi.StringMapInput
// Reference to a VM with which this NIC has been associated.
VirtualMachineId pulumi.StringPtrInput
}
func (NetworkInterfaceArgs) ElementType() reflect.Type {
return reflect.TypeOf((*networkInterfaceArgs)(nil)).Elem()
} | ResourceGroupName string `pulumi:"resourceGroupName"`
// A mapping of tags to assign to the resource.
Tags map[string]string `pulumi:"tags"` |
event_transport_test.py | '''
Copyright (c) 2011-2017, Agora Games, LLC All rights reserved.
https://github.com/agoragames/haigha/blob/master/LICENSE.txt
'''
from chai import Chai
from haigha.transports import event_transport
from haigha.transports.event_transport import *
class EventTransportTest(Chai):
def setUp(self):
super(EventTransportTest, self).setUp()
self.connection = mock()
self.transport = EventTransport(self.connection)
self.transport._host = 'server'
def test_sock_close_cb(self):
expect(self.connection.transport_closed).args(
msg='socket to server closed unexpectedly')
self.transport._sock_close_cb('sock')
def test_sock_error_cb(self):
expect(self.connection.transport_closed).args(
msg='error on connection to server: amsg')
self.transport._sock_error_cb('sock', 'amsg')
def test_sock_read_cb(self):
expect(self.connection.read_frames)
self.transport._sock_read_cb('sock')
def test_connect(self):
sock = mock()
mock(event_transport, 'EventSocket')
self.connection._connect_timeout = 4.12
self.connection._sock_opts = {
('family', 'tcp'): 34,
('range', 'ipv6'): 'hex'
}
expect(event_transport.EventSocket).args(
read_cb=self.transport._sock_read_cb,
close_cb=self.transport._sock_close_cb,
error_cb=self.transport._sock_error_cb,
debug=self.connection.debug,
logger=self.connection.logger,
).returns(sock)
expect(sock.setsockopt).args('family', 'tcp', 34).any_order()
expect(sock.setsockopt).args('range', 'ipv6', 'hex').any_order()
expect(sock.setblocking).args(False)
expect(sock.connect).args(('host', 5309), timeout=4.12)
self.transport.connect(('host', 5309))
def test_read(self):
self.transport._heartbeat_timeout = None
self.transport._sock = mock()
expect(self.transport._sock.read).returns('buffereddata')
assert_equals('buffereddata', self.transport.read())
def test_read_with_timeout_and_no_current_one(self):
self.transport._heartbeat_timeout = None
self.transport._sock = mock()
mock(event_transport, 'event')
expect(event_transport.event.timeout).args(
'timeout', self.transport._sock_read_cb, self.transport._sock).returns(
'timer')
expect(self.transport._sock.read).returns('buffereddata')
assert_equals('buffereddata', self.transport.read('timeout'))
assert_equals('timer', self.transport._heartbeat_timeout)
def test_read_with_timeout_and_current_one(self):
self.transport._heartbeat_timeout = mock()
self.transport._sock = mock()
mock(event_transport, 'event')
expect(self.transport._heartbeat_timeout.delete)
expect(event_transport.event.timeout).args(
'timeout', self.transport._sock_read_cb, self.transport._sock).returns(
'timer')
expect(self.transport._sock.read).returns('buffereddata')
assert_equals('buffereddata', self.transport.read('timeout'))
assert_equals('timer', self.transport._heartbeat_timeout)
def test_read_without_timeout_but_current_one(self):
self.transport._heartbeat_timeout = mock()
self.transport._sock = mock()
mock(event_transport, 'event')
expect(self.transport._heartbeat_timeout.delete)
expect(self.transport._sock.read).returns('buffereddata')
assert_equals('buffereddata', self.transport.read())
assert_equals(None, self.transport._heartbeat_timeout)
def test_read_when_no_sock(self):
self.transport.read()
def test_buffer(self):
self.transport._sock = mock()
expect(self.transport._sock.buffer).args('somedata')
self.transport.buffer('somedata')
def test_buffer_when_no_sock(self):
self.transport.buffer('somedata')
def test_write(self):
self.transport._sock = mock()
expect(self.transport._sock.write).args('somedata')
self.transport.write('somedata')
def test_write_when_no_sock(self):
self.transport.write('somedata')
def test_disconnect(self):
self.transport._sock = mock()
self.transport._sock.close_cb = 'cb'
expect(self.transport._sock.close)
self.transport.disconnect()
assert_equals(None, self.transport._sock.close_cb)
def | (self):
self.transport.disconnect()
| test_disconnect_when_no_sock |
debugging.py | """Module containing helper functions."""
from __future__ import annotations
import collections
import logging
import sys
import traceback
from prettyqt import qt, widgets
from prettyqt.qt import QtCore
logger = logging.getLogger(__name__)
LOG_MAP = {
QtCore.QtMsgType.QtInfoMsg: 20,
QtCore.QtMsgType.QtWarningMsg: 30,
QtCore.QtMsgType.QtCriticalMsg: 40,
QtCore.QtMsgType.QtFatalMsg: 50,
}
def qt_message_handler(mode: QtCore.QtMsgType, context, message: str):
level = LOG_MAP.get(mode, 20)
logger.log(level, f"{message} ({context.file}:{context.line}, {context.file})")
def install_exceptionhook(debug: bool = False):
def handleException(exc_type, exc_value, exc_traceback):
"""Causes the application to quit in case of an unhandled exception.
Shows an error dialog before quitting when not in debugging mode.
"""
logger.critical(
f"Bug: uncaught {exc_type.__name__}",
exc_info=(exc_type, exc_value, exc_traceback),
)
if debug:
sys.exit(1)
else:
from prettyqt import widgets
# Constructing a QApplication in case this hasn't been done yet.
_ = widgets.app()
lst = traceback.format_exception(exc_type, exc_value, exc_traceback)
msg_box = widgets.MessageBox(
icon="warning",
text=f"Bug: uncaught {exc_type.__name__}",
informative_text=str(exc_value),
details="".join(lst),
)
msg_box.main_loop()
sys.exit(1)
sys.excepthook = handleException
def count_objects():
win = widgets.Application.get_mainwindow()
objects = win.findChildren(QtCore.QObject)
counter = collections.Counter([type(o) for o in objects])
logger.info(counter)
def is_deleted(obj) -> bool:
if qt.API == "pyside2":
import shiboken2
return not shiboken2.isValid(obj)
elif qt.API == "pyside6":
import shiboken6
return not shiboken6.isValid(obj)
elif qt.API == "pyqt5": | return sip.isdeleted(obj)
else:
try:
from PyQt6 import sip
except ImportError:
import sip # type: ignore[import, no-redef]
return sip.isdeleted(obj) | try:
from PyQt5 import sip
except ImportError:
import sip # type: ignore[import, no-redef] |
mod.rs | pub mod block_entity;
pub mod player;
use crate::ecs::{Manager, SystemExecStage};
use crate::entity::slime::SlimeModel;
use crate::entity::zombie::ZombieModel;
use crate::render::Texture;
use bevy_ecs::component::Component;
use bevy_ecs::prelude::*;
use cgmath::Vector3;
use collision::Aabb3;
use std::sync::Arc;
pub mod player_like;
pub mod slime;
mod systems;
pub mod versions;
pub mod zombie;
// TODO: There may be wrong entries in this!
// 1.0, 1.0, 0.0 | 0.0, 0.0, 0.0
static TEXTURE_MATRIX: [[[f32; 3]; 6]; 2] = [
[
[0.0, 1.0, 0.0], // OR (although the current one seems correct) 1 0 1 [1.0, 0.0, 1.0], // OR 1 0 1
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 1.0], // south(back) - 0, 1, 1 | 1, 0, 1 - 0, 0, 1 displays the left half of the back (body) and the left side of the head
[1.0, 0.0, 1.0], // left(west)
[0.0, 0.0, 0.0], // right(east)
],
[
[0.0, 0.0, 0.0], | [0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
],
];
/*
resolve_textures(&tex, 8.0, 12.0, 4.0, 16.0, 16.0) // width, height, depth...
srel!(28.0, 16.0, 8.0, 4.0), // Down | 1 0 1 | 0 0 0 OR 0 1 0 | 0 0 0
srel!(20.0, 16.0, 8.0, 4.0), // Up | 0 0 1 | 0 0 0
srel!(20.0, 20.0, 8.0, 12.0), // North | 0 0 1 | 0 0 1
srel!(32.0, 20.0, 8.0, 12.0), // South | 0 1 1 | 0 0 1
srel!(16.0, 20.0, 4.0, 12.0), // West | 0 0 0 | 0 0 1
srel!(28.0, 20.0, 4.0, 12.0), // East | 0 1 0 | 0 0 1 OR 1 0 1 | 0 0 1
[1.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 1.0],
[2.0, 0.0, 0.0, 1.0],
[2.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0],
*/
pub fn add_systems(
m: &mut Manager,
parallel: &mut SystemStage,
sync: &mut SystemStage,
entity_sched: &mut SystemStage,
) {
entity_sched.add_system(
systems::update_last_position
.system()
.label(SystemExecStage::Normal),
);
player::add_systems(m, parallel, sync, entity_sched);
entity_sched
.add_system(
systems::apply_velocity
.system()
.label(SystemExecStage::Normal),
)
.add_system(
systems::apply_gravity
.system()
.label(SystemExecStage::Normal),
);
sync.add_system(
systems::lerp_position
.system()
.label(SystemExecStage::Render)
.after(SystemExecStage::Normal),
)
.add_system(
systems::lerp_rotation
.system()
.label(SystemExecStage::Render)
.after(SystemExecStage::Normal),
)
.add_system(
systems::light_entity
.system()
.label(SystemExecStage::Render)
.after(SystemExecStage::Normal),
);
block_entity::add_systems(m, parallel, sync);
crate::particle::block_break_effect::add_systems(m, parallel, sync, entity_sched);
}
/// Location of an entity in the world.
#[derive(Component, Debug)]
pub struct Position {
pub position: Vector3<f64>,
pub last_position: Vector3<f64>,
pub moved: bool,
}
impl Position {
pub fn new(x: f64, y: f64, z: f64) -> Position {
Position {
position: Vector3::new(x, y, z),
last_position: Vector3::new(x, y, z),
moved: false,
}
}
pub fn zero() -> Position {
Position::new(0.0, 0.0, 0.0)
}
}
#[derive(Component, Debug)]
pub struct TargetPosition {
pub position: Vector3<f64>,
pub lerp_amount: f64,
}
impl TargetPosition {
pub fn new(x: f64, y: f64, z: f64) -> TargetPosition {
TargetPosition {
position: Vector3::new(x, y, z),
lerp_amount: 0.2,
}
}
pub fn zero() -> TargetPosition {
TargetPosition::new(0.0, 0.0, 0.0)
}
}
/// Velocity of an entity in the world.
#[derive(Component, Debug)]
pub struct Velocity {
pub velocity: Vector3<f64>,
}
impl Velocity {
pub fn new(x: f64, y: f64, z: f64) -> Velocity {
Velocity {
velocity: Vector3::new(x, y, z),
}
}
pub fn zero() -> Velocity {
Velocity::new(0.0, 0.0, 0.0)
}
}
/// Rotation of an entity in the world
#[derive(Component, Debug)]
pub struct Rotation {
pub yaw: f64,
pub pitch: f64,
}
impl Rotation {
pub fn new(yaw: f64, pitch: f64) -> Rotation {
Rotation { yaw, pitch }
}
pub fn zero() -> Rotation {
Rotation::new(0.0, 0.0)
}
}
#[derive(Component, Debug)]
pub struct TargetRotation {
pub yaw: f64,
pub pitch: f64,
}
impl TargetRotation {
pub fn new(yaw: f64, pitch: f64) -> TargetRotation {
TargetRotation { yaw, pitch }
}
pub fn zero() -> TargetRotation {
TargetRotation::new(0.0, 0.0)
}
}
#[derive(Component, Default)]
pub struct Gravity {
pub on_ground: bool,
}
impl Gravity {
pub fn new() -> Gravity {
Default::default()
}
}
#[derive(Component)]
pub struct Bounds {
pub bounds: Aabb3<f64>,
}
impl Bounds {
pub fn new(bounds: Aabb3<f64>) -> Bounds {
Bounds { bounds }
}
}
#[derive(Default)]
pub struct GameInfo {
pub delta: f64,
}
impl GameInfo {
pub fn new() -> GameInfo {
Default::default()
}
}
#[derive(Component, Default)]
pub struct Light {
pub block_light: f32,
pub sky_light: f32,
}
impl Light {
pub fn new() -> Light {
Default::default()
}
}
#[derive(Component, Copy, Clone, Eq, PartialEq, Hash, Debug)]
pub enum EntityType {
DroppedItem,
ExperienceOrb,
LeashHitch,
Painting,
Arrow,
Snowball,
Fireball,
SmallFireball,
EnderPearl,
EnderSignal,
ThrownExpBottle,
ItemFrame,
WitherSkull,
PrimedTnt,
FallingBlock,
Firework,
TippedArrow,
SpectralArrow,
ShulkerBullet,
DragonFireball,
ArmorStand,
MinecartCommand,
Boat,
Minecart,
MinecartChest,
MinecartFurnace,
MinecartTnt,
MinecartHopper,
MinecartMobSpawner,
Creeper,
Skeleton,
Spider,
Giant,
Zombie,
Slime,
Ghast,
PigZombie,
Enderman,
CaveSpider,
Silverfish,
Blaze,
MagmaCube,
EnderDragon,
Wither,
Bat,
Witch,
Endermite,
Guardian,
Shulker,
Pig,
Sheep,
Cow,
Chicken,
Squid,
Wolf,
MushroomCow,
Snowman,
Ocelot,
IronGolem,
Horse,
Rabbit,
PolarBear,
Villager,
EnderCrystal,
SplashPotion,
LingeringPotion,
AreaEffectCloud,
Egg,
FishingHook,
Lightning,
Weather,
Player,
ComplexPart,
Unknown,
ElderGuardian,
WitherSkeleton,
Stray,
Husk,
ZombieVillager,
SkeletonHorse,
ZombieHorse,
Donkey,
Mule,
EvokerFangs,
Evoker,
Vex,
Vindicator,
Llama,
LlamaSpit,
Illusioner,
Parrot,
Turtle,
Phantom,
Trident,
Cod,
Salmon,
Pufferfish,
TropicalFish,
Drowned,
Dolphin,
Cat,
Panda,
Pillager,
Ravager,
TraderLlama,
WanderingTrader,
Fox,
Bee,
ZombifiedPiglin,
Hoglin,
Piglin,
Strider,
Zoglin,
PiglinBrute,
}
impl EntityType {
pub fn create_entity(
&self,
m: &mut Manager,
x: f64,
y: f64,
z: f64,
yaw: f64,
pitch: f64,
) -> Option<Entity> {
if self.supported() {
let ret = self.create_entity_internally(m, x, y, z, yaw, pitch);
self.create_model(m, ret);
return Some(ret);
}
None
}
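/// Like `create_entity`, but does not attach a model; the caller supplies its own.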
pub fn create_entity_custom_model(
&self,
m: &mut Manager,
x: f64,
y: f64,
z: f64,
yaw: f64,
pitch: f64,
) -> Option<Entity> {
if self.supported() {
return Some(self.create_entity_internally(m, x, y, z, yaw, pitch));
}
None
}
fn create_entity_internally(
&self,
m: &mut Manager,
x: f64,
y: f64,
z: f64,
yaw: f64,
pitch: f64,
) -> Entity {
let mut entity = m.world.spawn();
entity
.insert(Position::new(x, y, z))
.insert(Rotation::new(yaw, pitch))
.insert(Velocity::new(0.0, 0.0, 0.0))
.insert(TargetPosition::new(x, y, z))
.insert(TargetRotation::new(yaw, pitch))
.insert(Light::new())
.insert(*self);
entity.id()
}
fn create_model(&self, m: &mut Manager, entity: Entity) {
match self {
EntityType::Zombie => {
m.world
.entity_mut(entity)
.insert(ZombieModel::new(Some(String::from("test"))));
}
EntityType::Slime => {
m.world.entity_mut(entity).insert(SlimeModel::new("test"));
}
_ => {}
};
}
fn supported(&self) -> bool {
matches!(self, EntityType::Zombie)
}
}
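/// Splits an entity texture into its six per-face sub-textures
/// (down, up, north, south, west, east) using the offsets in `TEXTURE_MATRIX`.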
pub fn resolve_textures(
texture: &Texture,
width: f32,
height: f32,
depth: f32,
offset_x: f32,
offset_y: f32,
) -> [Option<Texture>; 6] {
[
Some(texture.relative(
(offset_x
+ width * TEXTURE_MATRIX[0][0][0]
+ height * TEXTURE_MATRIX[0][0][1]
+ depth * TEXTURE_MATRIX[0][0][2])
/ (texture.get_width() as f32),
(offset_y + depth * TEXTURE_MATRIX[1][0][2]) / (texture.get_height() as f32),
width / (texture.get_width() as f32),
height / (texture.get_height() as f32),
)),
Some(texture.relative(
(offset_x
+ width * TEXTURE_MATRIX[0][1][0]
+ height * TEXTURE_MATRIX[0][1][1]
+ depth * TEXTURE_MATRIX[0][1][2])
/ (texture.get_width() as f32),
(offset_y + depth * TEXTURE_MATRIX[1][1][2]) / (texture.get_height() as f32),
width / (texture.get_width() as f32),
height / (texture.get_height() as f32),
)),
Some(texture.relative(
(offset_x
+ width * TEXTURE_MATRIX[0][2][0]
+ height * TEXTURE_MATRIX[0][2][1]
+ depth * TEXTURE_MATRIX[0][2][2])
/ (texture.get_width() as f32),
(offset_y + depth * TEXTURE_MATRIX[1][2][2]) / (texture.get_height() as f32),
width / (texture.get_width() as f32),
height / (texture.get_height() as f32),
)),
Some(texture.relative(
(offset_x
+ width * TEXTURE_MATRIX[0][3][0]
+ height * TEXTURE_MATRIX[0][3][1]
+ depth * TEXTURE_MATRIX[0][3][2])
/ (texture.get_width() as f32),
(offset_y + depth * TEXTURE_MATRIX[1][3][2]) / (texture.get_height() as f32),
width / (texture.get_width() as f32),
height / (texture.get_height() as f32),
)),
Some(texture.relative(
(offset_x
+ width * TEXTURE_MATRIX[0][4][0]
+ height * TEXTURE_MATRIX[0][4][1]
+ depth * TEXTURE_MATRIX[0][4][2])
/ (texture.get_width() as f32),
(offset_y + depth * TEXTURE_MATRIX[1][4][2]) / (texture.get_height() as f32),
width / (texture.get_width() as f32),
height / (texture.get_height() as f32),
)),
Some(texture.relative(
(offset_x
+ width * TEXTURE_MATRIX[0][5][0]
+ height * TEXTURE_MATRIX[0][5][1]
+ depth * TEXTURE_MATRIX[0][5][2])
/ (texture.get_width() as f32),
(offset_y + depth * TEXTURE_MATRIX[1][5][2]) / (texture.get_height() as f32),
width / (texture.get_width() as f32),
height / (texture.get_height() as f32),
)),
]
} | |
traits1.rs | // traits1.rs
// Time to implement some traits!
//
// Your task is to implement the trait
// `AppendBar' for the type `String'.
//
// The trait AppendBar has only one function,
// which appends "Bar" to any object
// implementing this trait.
trait AppendBar {
fn append_bar(self) -> Self;
}
impl AppendBar for String {
//Add your code here
fn append_bar(self) -> Self {
self + "Bar"
}
}
fn main() |
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn is_FooBar() {
assert_eq!(String::from("Foo").append_bar(), String::from("FooBar"));
}
#[test]
fn is_BarBar() {
assert_eq!(
String::from("").append_bar().append_bar(),
String::from("BarBar")
);
}
}
| {
let s = String::from("Foo");
let s = s.append_bar();
println!("s: {}", s);
} |
product-of-array-except-self.py | # Time: O(n)
# Space: O(1)
class Solution(object):
# @param {integer[]} nums
# @return {integer[]}
def productExceptSelf(self, nums):
if not nums:
return []
left_product = [1 for _ in xrange(len(nums))]
for i in xrange(1, len(nums)):
left_product[i] = left_product[i - 1] * nums[i - 1]
right_product = 1 | left_product[i] = left_product[i] * right_product
return left_product | for i in xrange(len(nums) - 2, -1, -1):
right_product *= nums[i + 1] |
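# Added illustration (not part of the original solution): once the left pass
# above is combined with the right-to-left pass, each slot holds the product
# of every other element. Expected behaviour for the classic example input:
#   Solution().productExceptSelf([1, 2, 3, 4])  # -> [24, 12, 8, 6]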
util.py | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from datetime import datetime
from DateTime.DateTime import DateTime
import six
MAX32 = int(2 ** 31 - 1)
def safe_callable(ob):
# Works with ExtensionClasses and Acquisition.
try:
ob.__class__
try:
return bool(ob.__call__)
except AttributeError:
return isinstance(ob, six.class_types)
except AttributeError:
return callable(ob)
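# Added illustration (not part of the original module): safe_callable mirrors
# callable() while tolerating proxied attribute access (ExtensionClasses,
# Acquisition). For ordinary objects the expected results are:
#   safe_callable(len)  -> True   (has __call__)
#   safe_callable(str)  -> True   (classes are callable)
#   safe_callable(42)   -> False  (no __call__ and not a class)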
def datetime_to_minutes(value, precision=1,
max_value=MAX32, min_value=-MAX32):
if value is None:
return value
if isinstance(value, (str, datetime)):
value = DateTime(value)
if isinstance(value, DateTime):
|
# flatten to precision
if precision > 1:
value = value - (value % precision)
value = int(value)
if value > max_value or value < min_value:
# value must be integer fitting in the range (default 32bit)
raise OverflowError(
'{0} is not within the range of dates allowed.'.format(value))
return value
| value = value.millis() / 1000 / 60 # flatten to minutes |
model.go | package model
import "time"
// HTTPProxyPool [...]
type HTTPProxyPool struct {
ID int `gorm:"primary_key;column:id;type:int(11);not null" json:"-"` | Source string `gorm:"unique_index:idx_ip;column:source;type:varchar(6);not null" json:"source"`
IP string `gorm:"unique_index:idx_ip;column:ip;type:varchar(16);not null" json:"ip"`
Port string `gorm:"unique_index:idx_ip;column:port;type:varchar(6);not null" json:"port"`
City string `gorm:"column:city;type:varchar(24);not null" json:"city"`
Isp string `gorm:"column:isp;type:varchar(12);not null" json:"isp"`
ExpireTime time.Time `gorm:"index:idx_expire;column:expire_time;type:datetime;not null" json:"expire_time"`
Outip string `gorm:"column:outip;type:varchar(16);not null" json:"outip"`
} | CreatedAt time.Time `gorm:"column:created_at;type:datetime;not null" json:"created_at"`
UpdatedAt time.Time `gorm:"column:updated_at;type:datetime;not null" json:"updated_at"` |
http.py | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import logging
import socket
from oslo_utils import importutils
from oslo_utils import netutils
import requests
try:
from requests.packages.urllib3.exceptions import ProtocolError
except ImportError:
ProtocolError = requests.exceptions.ConnectionError
import six
from six.moves.urllib import parse
try:
import json
except ImportError:
import simplejson as json
# Python 2.5 compat fix
if not hasattr(parse, 'parse_qsl'):
import cgi
parse.parse_qsl = cgi.parse_qsl
from oslo_utils import encodeutils
from glanceclient.common import https
from glanceclient.common.utils import safe_header
from glanceclient import exc
osprofiler_web = importutils.try_import("osprofiler.web")
LOG = logging.getLogger(__name__)
USER_AGENT = 'python-glanceclient'
CHUNKSIZE = 1024 * 64 # 64kB
class HTTPClient(object):
def __init__(self, endpoint, **kwargs):
self.endpoint = endpoint
self.identity_headers = kwargs.get('identity_headers')
self.auth_token = kwargs.get('token')
if self.identity_headers:
if self.identity_headers.get('X-Auth-Token'):
self.auth_token = self.identity_headers.get('X-Auth-Token')
del self.identity_headers['X-Auth-Token']
self.session = requests.Session()
self.session.headers["User-Agent"] = USER_AGENT
if self.auth_token:
self.session.headers["X-Auth-Token"] = self.auth_token
self.timeout = float(kwargs.get('timeout', 600))
if self.endpoint.startswith("https"):
compression = kwargs.get('ssl_compression', True)
if not compression:
self.session.mount("glance+https://", https.HTTPSAdapter())
self.endpoint = 'glance+' + self.endpoint
self.session.verify = (
kwargs.get('cacert', requests.certs.where()),
kwargs.get('insecure', False))
else:
if kwargs.get('insecure', False) is True:
self.session.verify = False
else:
if kwargs.get('cacert', None) != '':
self.session.verify = kwargs.get('cacert', True)
self.session.cert = (kwargs.get('cert_file'),
kwargs.get('key_file'))
@staticmethod
def parse_endpoint(endpoint):
return netutils.urlsplit(endpoint)
def log_curl_request(self, method, url, headers, data, kwargs):
curl = ['curl -g -i -X %s' % method]
headers = copy.deepcopy(headers)
headers.update(self.session.headers)
for (key, value) in six.iteritems(headers):
header = '-H \'%s: %s\'' % safe_header(key, value)
curl.append(header)
if not self.session.verify:
curl.append('-k')
else:
if isinstance(self.session.verify, six.string_types):
curl.append(' --cacert %s' % self.session.verify)
if self.session.cert:
curl.append(' --cert %s --key %s' % self.session.cert)
if data and isinstance(data, six.string_types):
curl.append('-d \'%s\'' % data)
curl.append(url)
msg = ' '.join([encodeutils.safe_decode(item, errors='ignore')
for item in curl])
LOG.debug(msg)
@staticmethod
def log_http_response(resp, body=None):
status = (resp.raw.version / 10.0, resp.status_code, resp.reason)
dump = ['\nHTTP/%.1f %s %s' % status]
headers = resp.headers.items()
dump.extend(['%s: %s' % safe_header(k, v) for k, v in headers])
dump.append('')
if body:
body = encodeutils.safe_decode(body)
dump.extend([body, ''])
LOG.debug('\n'.join([encodeutils.safe_decode(x, errors='ignore')
for x in dump]))
@staticmethod
def encode_headers(headers):
"""Encodes headers.
Note: This should be used right before
sending anything out.
:param headers: Headers to encode
:returns: Dictionary with encoded headers'
names and values
"""
return dict((encodeutils.safe_encode(h), encodeutils.safe_encode(v))
for h, v in six.iteritems(headers) if v is not None)
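# Added illustration (not part of the original class): headers whose value is
# None are dropped and the remaining names/values are byte-encoded, roughly:
#   encode_headers({'x-meta': 'v', 'x-skip': None})  ->  {b'x-meta': b'v'}
# (under Python 2 the encoded values are native str rather than bytes)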
def _request(self, method, url, **kwargs):
"""Send an http request with the specified characteristics.
Wrapper around httplib.HTTP(S)Connection.request to handle tasks such
as setting headers and error handling.
"""
# Copy the kwargs so we can reuse the original in case of redirects
headers = kwargs.pop("headers", {})
headers = headers and copy.deepcopy(headers) or {}
if self.identity_headers:
for k, v in six.iteritems(self.identity_headers):
headers.setdefault(k, v)
# Default Content-Type is octet-stream
content_type = headers.get('Content-Type', 'application/octet-stream')
def | (body):
chunk = body
while chunk:
chunk = body.read(CHUNKSIZE)
if chunk == '':
break
yield chunk
data = kwargs.pop("data", None)
if data is not None and not isinstance(data, six.string_types):
try:
data = json.dumps(data)
content_type = 'application/json'
except TypeError:
# Here we assume it's
# a file-like object
# and we'll chunk it
data = chunk_body(data)
headers['Content-Type'] = content_type
stream = True if content_type == 'application/octet-stream' else False
if osprofiler_web:
headers.update(osprofiler_web.get_trace_id_headers())
# Note(flaper87): Before letting headers / url fly,
# they should be encoded otherwise httplib will
# complain.
headers = self.encode_headers(headers)
try:
if self.endpoint.endswith("/") or url.startswith("/"):
conn_url = "%s%s" % (self.endpoint, url)
else:
conn_url = "%s/%s" % (self.endpoint, url)
self.log_curl_request(method, conn_url, headers, data, kwargs)
resp = self.session.request(method,
conn_url,
data=data,
stream=stream,
headers=headers,
**kwargs)
except requests.exceptions.Timeout as e:
message = ("Error communicating with %(endpoint)s %(e)s" %
dict(endpoint=conn_url, e=e))
raise exc.InvalidEndpoint(message=message)
except (requests.exceptions.ConnectionError, ProtocolError) as e:
message = ("Error finding address for %(url)s: %(e)s" %
dict(url=conn_url, e=e))
raise exc.CommunicationError(message=message)
except socket.gaierror as e:
message = "Error finding address for %s: %s" % (
self.endpoint, e)
raise exc.InvalidEndpoint(message=message)
except (socket.error, socket.timeout) as e:
endpoint = self.endpoint
message = ("Error communicating with %(endpoint)s %(e)s" %
{'endpoint': endpoint, 'e': e})
raise exc.CommunicationError(message=message)
if not resp.ok:
LOG.debug("Request returned failure status %s." % resp.status_code)
raise exc.from_response(resp, resp.text)
elif resp.status_code == requests.codes.MULTIPLE_CHOICES:
raise exc.from_response(resp)
content_type = resp.headers.get('Content-Type')
# Read body into string if it isn't obviously image data
if content_type == 'application/octet-stream':
# Do not read all response in memory when
# downloading an image.
body_iter = _close_after_stream(resp, CHUNKSIZE)
self.log_http_response(resp)
else:
content = resp.text
self.log_http_response(resp, content)
if content_type and content_type.startswith('application/json'):
# Let's use requests json method,
# it should take care of response
# encoding
body_iter = resp.json()
else:
body_iter = six.StringIO(content)
try:
body_iter = json.loads(''.join([c for c in body_iter]))
except ValueError:
body_iter = None
return resp, body_iter
def head(self, url, **kwargs):
return self._request('HEAD', url, **kwargs)
def get(self, url, **kwargs):
return self._request('GET', url, **kwargs)
def post(self, url, **kwargs):
return self._request('POST', url, **kwargs)
def put(self, url, **kwargs):
return self._request('PUT', url, **kwargs)
def patch(self, url, **kwargs):
return self._request('PATCH', url, **kwargs)
def delete(self, url, **kwargs):
return self._request('DELETE', url, **kwargs)
def _close_after_stream(response, chunk_size):
"""Iterate over the content and ensure the response is closed after."""
# Yield each chunk in the response body
for chunk in response.iter_content(chunk_size=chunk_size):
yield chunk
# Once we're done streaming the body, ensure everything is closed.
# This will return the connection to the HTTPConnectionPool in urllib3
# and ideally reduce the number of HTTPConnectionPool full warnings.
response.close()
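# Minimal usage sketch (added for illustration; the endpoint, token and path
# below are placeholders, not values taken from this module):
#
#   client = HTTPClient('http://glance.example.com:9292', token='<auth-token>')
#   resp, body = client.get('/v2/images')  # body is parsed JSON for JSON responses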
| chunk_body |
test_timestamp.py | """ test the scalar Timestamp """
import calendar
from datetime import (
datetime,
timedelta,
)
import locale
import pickle
import unicodedata
from dateutil.tz import tzutc
import numpy as np
import pytest
import pytz
from pytz import (
timezone,
utc,
)
from pandas._libs.tslibs.timezones import (
dateutil_gettz as gettz,
get_timezone,
)
import pandas.util._test_decorators as td
from pandas import (
NaT,
Timedelta,
Timestamp,
)
import pandas._testing as tm
from pandas.tseries import offsets
class TestTimestampProperties:
def test_freq_deprecation(self):
# GH#41586
msg = "The 'freq' argument in Timestamp is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
# warning issued at construction
ts = Timestamp("2021-06-01", freq="D")
ts2 = Timestamp("2021-06-01", freq="B")
msg = "Timestamp.freq is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
# warning issued at attribute lookup
ts.freq
for per in ["month", "quarter", "year"]:
for side in ["start", "end"]:
attr = f"is_{per}_{side}"
with tm.assert_produces_warning(FutureWarning, match=msg):
getattr(ts2, attr)
# is_(month|quarter|year)_(start|end) does _not_ issue a warning
# with freq="D" bc the result will be unaffected by the deprecation
with tm.assert_produces_warning(None):
getattr(ts, attr)
@pytest.mark.filterwarnings("ignore:The 'freq' argument:FutureWarning")
@pytest.mark.filterwarnings("ignore:Timestamp.freq is deprecated:FutureWarning")
def test_properties_business(self):
ts = Timestamp("2017-10-01", freq="B")
control = Timestamp("2017-10-01")
assert ts.dayofweek == 6
assert ts.day_of_week == 6
assert not ts.is_month_start # not a weekday
assert not ts.freq.is_month_start(ts)
assert ts.freq.is_month_start(ts + Timedelta(days=1))
assert not ts.is_quarter_start # not a weekday
assert not ts.freq.is_quarter_start(ts)
assert ts.freq.is_quarter_start(ts + Timedelta(days=1))
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp("2017-09-30", freq="B")
control = Timestamp("2017-09-30")
assert ts.dayofweek == 5
assert ts.day_of_week == 5
assert not ts.is_month_end # not a weekday
assert not ts.freq.is_month_end(ts)
assert ts.freq.is_month_end(ts - Timedelta(days=1))
assert not ts.is_quarter_end # not a weekday
assert not ts.freq.is_quarter_end(ts)
assert ts.freq.is_quarter_end(ts - Timedelta(days=1))
# Control case: non-business is month/qtr start
assert control.is_month_end
assert control.is_quarter_end
@pytest.mark.parametrize(
"attr, expected",
[
["year", 2014],
["month", 12],
["day", 31],
["hour", 23],
["minute", 59],
["second", 0],
["microsecond", 0],
["nanosecond", 0],
["dayofweek", 2],
["day_of_week", 2],
["quarter", 4],
["dayofyear", 365],
["day_of_year", 365],
["week", 1],
["daysinmonth", 31],
],
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_fields(self, attr, expected, tz):
# GH 10050
# GH 13303
ts = Timestamp("2014-12-31 23:59:00", tz=tz)
result = getattr(ts, attr)
# that we are int like
assert isinstance(result, int)
assert result == expected
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_millisecond_raises(self, tz):
ts = Timestamp("2014-12-31 23:59:00", tz=tz)
msg = "'Timestamp' object has no attribute 'millisecond'"
with pytest.raises(AttributeError, match=msg):
ts.millisecond
@pytest.mark.parametrize(
"start", ["is_month_start", "is_quarter_start", "is_year_start"]
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_is_start(self, start, tz):
ts = Timestamp("2014-01-01 00:00:00", tz=tz)
assert getattr(ts, start)
@pytest.mark.parametrize("end", ["is_month_end", "is_year_end", "is_quarter_end"])
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_is_end(self, end, tz):
|
# GH 12806
@pytest.mark.parametrize(
"data",
[Timestamp("2017-08-28 23:00:00"), Timestamp("2017-08-28 23:00:00", tz="EST")],
)
# error: Unsupported operand types for + ("List[None]" and "List[str]")
@pytest.mark.parametrize(
"time_locale", [None] + (tm.get_locales() or []) # type: ignore[operator]
)
def test_names(self, data, time_locale):
# GH 17354
# Test .day_name(), .month_name
if time_locale is None:
expected_day = "Monday"
expected_month = "August"
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
result_day = data.day_name(time_locale)
result_month = data.month_name(time_locale)
# Work around https://github.com/pandas-dev/pandas/issues/22342
# different normalizations
expected_day = unicodedata.normalize("NFD", expected_day)
expected_month = unicodedata.normalize("NFD", expected_month)
result_day = unicodedata.normalize("NFD", result_day)
result_month = unicodedata.normalize("NFD", result_month)
assert result_day == expected_day
assert result_month == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
def test_is_leap_year(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 13727
dt = Timestamp("2000-01-01 00:00:00", tz=tz)
assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp("1999-01-01 00:00:00", tz=tz)
assert not dt.is_leap_year
dt = Timestamp("2004-01-01 00:00:00", tz=tz)
assert dt.is_leap_year
dt = Timestamp("2100-01-01 00:00:00", tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1 # ISO standard
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52 # ISO standard
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
result = np.array(
[
Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (2005, 1, 1), (2005, 1, 2)]
]
)
assert (result == [52, 52, 53, 53]).all()
def test_resolution(self):
# GH#21336, GH#21365
dt = Timestamp("2100-01-01 00:00:00")
assert dt.resolution == Timedelta(nanoseconds=1)
# Check that the attribute is available on the class, mirroring
# the stdlib datetime behavior
assert Timestamp.resolution == Timedelta(nanoseconds=1)
class TestTimestamp:
def test_tz(self):
tstr = "2014-02-01 09:00"
ts = Timestamp(tstr)
local = ts.tz_localize("Asia/Tokyo")
assert local.hour == 9
assert local == Timestamp(tstr, tz="Asia/Tokyo")
conv = local.tz_convert("US/Eastern")
assert conv == Timestamp("2014-01-31 19:00", tz="US/Eastern")
assert conv.hour == 19
# preserves nanosecond
ts = Timestamp(tstr) + offsets.Nano(5)
local = ts.tz_localize("Asia/Tokyo")
assert local.hour == 9
assert local.nanosecond == 5
conv = local.tz_convert("US/Eastern")
assert conv.nanosecond == 5
assert conv.hour == 19
def test_utc_z_designator(self):
assert get_timezone(Timestamp("2014-11-02 01:00Z").tzinfo) is utc
def test_asm8(self):
np.random.seed(7_960_929)
ns = [Timestamp.min.value, Timestamp.max.value, 1000]
for n in ns:
assert (
Timestamp(n).asm8.view("i8") == np.datetime64(n, "ns").view("i8") == n
)
assert Timestamp("nat").asm8.view("i8") == np.datetime64("nat", "ns").view("i8")
def test_class_ops_pytz(self):
def compare(x, y):
assert int((Timestamp(x).value - Timestamp(y).value) / 1e9) == 0
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now("UTC"), datetime.now(timezone("UTC")))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calendar.timegm(datetime.now().utctimetuple())
msg = "timezone-aware Timestamp with UTC"
with tm.assert_produces_warning(FutureWarning, match=msg):
# GH#22451
ts_utc = Timestamp.utcfromtimestamp(current_time)
compare(
ts_utc,
datetime.utcfromtimestamp(current_time),
)
compare(
Timestamp.fromtimestamp(current_time), datetime.fromtimestamp(current_time)
)
compare(
# Support tz kwarg in Timestamp.fromtimestamp
Timestamp.fromtimestamp(current_time, "UTC"),
datetime.fromtimestamp(current_time, utc),
)
compare(
# Support tz kwarg in Timestamp.fromtimestamp
Timestamp.fromtimestamp(current_time, tz="UTC"),
datetime.fromtimestamp(current_time, utc),
)
date_component = datetime.utcnow()
time_component = (date_component + timedelta(minutes=10)).time()
compare(
Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component),
)
def test_class_ops_dateutil(self):
def compare(x, y):
assert (
int(
np.round(Timestamp(x).value / 1e9)
- np.round(Timestamp(y).value / 1e9)
)
== 0
)
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now("UTC"), datetime.now(tzutc()))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calendar.timegm(datetime.now().utctimetuple())
msg = "timezone-aware Timestamp with UTC"
with tm.assert_produces_warning(FutureWarning, match=msg):
# GH#22451
ts_utc = Timestamp.utcfromtimestamp(current_time)
compare(
ts_utc,
datetime.utcfromtimestamp(current_time),
)
compare(
Timestamp.fromtimestamp(current_time), datetime.fromtimestamp(current_time)
)
date_component = datetime.utcnow()
time_component = (date_component + timedelta(minutes=10)).time()
compare(
Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component),
)
def test_basics_nanos(self):
val = np.int64(946_684_800_000_000_000).view("M8[ns]")
stamp = Timestamp(val.view("i8") + 500)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.microsecond == 0
assert stamp.nanosecond == 500
# GH 14415
val = np.iinfo(np.int64).min + 80_000_000_000_000
stamp = Timestamp(val)
assert stamp.year == 1677
assert stamp.month == 9
assert stamp.day == 21
assert stamp.microsecond == 145224
assert stamp.nanosecond == 192
@pytest.mark.parametrize(
"value, check_kwargs",
[
[946688461000000000, {}],
[946688461000000000 / 1000, {"unit": "us"}],
[946688461000000000 / 1_000_000, {"unit": "ms"}],
[946688461000000000 / 1_000_000_000, {"unit": "s"}],
[10957, {"unit": "D", "h": 0}],
[
(946688461000000000 + 500000) / 1000000000,
{"unit": "s", "us": 499, "ns": 964},
],
[
(946688461000000000 + 500000000) / 1000000000,
{"unit": "s", "us": 500000},
],
[(946688461000000000 + 500000) / 1000000, {"unit": "ms", "us": 500}],
[(946688461000000000 + 500000) / 1000, {"unit": "us", "us": 500}],
[(946688461000000000 + 500000000) / 1000000, {"unit": "ms", "us": 500000}],
[946688461000000000 / 1000.0 + 5, {"unit": "us", "us": 5}],
[946688461000000000 / 1000.0 + 5000, {"unit": "us", "us": 5000}],
[946688461000000000 / 1000000.0 + 0.5, {"unit": "ms", "us": 500}],
[946688461000000000 / 1000000.0 + 0.005, {"unit": "ms", "us": 5, "ns": 5}],
[946688461000000000 / 1000000000.0 + 0.5, {"unit": "s", "us": 500000}],
[10957 + 0.5, {"unit": "D", "h": 12}],
],
)
def test_unit(self, value, check_kwargs):
def check(value, unit=None, h=1, s=1, us=0, ns=0):
stamp = Timestamp(value, unit=unit)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.day == 1
assert stamp.hour == h
if unit != "D":
assert stamp.minute == 1
assert stamp.second == s
assert stamp.microsecond == us
else:
assert stamp.minute == 0
assert stamp.second == 0
assert stamp.microsecond == 0
assert stamp.nanosecond == ns
check(value, **check_kwargs)
def test_roundtrip(self):
# test value to string and back conversions
# further test accessors
base = Timestamp("20140101 00:00:00")
result = Timestamp(base.value + Timedelta("5ms").value)
assert result == Timestamp(f"{base}.005000")
assert result.microsecond == 5000
result = Timestamp(base.value + Timedelta("5us").value)
assert result == Timestamp(f"{base}.000005")
assert result.microsecond == 5
result = Timestamp(base.value + Timedelta("5ns").value)
assert result == Timestamp(f"{base}.000000005")
assert result.nanosecond == 5
assert result.microsecond == 0
result = Timestamp(base.value + Timedelta("6ms 5us").value)
assert result == Timestamp(f"{base}.006005")
assert result.microsecond == 5 + 6 * 1000
result = Timestamp(base.value + Timedelta("200ms 5us").value)
assert result == Timestamp(f"{base}.200005")
assert result.microsecond == 5 + 200 * 1000
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
assert d[stamp] == 5
@pytest.mark.parametrize(
"timezone, year, month, day, hour",
[["America/Chicago", 2013, 11, 3, 1], ["America/Santiago", 2021, 4, 3, 23]],
)
def test_hash_timestamp_with_fold(self, timezone, year, month, day, hour):
# see gh-33931
test_timezone = gettz(timezone)
transition_1 = Timestamp(
year=year,
month=month,
day=day,
hour=hour,
minute=0,
fold=0,
tzinfo=test_timezone,
)
transition_2 = Timestamp(
year=year,
month=month,
day=day,
hour=hour,
minute=0,
fold=1,
tzinfo=test_timezone,
)
assert hash(transition_1) == hash(transition_2)
def test_tz_conversion_freq(self, tz_naive_fixture):
# GH25241
with tm.assert_produces_warning(FutureWarning, match="freq"):
t1 = Timestamp("2019-01-01 10:00", freq="H")
assert t1.tz_localize(tz=tz_naive_fixture).freq == t1.freq
with tm.assert_produces_warning(FutureWarning, match="freq"):
t2 = Timestamp("2019-01-02 12:00", tz="UTC", freq="T")
assert t2.tz_convert(tz="UTC").freq == t2.freq
def test_pickle_freq_no_warning(self):
# GH#41949 we don't want a warning on unpickling
with tm.assert_produces_warning(FutureWarning, match="freq"):
ts = Timestamp("2019-01-01 10:00", freq="H")
out = pickle.dumps(ts)
with tm.assert_produces_warning(None):
res = pickle.loads(out)
assert res._freq == ts._freq
class TestTimestampNsOperations:
def test_nanosecond_string_parsing(self):
ts = Timestamp("2013-05-01 07:15:45.123456789")
# GH 7878
expected_repr = "2013-05-01 07:15:45.123456789"
expected_value = 1_367_392_545_123_456_789
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp("2013-05-01 07:15:45.123456789+09:00", tz="Asia/Tokyo")
assert ts.value == expected_value - 9 * 3600 * 1_000_000_000
assert expected_repr in repr(ts)
ts = Timestamp("2013-05-01 07:15:45.123456789", tz="UTC")
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp("2013-05-01 07:15:45.123456789", tz="US/Eastern")
assert ts.value == expected_value + 4 * 3600 * 1_000_000_000
assert expected_repr in repr(ts)
# GH 10041
ts = Timestamp("20130501T071545.123456789")
assert ts.value == expected_value
assert expected_repr in repr(ts)
def test_nanosecond_timestamp(self):
# GH 7610
expected = 1_293_840_000_000_000_005
t = Timestamp("2011-01-01") + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
t = Timestamp(t)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
t = Timestamp("2011-01-01 00:00:00.000000005")
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
expected = 1_293_840_000_000_000_010
t = t + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
t = Timestamp(t)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
t = Timestamp("2011-01-01 00:00:00.000000010")
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
class TestTimestampToJulianDate:
def test_compare_1700(self):
r = Timestamp("1700-06-23").to_julian_date()
assert r == 2_342_145.5
def test_compare_2000(self):
r = Timestamp("2000-04-12").to_julian_date()
assert r == 2_451_646.5
def test_compare_2100(self):
r = Timestamp("2100-08-12").to_julian_date()
assert r == 2_488_292.5
def test_compare_hour01(self):
r = Timestamp("2000-08-12T01:00:00").to_julian_date()
assert r == 2_451_768.5416666666666666
def test_compare_hour13(self):
r = Timestamp("2000-08-12T13:00:00").to_julian_date()
assert r == 2_451_769.0416666666666666
class TestTimestampConversion:
def test_conversion(self):
# GH#9255
ts = Timestamp("2000-01-01")
result = ts.to_pydatetime()
expected = datetime(2000, 1, 1)
assert result == expected
assert type(result) == type(expected)
result = ts.to_datetime64()
expected = np.datetime64(ts.value, "ns")
assert result == expected
assert type(result) == type(expected)
assert result.dtype == expected.dtype
def test_to_pydatetime_nonzero_nano(self):
ts = Timestamp("2011-01-01 9:00:00.123456789")
# Warn the user of data loss (nanoseconds).
with tm.assert_produces_warning(UserWarning):
expected = datetime(2011, 1, 1, 9, 0, 0, 123456)
result = ts.to_pydatetime()
assert result == expected
def test_timestamp_to_datetime(self):
stamp = Timestamp("20090415", tz="US/Eastern")
dtval = stamp.to_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_timestamp_to_datetime_dateutil(self):
stamp = Timestamp("20090415", tz="dateutil/US/Eastern")
dtval = stamp.to_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_timestamp_to_datetime_explicit_pytz(self):
stamp = Timestamp("20090415", tz=pytz.timezone("US/Eastern"))
dtval = stamp.to_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
@td.skip_if_windows
def test_timestamp_to_datetime_explicit_dateutil(self):
stamp = Timestamp("20090415", tz=gettz("US/Eastern"))
dtval = stamp.to_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_to_datetime_bijective(self):
# Ensure that converting to datetime and back only loses precision
# by going from nanoseconds to microseconds.
exp_warning = None if Timestamp.max.nanosecond == 0 else UserWarning
with tm.assert_produces_warning(exp_warning):
pydt_max = Timestamp.max.to_pydatetime()
assert Timestamp(pydt_max).value / 1000 == Timestamp.max.value / 1000
exp_warning = None if Timestamp.min.nanosecond == 0 else UserWarning
with tm.assert_produces_warning(exp_warning):
pydt_min = Timestamp.min.to_pydatetime()
# The next assertion can be enabled once GH#39221 is merged
# assert pydt_min < Timestamp.min # this is bc nanos are dropped
tdus = timedelta(microseconds=1)
assert pydt_min + tdus > Timestamp.min
assert Timestamp(pydt_min + tdus).value / 1000 == Timestamp.min.value / 1000
def test_to_period_tz_warning(self):
# GH#21333 make sure a warning is issued when timezone
# info is lost
ts = Timestamp("2009-04-15 16:17:18", tz="US/Eastern")
with tm.assert_produces_warning(UserWarning):
# warning that timezone info will be lost
ts.to_period("D")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
ts = Timestamp(datetime.now())
assert ts.to_datetime64() == ts.to_numpy()
# GH#44460
msg = "dtype and copy arguments are ignored"
with pytest.raises(ValueError, match=msg):
ts.to_numpy("M8[s]")
with pytest.raises(ValueError, match=msg):
ts.to_numpy(copy=True)
class SubDatetime(datetime):
pass
@pytest.mark.parametrize(
"lh,rh",
[
(SubDatetime(2000, 1, 1), Timedelta(hours=1)),
(Timedelta(hours=1), SubDatetime(2000, 1, 1)),
],
)
def test_dt_subclass_add_timedelta(lh, rh):
# GH#25851
# ensure that subclassed datetime works for
# Timedelta operations
result = lh + rh
expected = SubDatetime(2000, 1, 1, 1)
assert result == expected
| ts = Timestamp("2014-12-31 23:59:59", tz=tz)
assert getattr(ts, end) |
rights_reducer.rs | // Copyright (c) SimpleStaking, Viable Systems and Tezedge Contributors
// SPDX-License-Identifier: MIT
use std::collections::hash_map::Entry;
use crate::{Action, State};
use redux_rs::ActionWithMeta;
use super::{
cycle_delegates::{CycleDelegatesQuery, CycleDelegatesQueryState},
rights_actions::*,
RightsRequest, Validators,
};
pub fn rights_reducer(state: &mut State, action: &ActionWithMeta<Action>) | {
let requests = &mut state.rights.requests;
match &action.action {
// RPC actions
Action::RightsRpcGet(RightsRpcGetAction { key, rpc_id }) => {
state
.rights
.rpc_requests
.entry(key.clone())
.or_default()
.push(*rpc_id);
}
Action::RightsPruneRpcRequest(RightsRpcPruneAction { key }) => {
state.rights.rpc_requests.remove(key);
}
// Auxiliary actions
Action::RightsInit(RightsInitAction { key }) if !requests.contains_key(key) => {
requests.insert(key.clone(), RightsRequest::Init { start: action.id });
}
Action::RightsGetBlockHeader(RightsGetBlockHeaderAction { key }) => {
if let Some(request) = requests.get_mut(key) {
if let RightsRequest::Init { start } = request {
*request = RightsRequest::PendingBlockHeader { start: *start };
}
}
}
Action::RightsBlockHeaderReady(RightsBlockHeaderReadyAction { key, block_header }) => {
if let Some(request) = requests.get_mut(key) {
if let RightsRequest::PendingBlockHeader { start } = request {
*request = RightsRequest::BlockHeaderReady {
start: *start,
block_header: block_header.clone(),
};
}
}
}
Action::RightsGetProtocolHash(RightsGetProtocolHashAction { key }) => {
if let Some(request) = requests.get_mut(key) {
if let RightsRequest::BlockHeaderReady {
start,
block_header,
} = request
{
*request = RightsRequest::PendingProtocolHash {
start: *start,
block_header: block_header.clone(),
};
}
}
}
Action::RightsProtocolHashReady(RightsProtocolHashReadyAction {
key,
proto_hash,
protocol,
}) => {
if let Some(request) = requests.get_mut(key) {
if let RightsRequest::PendingProtocolHash {
start,
block_header,
} = request
{
*request = RightsRequest::ProtocolHashReady {
start: *start,
block_header: block_header.clone(),
proto_hash: proto_hash.clone(),
protocol: protocol.clone(),
};
}
}
}
Action::RightsGetProtocolConstants(RightsGetProtocolConstantsAction { key }) => {
if let Some(request) = requests.get_mut(key) {
if let RightsRequest::ProtocolHashReady {
start,
block_header,
proto_hash,
protocol,
} = request
{
*request = RightsRequest::PendingProtocolConstants {
start: *start,
block_header: block_header.clone(),
proto_hash: proto_hash.clone(),
protocol: protocol.clone(),
};
}
}
}
Action::RightsProtocolConstantsReady(RightsProtocolConstantsReadyAction {
key,
constants,
}) => {
if let Some(request) = requests.get_mut(key) {
if let RightsRequest::PendingProtocolConstants {
start,
block_header,
proto_hash,
protocol,
} = request
{
*request = RightsRequest::ProtocolConstantsReady {
start: *start,
block_header: block_header.clone(),
proto_hash: proto_hash.clone(),
protocol: protocol.clone(),
protocol_constants: constants.clone(),
};
}
}
}
Action::RightsGetCycleEras(RightsGetCycleErasAction { key }) => {
if let Some(request) = requests.get_mut(key) {
if let RightsRequest::ProtocolConstantsReady {
start,
block_header,
proto_hash,
protocol,
protocol_constants,
} = request
{
*request =
if let Some(cycle_eras) = state.rights.cycle_eras.get_result(proto_hash) {
RightsRequest::CycleErasReady {
start: *start,
block_header: block_header.clone(),
protocol: protocol.clone(),
protocol_constants: protocol_constants.clone(),
cycle_eras: cycle_eras.clone(),
}
} else {
RightsRequest::PendingCycleEras {
start: *start,
block_header: block_header.clone(),
proto_hash: proto_hash.clone(),
protocol: protocol.clone(),
protocol_constants: protocol_constants.clone(),
}
};
}
}
}
Action::RightsCycleErasReady(RightsCycleErasReadyAction { key, cycle_eras }) => {
if let Some(request) = requests.get_mut(key) {
if let RightsRequest::PendingCycleEras {
start,
block_header,
proto_hash: _,
protocol,
protocol_constants,
} = request
{
*request = RightsRequest::CycleErasReady {
start: *start,
block_header: block_header.clone(),
protocol: protocol.clone(),
protocol_constants: protocol_constants.clone(),
cycle_eras: cycle_eras.clone(),
};
}
}
}
Action::RightsGetCycle(RightsGetCycleAction { key }) => {
if let Some(request) = requests.get_mut(key) {
if let RightsRequest::CycleErasReady {
start,
block_header,
protocol,
protocol_constants,
cycle_eras,
} = request
{
*request = RightsRequest::PendingCycle {
start: *start,
block_header: block_header.clone(),
protocol: protocol.clone(),
protocol_constants: protocol_constants.clone(),
cycle_eras: cycle_eras.clone(),
};
}
}
}
Action::RightsCycleReady(RightsCycleReadyAction {
key,
cycle,
position,
}) => {
if let Some(request) = requests.get_mut(key) {
if let RightsRequest::PendingCycle {
start,
block_header,
protocol,
protocol_constants,
cycle_eras: _,
} = request
{
*request = RightsRequest::CycleReady {
start: *start,
block_header: block_header.clone(),
protocol: protocol.clone(),
protocol_constants: protocol_constants.clone(),
level: key.level().unwrap_or_else(|| block_header.level()),
cycle: *cycle,
position: *position,
};
}
}
}
Action::RightsGetCycleData(RightsGetCycleDataAction { key }) => {
if let Some(request) = requests.get_mut(key) {
if let RightsRequest::CycleReady {
start,
block_header: _,
protocol,
protocol_constants,
level,
cycle,
position,
} = request
{
*request = RightsRequest::PendingCycleData {
start: *start,
protocol: protocol.clone(),
protocol_constants: protocol_constants.clone(),
level: *level,
cycle: *cycle,
position: *position,
};
}
}
}
Action::RightsCycleDataReady(RightsCycleDataReadyAction { key, cycle_data }) => {
if let Some(request) = requests.get_mut(key) {
if let RightsRequest::PendingCycleData {
start,
protocol,
protocol_constants,
level,
cycle: _,
position,
} = request
{
*request = RightsRequest::CycleDataReady {
start: *start,
protocol: protocol.clone(),
protocol_constants: protocol_constants.clone(),
level: *level,
position: *position,
cycle_data: cycle_data.clone(),
};
}
}
}
Action::RightsCalculateEndorsingRights(RightsCalculateAction { key }) => {
if let Some(request) = requests.get_mut(key) {
if let RightsRequest::CycleDataReady {
start,
protocol,
protocol_constants,
level,
cycle_data,
position,
} = request
{
*request = RightsRequest::PendingRightsCalculation {
start: *start,
protocol: protocol.clone(),
protocol_constants: protocol_constants.clone(),
level: *level,
cycle_data: cycle_data.clone(),
position: *position,
};
}
}
}
Action::RightsGetCycleDelegates(RightsGetCycleDelegatesAction { key }) => {
if let Some(request) = requests.get_mut(key) {
if let RightsRequest::CycleReady {
start,
block_header,
level,
cycle,
..
} = request
{
let req = state.rights.cycle_delegates.get(cycle);
*request = if let Some(CycleDelegatesQuery {
state: CycleDelegatesQueryState::Success(delegates),
}) = req
{
RightsRequest::CycleDelegatesReady {
start: *start,
block_header: block_header.clone(),
level: *level,
delegates: delegates.clone(),
}
} else {
RightsRequest::PendingCycleDelegates {
start: *start,
block_header: block_header.clone(),
level: *level,
cycle: *cycle,
}
};
}
}
}
Action::RightsCycleDelegatesReady(RightsCycleDelegatesReadyAction { key, delegates }) => {
if let Some(request) = requests.get_mut(key) {
if let RightsRequest::PendingCycleDelegates {
start,
block_header,
level,
cycle: _,
} = request
{
*request = RightsRequest::CycleDelegatesReady {
start: *start,
block_header: block_header.clone(),
level: *level,
delegates: delegates.clone(),
};
}
}
}
Action::RightsCalculateIthaca(RightsCalculateIthacaAction { key }) => {
if let Some(request) = requests.get_mut(key) {
if let RightsRequest::CycleDelegatesReady {
start,
block_header,
level,
delegates,
} = request
{
*request = RightsRequest::PendingRightsCalculationIthaca {
start: *start,
block_header: block_header.clone(),
level: *level,
delegates: delegates.clone(),
};
}
}
}
Action::RightsContextRequested(RightsContextRequestedAction { key, token }) => {
if let Some(request) = requests.get_mut(key) {
if let RightsRequest::PendingRightsCalculationIthaca {
start,
block_header: _,
level,
delegates,
} = request
{
*request = RightsRequest::PendingRightsFromContextIthaca {
start: *start,
level: *level,
delegates: delegates.clone(),
token: *token,
};
}
}
}
Action::RightsIthacaContextValidatorsSuccess(
RightsIthacaContextValidatorsSuccessAction {
key,
validators: context_validators,
},
) => {
if let Some(request) = requests.get_mut(key) {
if let RightsRequest::PendingRightsFromContextIthaca {
start: _,
level,
delegates,
..
} = request
{
let validators = context_validators
.validators
.iter()
.filter_map(|pkh| delegates.get(pkh).cloned())
.collect();
let slots = context_validators
.slots
.iter()
.filter_map(|(pkh, rights)| {
delegates.get(pkh).cloned().map(|pk| (pk, rights.clone()))
})
.collect();
*request = RightsRequest::ValidatorsReady(Validators {
level: *level,
validators,
slots,
});
}
}
}
Action::RightsBakingOldReady(RightsBakingOldReadyAction { key, baking_rights }) => {
if let Some(RightsRequest::PendingRightsCalculation { .. }) = requests.remove(key) {
let cache = &mut state.rights.cache.baking;
let duration = state.rights.cache.time;
cache.retain(|_, (timestamp, _)| action.id.duration_since(*timestamp) < duration);
slog::trace!(&state.log, "cached baking rights"; "level" => baking_rights.level);
cache.insert(baking_rights.level, (action.id, baking_rights.clone()));
}
}
Action::RightsEndorsingOldReady(RightsEndorsingOldReadyAction {
key,
endorsing_rights,
}) => {
if let Entry::Occupied(entry) = requests.entry(key.clone()) {
if let RightsRequest::PendingRightsCalculation { .. } = entry.get() {
entry.remove();
}
let cache = &mut state.rights.cache.endorsing_old;
let duration = state.rights.cache.time;
cache.retain(|_, (timestamp, _)| action.id.duration_since(*timestamp) < duration);
slog::trace!(&state.log, "cached endorsing rights"; "level" => endorsing_rights.level);
cache.insert(
endorsing_rights.level,
(action.id, endorsing_rights.clone()),
);
}
}
Action::RightsValidatorsReady(RightsValidatorsReadyAction { key }) => {
if let Entry::Occupied(entry) = requests.entry(key.clone()) {
if let RightsRequest::ValidatorsReady(validators) = entry.get() {
let validators = validators.clone();
entry.remove();
let cache = &mut state.rights.cache.validators;
let duration = state.rights.cache.time;
cache.retain(|_, (timestamp, _)| {
action.id.duration_since(*timestamp) < duration
});
slog::trace!(&state.log, "cached endorsing rights"; "level" => validators.level);
cache.insert(validators.level, (action.id, validators));
}
}
}
Action::RightsError(RightsErrorAction { key, error }) => {
if let Some(request) = requests.remove(key) {
state
.rights
.errors
.push((key.clone().into(), request, error.clone()));
}
}
_ => (),
}
} |
|
views.py | from django.utils.translation import ugettext as _
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from .serializers import PaymentInputSerializer, PaymentPatchSerializer, PaymentResponseSerializer
from .services import PaymentService
class PaymentView(GenericAPIView):
serializer_class = PaymentInputSerializer
def | (self, **kwargs):
super().__init__(**kwargs)
self.service = PaymentService()
def post(self, request):
"""
Record a new payment.
"""
params = request.data.copy()
data = self.service.insert(params)
serializer = PaymentResponseSerializer(data)
result = {'detail': _('Payment recorded successfully!'), 'data': serializer.data}
return Response(result)
class PaymentViewId(GenericAPIView):
serializer_class = PaymentPatchSerializer
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.service = PaymentService()
def patch(self, request, payment_id):
"""
Update payment value. If payment value reach zero, the flag is_paid will be set to true.
It will raise an error if:
- Payment is already paid;
- Expiration date has already passed;
- Value is higher than amount available for payment;
- Branch has no balance.
"""
params = dict(
value=request.data.get('value'), id=payment_id
)
data = self.service.pay(params)
serializer = PaymentResponseSerializer(data)
result = {'detail': _('Payment changed successfully!'), 'data': serializer.data}
return Response(result)
| __init__ |
SearchStudio.js | import { SearchFilterFirstLetter, SearchFilterSortBy } from "components/search-filter";
import useEntitySearch from "hooks/useEntitySearch";
import { SearchEntity } from "components/search";
import { navigate } from "gatsby";
import { SummaryCard } from "components/card";
const sortByFields = new Map([
[ "A ➜ Z", "name" ],
[ "Z ➜ A", "-name" ],
[ "Last Added", "-created_at" ]
]);
const sortByOptions = [ ...sortByFields.keys() ];
export function Sear | earchQuery, locationState }) {
const filterFirstLetter = locationState?.filterFirstLetter || null;
const sortBy = locationState?.sortBy || sortByOptions[0];
const updateState = (field) => (newValue) => {
navigate("", {
state: {
...locationState,
[field]: newValue
},
replace: true
});
};
const entitySearch = useEntitySearch("studio", searchQuery, {
filters: {
"name][like": filterFirstLetter ? `${filterFirstLetter}%` : null,
},
sortBy: searchQuery ? null : sortByFields.get(sortBy)
});
return (
<SearchEntity
searchQuery={searchQuery}
filters={
<>
<SearchFilterFirstLetter value={filterFirstLetter} setValue={updateState("filterFirstLetter")}/>
<SearchFilterSortBy
options={searchQuery ? [ "Relevance" ] : sortByOptions}
value={searchQuery ? "Relevance" : sortBy}
setValue={updateState("sortBy")}
/>
</>
}
renderSummaryCard={(studio) => <SummaryCard key={studio.slug} title={studio.name} description="Studio" to={`/studio/${studio.slug}`} />}
{...entitySearch}
/>
);
}
| chStudio({ s |
di.spec.ts | import { Resolver, Factory, fallbackInvoker, transient, singleton } from './../../src/di';
import { spy } from 'sinon';
import { DI, Container, PLATFORM, IContainer, IDefaultableInterfaceSymbol, ResolverStrategy, inject, invokeWithDynamicDependencies, classInvokers, Registration } from "../../src";
import { expect } from "chai";
import { _ } from "./util";
import * as sinon from 'sinon';
function assertIsMutableArray(arr: any[], length: number): void {
expect(Array.isArray(arr)).to.be.true;
expect(arr instanceof Array).to.be.true;
expect(arr).not.to.equal(PLATFORM.emptyArray);
expect(arr.length).to.equal(length);
arr.push(null);
expect(arr.length).to.equal(length + 1);
arr.pop();
expect(arr.length).to.equal(length);
}
function decorator(): ClassDecorator { return (target: any) => target; }
describe(`The DI object`, () => {
describe(`createContainer()`, () => {
it(`returns an instance of Container`, () => {
const actual = DI.createContainer();
expect(actual).to.be.instanceof(Container);
});
it(`returns a new container every time`, () => {
expect(DI.createContainer()).not.to.equal(DI.createContainer());
});
});
describe(`getDesignParamTypes()`, () => {
it(`returns PLATFORM.emptyArray if the class has no constructor or decorators`, () => {
class Foo {}
const actual = DI.getDesignParamTypes(Foo);
expect(actual).to.equal(PLATFORM.emptyArray);
});
it(`returns PLATFORM.emptyArray if the class has a decorator but no constructor`, () => {
@decorator()
class Foo {}
const actual = DI.getDesignParamTypes(Foo);
expect(actual).to.equal(PLATFORM.emptyArray);
});
it(`returns PLATFORM.emptyArray if the class has no constructor args or decorators`, () => {
class Foo { constructor() {} }
const actual = DI.getDesignParamTypes(Foo);
expect(actual).to.equal(PLATFORM.emptyArray);
});
it(`returns PLATFORM.emptyArray if the class has constructor args but no decorators`, () => {
class Bar {}
class Foo { constructor(public bar: Bar) {} }
const actual = DI.getDesignParamTypes(Foo);
expect(actual).to.equal(PLATFORM.emptyArray);
});
it(`returns PLATFORM.emptyArray if the class has constructor args and the decorator is applied via a function call`, () => {
class Bar {}
class Foo { constructor(public bar: Bar) {} }
decorator()(Foo)
const actual = DI.getDesignParamTypes(Foo);
expect(actual).to.equal(PLATFORM.emptyArray);
});
it(`returns PLATFORM.emptyArray if the class is declared as an anonymous variable, even if it has ctor args and decorator is applied properly`, () => {
class Bar {}
@decorator()
const FooInline = class{ constructor(public bar: Bar) {} }
const actual = DI.getDesignParamTypes(FooInline);
expect(actual).to.equal(PLATFORM.emptyArray);
});
it(`returns PLATFORM.emptyArray if the class is declared as a named variable, even if it has ctor args and decorator is applied properly`, () => {
class Bar {}
@decorator()
const FooInline = class Foo{ constructor(public bar: Bar) {} }
const actual = DI.getDesignParamTypes(FooInline);
expect(actual).to.equal(PLATFORM.emptyArray);
});
describe(`returns an empty array if the class has a decorator but no constructor args`, () => {
@decorator()
class Foo { constructor() {} }
it(_`${Foo}`, () => {
const actual = DI.getDesignParamTypes(Foo);
assertIsMutableArray(actual, 0);
});
it(_`${class{}}`, () => {
let cls;
function anonDecorator(): ClassDecorator { return (target: any) => cls = target; }
@anonDecorator()
class{ constructor() {} };
const actual = DI.getDesignParamTypes(cls);
assertIsMutableArray(actual, 0);
});
});
describe(`falls back to Object for declarations that cannot be statically analyzed`, () => {
interface argCtor{}
for (const argCtor of <any[]>[
class Bar{},
function(){},
()=>{},
class{},
{},
Error,
Array,
(class Bar{}).prototype,
(class Bar{}).prototype.constructor
]) {
@decorator()
class FooDecoratorInvocation{ constructor(public arg: argCtor){} }
it(_`${FooDecoratorInvocation} { constructor(${argCtor}) }`, () => {
const actual = DI.getDesignParamTypes(FooDecoratorInvocation);
assertIsMutableArray(actual, 1);
expect(actual[0]).to.equal(Object);
});
@(<any>decorator)
class FooDecoratorNonInvocation{ constructor(public arg: argCtor){} }
it(_`${FooDecoratorNonInvocation} { constructor(${argCtor}) }`, () => {
const actual = DI.getDesignParamTypes(FooDecoratorInvocation);
assertIsMutableArray(actual, 1);
expect(actual[0]).to.equal(Object);
});
}
});
describe(`falls back to Object for mismatched declarations`, () => {
// Technically we're testing TypeScript here, but it's still useful to have an in-place fixture to validate our assumptions
// And also to have an alert mechanism for when the functionality in TypeScript changes, without having to read the changelogs
// What we're verifying here is under which circumstances a function object will or won't be properly resolved as a
// designParamType, and it seems like the presence of a same-name interface actually breaks this in some situations
// Note: the order of declaration (interface first or other thing first) doesn't seem to matter here
// But whether or not there is a direct type cast, does seem to matter in the case of AnonClass (note the negative assertion)
// It's unclear whether the difference between AnonClass (which works) and AnonClassInterface (which doesn't work) is a bug in TS or not,
// but it has ramifications we need to keep in mind.
interface Bar {}
class Bar{}
interface AnonClass {}
const AnonClass = class{};
interface AnonClassInterface {}
const AnonClassInterface: AnonClassInterface = class{};
interface VarFunc {}
const VarFunc = function(){};
interface VarFuncInterface {}
const VarFuncInterface: VarFuncInterface = function(){};
interface Func {}
function Func(){}
interface Arrow {}
const Arrow = () => {};
interface ArrowInterface {}
const ArrowInterface: ArrowInterface = () => {};
describe(`decorator invocation`, () => {
@decorator()
class FooBar{ constructor(public arg: Bar){} }
// Note: this is a negative assertion meant to make it easier to compare this describe with the one below
it(_`NOT ${FooBar} { constructor(public ${Bar}) }`, () => {
const actual = DI.getDesignParamTypes(FooBar);
assertIsMutableArray(actual, 1);
expect(actual[0]).to.equal(Bar);
});
@decorator()
class FooAnonClass{ constructor(public arg: AnonClass){} }
// Note: this is a negative assertion meant to make it easier to compare this describe with the one below
it(_`NOT ${FooAnonClass} { constructor(public ${AnonClass}) }`, () => {
const actual = DI.getDesignParamTypes(FooAnonClass);
assertIsMutableArray(actual, 1);
expect(actual[0]).to.equal(AnonClass);
});
@decorator()
class FooAnonClassInterface{ constructor(public arg: AnonClassInterface){} }
// this one is particularly interesting..
it(_`${FooAnonClassInterface} { constructor(public ${AnonClassInterface}) }`, () => {
const actual = DI.getDesignParamTypes(FooAnonClassInterface);
assertIsMutableArray(actual, 1);
expect(actual[0]).to.equal(Object);
});
@decorator()
class FooVarFunc{ constructor(public arg: VarFunc){} }
it(_`${FooVarFunc} { constructor(public ${VarFunc}) }`, () => {
const actual = DI.getDesignParamTypes(FooVarFunc);
assertIsMutableArray(actual, 1);
expect(actual[0]).to.equal(Object);
});
@decorator()
class FooVarFuncInterface{ constructor(public arg: VarFuncInterface){} }
it(_`${FooVarFuncInterface} { constructor(public ${VarFuncInterface}) }`, () => {
const actual = DI.getDesignParamTypes(FooVarFuncInterface);
assertIsMutableArray(actual, 1);
expect(actual[0]).to.equal(Object);
});
@decorator()
class FooFunc{ constructor(public arg: Func){} }
it(_`${FooFunc} { constructor(public ${Func}) }`, () => {
const actual = DI.getDesignParamTypes(FooFunc);
assertIsMutableArray(actual, 1);
expect(actual[0]).to.equal(Object);
});
@decorator()
class FooArrow{ constructor(public arg: Arrow){} }
it(_`${FooArrow} { constructor(public ${Arrow}) }`, () => {
const actual = DI.getDesignParamTypes(FooArrow);
assertIsMutableArray(actual, 1);
expect(actual[0]).to.equal(Object);
});
@decorator()
class FooArrowInterface{ constructor(public arg: ArrowInterface){} }
it(_`${FooArrowInterface} { constructor(public ${ArrowInterface}) }`, () => {
const actual = DI.getDesignParamTypes(FooArrowInterface);
assertIsMutableArray(actual, 1);
expect(actual[0]).to.equal(Object);
});
});
});
describe(`returns the correct types for valid declarations`, () => {
class Bar{}
const AnonClass = class{};
const VarFunc = function(){};
function Func(){}
const Arrow = () => {};
describe(`decorator invocation`, () => {
@decorator()
class FooBar{ constructor(public arg: Bar){} }
it(_`${FooBar} { constructor(public ${Bar}) }`, () => {
const actual = DI.getDesignParamTypes(FooBar);
assertIsMutableArray(actual, 1);
expect(actual[0]).to.equal(Bar);
});
@decorator()
class FooAnonClass{ constructor(public arg: AnonClass){} }
it(_`${FooAnonClass} { constructor(public ${AnonClass}) }`, () => {
const actual = DI.getDesignParamTypes(FooAnonClass);
assertIsMutableArray(actual, 1);
expect(actual[0]).to.equal(AnonClass);
});
@decorator()
class FooVarFunc{ constructor(public arg: VarFunc){} }
it(_`${FooVarFunc} { constructor(public ${VarFunc}) }`, () => {
const actual = DI.getDesignParamTypes(FooVarFunc);
assertIsMutableArray(actual, 1);
expect(actual[0]).to.equal(VarFunc);
});
@decorator()
class FooFunc{ constructor(public arg: Func){} }
it(_`${FooFunc} { constructor(public ${Func}) }`, () => {
const actual = DI.getDesignParamTypes(FooFunc);
assertIsMutableArray(actual, 1);
expect(actual[0]).to.equal(Func);
});
@decorator()
class FooArrow{ constructor(public arg: Arrow){} }
it(_`${FooArrow} { constructor(public ${Arrow}) }`, () => {
const actual = DI.getDesignParamTypes(FooArrow);
assertIsMutableArray(actual, 1);
expect(actual[0]).to.equal(Arrow);
});
});
});
});
describe(`getDependencies()`, () => {
let getDesignParamTypes: ReturnType<typeof spy>;
beforeEach(() => {
getDesignParamTypes = spy(DI, 'getDesignParamTypes');
});
afterEach(() => {
getDesignParamTypes.restore();
});
it(`uses getDesignParamTypes() if the static inject property does not exist`, () => {
class Bar {}
@decorator()
class Foo{ constructor(bar: Bar){} }
const actual = DI.getDependencies(Foo);
expect(getDesignParamTypes).to.have.been.calledWith(Foo);
expect(actual).to.deep.equal([Bar]);
});
it(`uses getDesignParamTypes() if the static inject property is undefined`, () => {
class Bar {}
@decorator()
class Foo{ static inject = undefined; constructor(bar: Bar){} }
const actual = DI.getDependencies(Foo);
expect(getDesignParamTypes).to.have.been.calledWith(Foo);
expect(actual).to.deep.equal([Bar]);
});
it(`throws when inject is not an array`, () => {
class Bar {}
class Foo{ static inject = Bar; }
expect(() => DI.getDependencies(Foo)).to.throw();
expect(getDesignParamTypes).not.to.have.been.called;
});
for (const deps of [
[class Bar{}],
[class Bar{}, class Bar{}],
[undefined],
[null],
[42]
]) {
it(_`returns a copy of the inject array ${deps}`, () => {
class Foo{ static inject = deps.slice(); }
const actual = DI.getDependencies(Foo);
expect(getDesignParamTypes).not.to.have.been.called;
expect(actual).to.deep.equal(deps);
expect(actual).not.to.equal(Foo.inject);
});
}
for (const deps of [
[class Bar{}],
[class Bar{}, class Bar{}],
[undefined],
[null],
[42]
]) {
it(_`traverses the 2-layer prototype chain for inject array ${deps}`, () => {
class Foo{ static inject = deps.slice(); }
class Bar extends Foo{ static inject = deps.slice(); }
const actual = DI.getDependencies(Bar);
expect(getDesignParamTypes).not.to.have.been.called;
expect(actual).to.deep.equal([...deps, ...deps]);
expect(actual).not.to.equal(Foo.inject);
expect(actual).not.to.equal(Bar.inject);
});
it(_`traverses the 3-layer prototype chain for inject array ${deps}`, () => {
class Foo{ static inject = deps.slice(); }
class Bar extends Foo{ static inject = deps.slice(); }
class Baz extends Bar{ static inject = deps.slice(); }
const actual = DI.getDependencies(Baz);
expect(getDesignParamTypes).not.to.have.been.called;
expect(actual).to.deep.equal([...deps, ...deps, ...deps]);
expect(actual).not.to.equal(Foo.inject);
expect(actual).not.to.equal(Bar.inject);
expect(actual).not.to.equal(Baz.inject);
});
it(_`traverses the 1-layer + 2-layer prototype chain (with gap) for inject array ${deps}`, () => {
class Foo{ static inject = deps.slice(); }
class Bar extends Foo{ }
class Baz extends Bar{ static inject = deps.slice(); }
class Qux extends Baz{ static inject = deps.slice(); }
const actual = DI.getDependencies(Qux);
expect(getDesignParamTypes).not.to.have.been.called;
expect(actual).to.deep.equal([...deps, ...deps, ...deps]);
expect(actual).not.to.equal(Foo.inject);
expect(actual).not.to.equal(Baz.inject);
expect(actual).not.to.equal(Qux.inject);
});
}
});
describe(`createInterface()`, () => {
it(`returns a function that has withDefault and noDefault functions`, () => {
const sut = DI.createInterface();
expect(typeof sut).to.equal('function');
expect(typeof sut.withDefault).to.equal('function');
expect(typeof sut.noDefault).to.equal('function');
});
it(`noDefault returns self`, () => {
const sut = DI.createInterface();
expect(sut.noDefault()).to.equal(sut);
});
it(`withDefault returns self with modified withDefault that throws`, () => {
const sut = DI.createInterface();
const sut2 = sut.withDefault(<any>null);
expect(sut).to.equal(sut2);
expect(() => sut.withDefault(<any>null)).to.throw();
});
describe(`withDefault returns self with register function that registers the appropriate resolver`, () => {
let sut: IDefaultableInterfaceSymbol<any>;
let container: IContainer;
let registerResolver: ReturnType<typeof spy>;
beforeEach(() => {
sut = DI.createInterface();
container = new Container();
registerResolver = spy(container, 'registerResolver');
});
afterEach(() => {
registerResolver.restore();
});
function matchResolver(key: any, strategy: any, state: any): sinon.SinonMatcher {
return sinon.match(val => val.key === key && val.strategy === strategy && val.state === state);
}
it(`instance without key`, () => {
const value = {};
sut.withDefault(builder => builder.instance(value));
(<any>sut).register(container);
expect(registerResolver).to.have.been.calledWith(sut, matchResolver(sut, ResolverStrategy.instance, value));
});
it(`instance with key`, () => {
const value = {};
sut.withDefault(builder => builder.instance(value));
(<any>sut).register(container, 'key');
expect(registerResolver).to.have.been.calledWith('key', matchResolver('key', ResolverStrategy.instance, value));
});
it(`singleton without key`, () => {
class Foo {}
sut.withDefault(builder => builder.singleton(Foo));
(<any>sut).register(container);
expect(registerResolver).to.have.been.calledWith(sut, matchResolver(sut, ResolverStrategy.singleton, Foo));
});
it(`singleton with key`, () => {
class Foo {}
sut.withDefault(builder => builder.singleton(Foo));
(<any>sut).register(container, 'key');
expect(registerResolver).to.have.been.calledWith('key', matchResolver('key', ResolverStrategy.singleton, Foo));
});
it(`transient without key`, () => {
class Foo {}
sut.withDefault(builder => builder.transient(Foo));
(<any>sut).register(container);
expect(registerResolver).to.have.been.calledWith(sut, matchResolver(sut, ResolverStrategy.transient, Foo));
});
it(`transient with key`, () => {
class Foo {}
sut.withDefault(builder => builder.transient(Foo));
(<any>sut).register(container, 'key');
expect(registerResolver).to.have.been.calledWith('key', matchResolver('key', ResolverStrategy.transient, Foo));
});
it(`callback without key`, () => {
const callback = () => {};
sut.withDefault(builder => builder.callback(callback));
(<any>sut).register(container);
expect(registerResolver).to.have.been.calledWith(sut, matchResolver(sut, ResolverStrategy.callback, callback));
});
it(`callback with key`, () => {
const callback = () => {};
sut.withDefault(builder => builder.callback(callback));
(<any>sut).register(container, 'key');
expect(registerResolver).to.have.been.calledWith('key', matchResolver('key', ResolverStrategy.callback, callback));
});
it(`aliasTo without key`, () => {
sut.withDefault(builder => builder.aliasTo('key2'));
(<any>sut).register(container);
expect(registerResolver).to.have.been.calledWith(sut, matchResolver(sut, ResolverStrategy.alias, 'key2'));
});
it(`aliasTo with key`, () => {
sut.withDefault(builder => builder.aliasTo('key2'));
(<any>sut).register(container, 'key1');
expect(registerResolver).to.have.been.calledWith('key1', matchResolver('key1', ResolverStrategy.alias, 'key2'));
});
});
});
});
describe(`The inject decorator`, () => {
class Dep1{}
class Dep2{}
class Dep3{}
it(`can decorate classes with explicit dependencies`, () => {
@inject(Dep1, Dep2, Dep3)
class Foo {}
expect(Foo['inject']).to.deep.equal([Dep1, Dep2, Dep3]);
});
it(`can decorate classes with implicit dependencies`, () => {
@inject()
class Foo { constructor(dep1: Dep1, dep2: Dep2, dep3: Dep3){} }
expect(Foo['inject']).to.deep.equal([Dep1, Dep2, Dep3]);
});
it(`can decorate constructor parameters explicitly`, () => {
class Foo { constructor(@inject(Dep1)dep1, @inject(Dep2)dep2, @inject(Dep3)dep3){} }
expect(Foo['inject']).to.deep.equal([Dep1, Dep2, Dep3]);
});
it(`can decorate constructor parameters implicitly`, () => {
class | { constructor(@inject() dep1: Dep1, @inject() dep2: Dep2, @inject() dep3: Dep3){} }
expect(Foo['inject']).to.deep.equal([Dep1, Dep2, Dep3]);
});
it(`can decorate properties explicitly`, () => {
class Foo { @inject(Dep1)dep1; @inject(Dep2)dep2; @inject(Dep3)dep3; }
expect(Foo['inject'].dep1).to.equal(Dep1);
expect(Foo['inject'].dep2).to.equal(Dep2);
expect(Foo['inject'].dep3).to.equal(Dep3);
});
it(`cannot decorate properties implicitly`, () => {
class Foo { @inject()dep1: Dep1; @inject()dep2: Dep2; @inject()dep3: Dep3; }
expect(Foo['inject'].dep1).to.be.undefined;
expect(Foo['inject'].dep2).to.be.undefined;
expect(Foo['inject'].dep3).to.be.undefined;
});
});
describe(`The transient decorator`, () => {
it(`works as a plain decorator`, () => {
@transient
class Foo {}
expect(Foo['register']).to.be.a('function');
const container = DI.createContainer();
const foo1 = container.get(Foo);
const foo2 = container.get(Foo);
expect(foo1).not.to.equal(foo2);
});
it(`works as an invocation`, () => {
@transient()
class Foo {}
expect(Foo['register']).to.be.a('function');
const container = DI.createContainer();
const foo1 = container.get(Foo);
const foo2 = container.get(Foo);
expect(foo1).not.to.equal(foo2);
});
});
describe(`The singleton decorator`, () => {
it(`works as a plain decorator`, () => {
@singleton
class Foo {}
expect(Foo['register']).to.be.a('function');
const container = DI.createContainer();
const foo1 = container.get(Foo);
const foo2 = container.get(Foo);
expect(foo1).to.equal(foo2);
});
it(`works as an invocation`, () => {
@singleton()
class Foo {}
expect(Foo['register']).to.be.a('function');
const container = DI.createContainer();
const foo1 = container.get(Foo);
const foo2 = container.get(Foo);
expect(foo1).to.equal(foo2);
});
});
describe(`The Resolver class`, () => {
let container: IContainer;
let registerResolver: ReturnType<typeof spy>;
beforeEach(() => {
container = new Container();
registerResolver = spy(container, 'registerResolver');
});
afterEach(() => {
registerResolver.restore();
});
describe(`register()`, () => {
it(`registers the resolver to the container with the provided key`, () => {
const sut = new Resolver('foo', 0, null);
sut.register(container, 'bar');
expect(registerResolver).to.have.been.calledWith('bar', sut);
})
it(`registers the resolver to the container with its own key`, () => {
const sut = new Resolver('foo', 0, null);
sut.register(container);
expect(registerResolver).to.have.been.calledWith('foo', sut);
})
});
describe(`resolve()`, () => {
it(`instance - returns state`, () => {
const state = {};
const sut = new Resolver('foo', ResolverStrategy.instance, state);
const actual = sut.resolve(container, container);
expect(actual).to.equal(state);
});
it(`singleton - returns an instance of the type and sets strategy to instance`, () => {
class Foo {}
const sut = new Resolver('foo', ResolverStrategy.singleton, Foo);
const actual = sut.resolve(container, container);
expect(actual).to.be.instanceof(Foo);
const actual2 = sut.resolve(container, container);
expect(actual2).to.equal(actual);
});
it(`transient - always returns a new instance of the type`, () => {
class Foo {}
const sut = new Resolver('foo', ResolverStrategy.transient, Foo);
const actual1 = sut.resolve(container, container);
expect(actual1).to.be.instanceof(Foo);
const actual2 = sut.resolve(container, container);
expect(actual2).to.be.instanceof(Foo);
expect(actual2).not.to.equal(actual1);
});
it(`array - calls resolve() on the first item in the state array`, () => {
const resolver = { resolve: spy() };
const sut = new Resolver('foo', ResolverStrategy.array, [resolver]);
sut.resolve(container, container);
expect(resolver.resolve).to.have.been.calledWith(container, container);
});
it(`throws for unknown strategy`, () => {
const sut = new Resolver('foo', -1, null);
expect(() => sut.resolve(container, container)).to.throw(/6/);
});
});
describe(`getFactory()`, () => {
it(`returns a new singleton Factory if it does not exist`, () => {
class Foo{}
const sut = new Resolver(Foo, ResolverStrategy.singleton, Foo);
const actual = sut.getFactory(container);
expect(actual).to.be.instanceof(Factory);
expect(actual.Type).to.equal(Foo);
});
it(`returns a new transient Factory if it does not exist`, () => {
class Foo{}
const sut = new Resolver(Foo, ResolverStrategy.transient, Foo);
const actual = sut.getFactory(container);
expect(actual).to.be.instanceof(Factory);
expect(actual.Type).to.equal(Foo);
});
it(`returns a null for instance strategy`, () => {
class Foo{}
const sut = new Resolver(Foo, ResolverStrategy.instance, Foo);
const actual = sut.getFactory(container);
expect(actual).to.be.null;
});
it(`returns a null for array strategy`, () => {
class Foo{}
const sut = new Resolver(Foo, ResolverStrategy.array, Foo);
const actual = sut.getFactory(container);
expect(actual).to.be.null;
});
it(`returns a null for alias strategy`, () => {
class Foo{}
const sut = new Resolver(Foo, ResolverStrategy.alias, Foo);
const actual = sut.getFactory(container);
expect(actual).to.be.null;
});
it(`returns a null for callback strategy`, () => {
class Foo{}
const sut = new Resolver(Foo, ResolverStrategy.callback, Foo);
const actual = sut.getFactory(container);
expect(actual).to.be.null;
});
});
});
describe(`The Factory class`, () => {
describe(`create()`, () => {
for (const count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) {
it(`returns a new Factory with ${count} deps`, () => {
class Bar{}
class Foo{static inject=Array(count).fill(Bar)}
const actual = Factory.create(Foo);
expect(actual).to.be.instanceof(Factory);
expect(actual.Type).to.equal(Foo);
if (count < 6) {
expect(actual['invoker']).to.equal(classInvokers[count]);
} else {
expect(actual['invoker']).to.equal(fallbackInvoker);
}
expect(actual['dependencies']).not.to.equal(Foo.inject);
expect(actual['dependencies']).to.deep.equal(Foo.inject);
});
}
});
describe(`construct()`, () => {
for (const staticCount of [0, 1, 2, 3, 4, 5, 6, 7]) {
for (const dynamicCount of [0, 1, 2]) {
const container = new Container();
it(`instantiates a type with ${staticCount} static deps and ${dynamicCount} dynamic deps`, () => {
class Bar{}
class Foo{args:any[];constructor(...args:any[]){this.args=args}static inject=Array(staticCount).fill(Bar)}
const sut = Factory.create(Foo);
const dynamicDeps = dynamicCount ? Array(dynamicCount).fill({}) : undefined;
const actual = sut.construct(container, dynamicDeps);
for (let i = 0, ii = Foo.inject.length; i < ii; ++i) {
expect(actual.args[i]).to.be.instanceof(Foo.inject[i]);
}
for (let i = 0, ii = dynamicDeps ? dynamicDeps.length : 0; i < ii; ++i) {
expect(actual.args[Foo.inject.length+i]).to.equal(dynamicDeps[i]);
}
});
}
}
});
describe(`registerTransformer()`, () => {
it(`registers the transformer`, () => {
const container = new Container();
class Foo{bar;baz}
const sut = Factory.create(Foo);
sut.registerTransformer(foo => Object.assign(foo, { bar: 1 }));
sut.registerTransformer(foo => Object.assign(foo, { baz: 2 }));
const foo = sut.construct(container);
expect(foo.bar).to.equal(1);
expect(foo.baz).to.equal(2);
expect(foo).to.be.instanceof(Foo);
});
});
});
describe(`The Container class`, () => {
let sut: IContainer;
beforeEach(() => {
sut = new Container();
});
describe(`register()`, () => {
let register: ReturnType<typeof spy>;
beforeEach(() => {
register = spy();
});
it(_`calls register() on {register}`, () => {
sut.register({register});
expect(register).to.have.been.calledWith(sut);
})
it(_`calls register() on {register},{register}`, () => {
sut.register({register},{register});
expect(register).to.have.been.calledWith(sut);
expect(register.getCalls().length).to.equal(2);
})
it(_`calls register() on [{register},{register}]`, () => {
sut.register(<any>[{register},{register}]);
expect(register).to.have.been.calledWith(sut);
expect(register.getCalls().length).to.equal(2);
})
it(_`calls register() on {foo:{register}}`, () => {
sut.register({foo:{register}});
expect(register).to.have.been.calledWith(sut);
})
it(_`calls register() on {foo:{register}},{foo:{register}}`, () => {
sut.register({foo:{register}},{foo:{register}});
expect(register).to.have.been.calledWith(sut);
expect(register.getCalls().length).to.equal(2);
})
it(_`calls register() on [{foo:{register}},{foo:{register}}]`, () => {
sut.register(<any>[{foo:{register}},{foo:{register}}]);
expect(register).to.have.been.calledWith(sut);
expect(register.getCalls().length).to.equal(2);
})
it(_`calls register() on {register},{foo:{register}}`, () => {
sut.register({register},{foo:{register}});
expect(register).to.have.been.calledWith(sut);
expect(register.getCalls().length).to.equal(2);
})
it(_`calls register() on [{register},{foo:{register}}]`, () => {
sut.register(<any>[{register},{foo:{register}}]);
expect(register).to.have.been.calledWith(sut);
expect(register.getCalls().length).to.equal(2);
})
it(_`calls register() on [{register},{}]`, () => {
sut.register(<any>[{register},{}]);
expect(register).to.have.been.calledWith(sut);
})
it(_`calls register() on [{},{register}]`, () => {
sut.register(<any>[{},{register}]);
expect(register).to.have.been.calledWith(sut);
})
it(_`calls register() on [{foo:{register}},{foo:{}}]`, () => {
sut.register(<any>[{foo:{register}},{foo:{}}]);
expect(register).to.have.been.calledWith(sut);
})
it(_`calls register() on [{foo:{}},{foo:{register}}]`, () => {
sut.register(<any>[{foo:{}},{foo:{register}}]);
expect(register).to.have.been.calledWith(sut);
})
});
describe(`registerResolver()`, () => {
for (const key of [null, undefined, Object]) {
it(_`throws on invalid key ${key}`, () => {
expect(() => sut.registerResolver(key, <any>null)).to.throw(/5/);
});
}
it(`registers the resolver if it does not exist yet`, () => {
const key = {};
const resolver = new Resolver(key, ResolverStrategy.instance, {});
sut.registerResolver(key, resolver);
const actual = sut.getResolver(key);
expect(actual).to.equal(resolver);
});
it(`changes to array resolver if the key already exists`, () => {
const key = {};
const resolver1 = new Resolver(key, ResolverStrategy.instance, {});
const resolver2 = new Resolver(key, ResolverStrategy.instance, {});
sut.registerResolver(key, resolver1);
const actual1 = sut.getResolver(key);
expect(actual1).to.equal(resolver1);
sut.registerResolver(key, resolver2);
const actual2 = sut.getResolver(key);
expect(actual2).not.to.equal(actual1);
expect(actual2).not.to.equal(resolver1);
expect(actual2).not.to.equal(resolver2);
expect(actual2['strategy']).to.equal(ResolverStrategy.array);
expect(actual2['state'][0]).to.equal(resolver1);
expect(actual2['state'][1]).to.equal(resolver2);
});
it(`appends to the array resolver if the key already exists more than once`, () => {
const key = {};
const resolver1 = new Resolver(key, ResolverStrategy.instance, {});
const resolver2 = new Resolver(key, ResolverStrategy.instance, {});
const resolver3 = new Resolver(key, ResolverStrategy.instance, {});
sut.registerResolver(key, resolver1);
sut.registerResolver(key, resolver2);
sut.registerResolver(key, resolver3);
const actual1 = sut.getResolver(key);
expect(actual1['strategy']).to.equal(ResolverStrategy.array);
expect(actual1['state'][0]).to.equal(resolver1);
expect(actual1['state'][1]).to.equal(resolver2);
expect(actual1['state'][2]).to.equal(resolver3);
});
});
describe(`registerTransformer()`, () => {
for (const key of [null, undefined, Object]) {
it(_`throws on invalid key ${key}`, () => {
expect(() => sut.registerTransformer(key, <any>null)).to.throw(/5/);
});
}
it(`registers the transformer if it does not exist yet`, () => {
});
it(`reuses the existing transformer if it exists`, () => {
});
});
describe(`getResolver()`, () => {
for (const key of [null, undefined, Object]) {
it(_`throws on invalid key ${key}`, () => {
expect(() => sut.getResolver(key, <any>null)).to.throw(/5/);
});
}
});
describe(`has()`, () => {
for (const key of [null, undefined, Object]) {
it(_`returns false for non-existing key ${key}`, () => {
expect(sut.has(<any>key, false)).to.be.false;
});
}
it(`returns true for existing key`, () => {
const key = {};
sut.registerResolver(key, new Resolver(key, ResolverStrategy.instance, {}));
expect(sut.has(<any>key, false)).to.be.true;
});
});
describe(`get()`, () => {
for (const key of [null, undefined, Object]) {
it(_`throws on invalid key ${key}`, () => {
expect(() => sut.get(key)).to.throw(/5/);
});
}
});
describe(`getAll()`, () => {
for (const key of [null, undefined, Object]) {
it(_`throws on invalid key ${key}`, () => {
expect(() => sut.getAll(key)).to.throw(/5/);
});
}
});
describe(`getFactory()`, () => {
for (const count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) {
sut = new Container(); // ensure the state is reset (beforeEach doesn't know about loops)
it(`returns a new Factory with ${count} deps if it does not exist`, () => {
class Bar{}
class Foo{static inject=Array(count).fill(Bar)}
const actual = sut.getFactory(Foo);
expect(actual).to.be.instanceof(Factory);
expect(actual.Type).to.equal(Foo);
if (count < 6) {
expect(actual['invoker']).to.equal(classInvokers[count]);
} else {
expect(actual['invoker']).to.equal(fallbackInvoker);
}
expect(actual['dependencies']).not.to.equal(Foo.inject);
expect(actual['dependencies']).to.deep.equal(Foo.inject);
});
}
it(`reuses the existing factory if it already exists`, () => {
const create = spy(Factory, 'create');
class Foo{}
const actual = sut.getFactory(Foo);
expect(actual).to.be.instanceof(Factory);
const actual2 = sut.getFactory(Foo);
expect(actual).to.equal(actual2);
expect(create).to.have.been.calledOnce;
create.restore();
});
});
describe(`createChild()`, () => {
it(`creates a child with same config and sut as parent`, () => {
const actual = sut.createChild();
expect(actual['configuration']).to.equal(sut['configuration']);
expect(actual['parent']).to.equal(sut);
expect(sut['parent']).to.be.null
});
});
describe(`jitRegister()`, () => {
});
});
describe(`The Registration object`, () => {
it(`instance() returns the correct resolver`, () => {
const value = {};
const actual = Registration.instance('key', value);
expect(actual['key']).to.equal('key');
expect(actual['strategy']).to.equal(ResolverStrategy.instance);
expect(actual['state']).to.equal(value);
});
it(`singleton() returns the correct resolver`, () => {
class Foo {}
const actual = Registration.singleton('key', Foo);
expect(actual['key']).to.equal('key');
expect(actual['strategy']).to.equal(ResolverStrategy.singleton);
expect(actual['state']).to.equal(Foo);
});
it(`transient() returns the correct resolver`, () => {
class Foo {}
const actual = Registration.transient('key', Foo);
expect(actual['key']).to.equal('key');
expect(actual['strategy']).to.equal(ResolverStrategy.transient);
expect(actual['state']).to.equal(Foo);
});
it(`callback() returns the correct resolver`, () => {
const callback = () => {};
const actual = Registration.callback('key', callback);
expect(actual['key']).to.equal('key');
expect(actual['strategy']).to.equal(ResolverStrategy.callback);
expect(actual['state']).to.equal(callback);
});
it(`alias() returns the correct resolver`, () => {
const actual = Registration.alias('key', 'key2');
expect(actual['key']).to.equal('key2');
expect(actual['strategy']).to.equal(ResolverStrategy.alias);
expect(actual['state']).to.equal('key');
});
});
describe(`The classInvokers object`, () => {
const container = <IContainer><any>{ get(t){ return new t(); } }
class Foo { args: any[]; constructor(...args: any[]){ this.args = args; } }
class Dep1{}
class Dep2{}
class Dep3{}
class Dep4{}
class Dep5{}
class Dep6{}
it(`invoke() handles 0 deps`, () => {
const actual = classInvokers[0].invoke(container, Foo, []);
expect(actual.args.length).to.equal(0);
});
it(`invoke() handles 1 dep`, () => {
const actual = classInvokers[1].invoke(container, Foo, [Dep1]);
expect(actual.args.length).to.equal(1);
expect(actual.args[0]).to.be.instanceof(Dep1);
});
it(`invoke() handles 2 deps`, () => {
const actual = classInvokers[2].invoke(container, Foo, [Dep1, Dep2]);
expect(actual.args.length).to.equal(2);
expect(actual.args[0]).to.be.instanceof(Dep1);
expect(actual.args[1]).to.be.instanceof(Dep2);
});
it(`invoke() handles 3 deps`, () => {
const actual = classInvokers[3].invoke(container, Foo, [Dep1, Dep2, Dep3]);
expect(actual.args.length).to.equal(3);
expect(actual.args[0]).to.be.instanceof(Dep1);
expect(actual.args[1]).to.be.instanceof(Dep2);
expect(actual.args[2]).to.be.instanceof(Dep3);
});
it(`invoke() handles 4 deps`, () => {
const actual = classInvokers[4].invoke(container, Foo, [Dep1, Dep2, Dep3, Dep4]);
expect(actual.args.length).to.equal(4);
expect(actual.args[0]).to.be.instanceof(Dep1);
expect(actual.args[1]).to.be.instanceof(Dep2);
expect(actual.args[2]).to.be.instanceof(Dep3);
expect(actual.args[3]).to.be.instanceof(Dep4);
});
it(`invoke() handles 5 deps`, () => {
const actual = classInvokers[5].invoke(container, Foo, [Dep1, Dep2, Dep3, Dep4, Dep5]);
expect(actual.args.length).to.equal(5);
expect(actual.args[0]).to.be.instanceof(Dep1);
expect(actual.args[1]).to.be.instanceof(Dep2);
expect(actual.args[2]).to.be.instanceof(Dep3);
expect(actual.args[3]).to.be.instanceof(Dep4);
expect(actual.args[4]).to.be.instanceof(Dep5);
});
it(`invoke() does not handle 6 deps`, () => {
expect(() => classInvokers[6].invoke(container, Foo, [Dep1, Dep2, Dep3, Dep4, Dep5, Dep6])).to.throw(/undefined/);
});
});
describe(`The invokeWithDynamicDependencies function`, () => {
const container = <IContainer><any>{ get(t){ return 'static'+t; } }
class Foo { args: any[]; constructor(...args: any[]){ this.args = args; } }
const deps = [class Dep1{}, class Dep2{}, class Dep3{}];
it(_`throws when staticDeps is null`, () => {
expect(() => invokeWithDynamicDependencies(container, Foo, null, [])).to.throw();
});
it(_`throws when any of the staticDeps is null`, () => {
expect(() => invokeWithDynamicDependencies(container, Foo, [null], [])).to.throw(/7/);
});
it(_`throws when any of the staticDeps is undefined`, () => {
expect(() => invokeWithDynamicDependencies(container, Foo, [undefined], [])).to.throw(/7/);
});
it(_`throws when staticDeps is undefined`, () => {
expect(() => invokeWithDynamicDependencies(container, Foo, undefined, [])).to.throw();
});
it(_`handles staticDeps is ${deps}`, () => {
const actual = <Foo>invokeWithDynamicDependencies(container, Foo, deps, []);
expect(actual.args).to.deep.equal(deps.map(d => 'static'+d));
});
it(`handles dynamicDeps is null`, () => {
const actual = <Foo>invokeWithDynamicDependencies(container, Foo, [], null);
expect(actual.args.length).to.equal(1);
expect(actual.args[0]).to.be.null;
});
it(`handles dynamicDeps is undefined`, () => {
const actual = <Foo>invokeWithDynamicDependencies(container, Foo, [], undefined);
expect(actual.args.length).to.equal(0);
});
it(_`handles dynamicDeps is ${deps}`, () => {
const actual = <Foo>invokeWithDynamicDependencies(container, Foo, [], deps);
expect(actual.args).to.deep.equal(deps);
});
it(_`handles staticDeps is ${deps} and dynamicDeps is ${deps}`, () => {
const actual = <Foo>invokeWithDynamicDependencies(container, Foo, deps, deps);
expect(actual.args[0]).to.equal('static'+deps[0]);
expect(actual.args[1]).to.equal('static'+deps[1]);
expect(actual.args[2]).to.equal('static'+deps[2]);
expect(actual.args[3]).to.equal(deps[0]);
expect(actual.args[4]).to.equal(deps[1]);
expect(actual.args[5]).to.equal(deps[2]);
});
});
| Foo |
gatsby-node.js | const path = require('path');
const slash = require('slash');
exports.createPages = ({ graphql, actions }) => {
const { createPage } = actions;
return new Promise((resolve, reject) => {
resolve(
graphql(`
{
allProjectsJson {
edges {
node {
name
description
headline
stack | weight
route
team {
name
twitter
}
content {
title
description
screenshots {
screen
}
}
}
}
}
}
`).then(result => {
if (result.errors) {
reject(result.errors);
}
// Create pages for each project detail
const projectDetailsTemplate = path.resolve(`src/templates/ProjectLandingPage/index.js`);
result.data.allProjectsJson.edges.forEach(({ node }) => {
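// Each project node gets its own page; the whole node is passed as
// pageContext so the template can render it without issuing a second query.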
createPage({
path: `/${node.route.toLowerCase()}`,
component: slash(projectDetailsTemplate),
context: {
project: { ...node },
},
});
});
return;
})
);
});
}; | tags
repo
image
featured |
VisCostCallback.py | from bokeh.plotting import output_notebook, figure, ColumnDataSource, show
from bokeh.io import push_notebook
from timeit import default_timer
import math
from collections import deque
class CostVisCallback(object):
"""
Callback providing a live-updating Bokeh plot of training and evaluation cost in a notebook.
"""
def __init__(self, epoch_freq=1, y_range=(0, 4.5), fig=None, handle=None,
update_thresh_s=0.65, w=400, h=300, nepochs=1.0, total_batches=10.0,
train_source=None, val_source=None, history=10):
self.update_thresh_s = update_thresh_s
self.w = w
self.h = h
self.nepochs = nepochs
self.total = total_batches
self.last_update = 0
self.epoch = -1
self.history = history
self.cost_history = deque(maxlen=history)
if handle is None:
output_notebook()
self.handle = None
else:
self.handle = handle
if fig is None:
self.fig = figure(name="cost", y_axis_label="Cost", x_range=(0, self.nepochs), y_range=y_range,
x_axis_label="Epoch", plot_width=self.w, plot_height=self.h)
else:
self.fig = fig
if train_source is None:
self.train_source = ColumnDataSource(data=dict(x=[], y=[]))
else:
self.train_source = train_source
self.train_source.data = dict(x=[], y=[])
self.train_cost = self.fig.line('x', 'y', source=self.train_source)
if val_source is None:
self.val_source = ColumnDataSource(data=dict(x=[], y=[]))
else:
self.val_source = val_source
self.val_source.data = dict(x=[], y=[])
self.val_cost = self.fig.line('x', 'y', source=self.val_source, color='red')
def get_average_cost(self, cost):
self.cost_history.append(cost)
return sum(self.cost_history) / float(len(self.cost_history))
def train_callback(self, param):
self._process_batch(param, 'train')
def eval_callback(self, param):
self._process_batch(param, 'eval')
def _process_batch(self, param, name):
if self.handle is None:
self.handle = show(self.fig, notebook_handle=True)
now = default_timer()
# print "{}_{}".format(param.nbatch, param.epoch)
if param.nbatch == 0:
self.epoch = self.epoch + 1
time = float(param.nbatch) / self.total + param.epoch
if param.eval_metric is not None:
name_value = param.eval_metric.get_name_value()
param.eval_metric.reset()
cost = name_value[0][1]
if name == 'train':
cost = self.get_average_cost(cost)
if math.isnan(cost) or cost > 4000: | self.train_source.data['y'].append(cost)
elif name == 'eval':
self.val_source.data['x'].append(param.epoch+1)
self.val_source.data['y'].append(cost)
if (now - self.last_update > self.update_thresh_s):
self.last_update = now
if self.handle is not None:
push_notebook(handle=self.handle)
else:
push_notebook()
def get_callbacks(self):
return {'train_cost': self.train_callback,
'eval_cost': self.eval_callback} | cost = 4000
if name == 'train':
self.train_source.data['x'].append(time) |
__init__.py | try:
from .pfdo_med2image import pfdo_med2image
except: | from pfdo_med2image import pfdo_med2image |
|
gameClassifier.js | 'use strict';
describe('Service: gameClassifier', function () {
beforeEach(module('twistedHangmanApp'));
var playerMD5 = 'anmd5!';
var mockPlayerService = {
currentPlayer: function () {
return {md5: playerMD5};
}
};
var twGameDetails = {
playerActionRequired: jasmine.createSpy('action')
};
beforeEach(module(function ($provide) {
$provide.factory('jtbPlayerService', [function () {
return mockPlayerService;
}]);
$provide.factory('twGameDetails', [function () {
return twGameDetails;
}]);
}));
var service;
beforeEach(inject(function ($injector) {
twGameDetails.playerActionRequired.calls.reset();
service = $injector.get('jtbGameClassifier');
}));
var expectedYourTurnClassification = 'Your move.';
var expectedTheirTurnClassification = 'Their move.';
var expectedOlderGameClassification = 'Older games.';
var expectedIconMap = {};
expectedIconMap[expectedYourTurnClassification] = 'play';
expectedIconMap[expectedTheirTurnClassification] = 'pause';
expectedIconMap[expectedOlderGameClassification] = 'stop';
it('get classifications', function () {
expect(service.getClassifications()).toEqual([expectedYourTurnClassification, expectedTheirTurnClassification, expectedOlderGameClassification]);
});
it('get icon map', function () {
expect(service.getIcons()).toEqual(expectedIconMap);
});
| it('classification for no player action needed, non-roundover phase', function () {
var game = {gamePhase: 'TBD'};
twGameDetails.playerActionRequired.and.returnValue(false);
expect(service.getClassification(game)).toEqual(expectedTheirTurnClassification);
expect(twGameDetails.playerActionRequired).toHaveBeenCalledWith(game, playerMD5);
});
it('classification for player action needed, non-roundover phase', function () {
var game = {gamePhase: 'TBD'};
twGameDetails.playerActionRequired.and.returnValue(true);
expect(service.getClassification(game)).toEqual(expectedYourTurnClassification);
expect(twGameDetails.playerActionRequired).toHaveBeenCalledWith(game, playerMD5);
});
it('classification for phase RoundOver', function () {
var game = {gamePhase: 'RoundOver'};
twGameDetails.playerActionRequired.and.returnValue(false);
expect(service.getClassification(game)).toEqual(expectedYourTurnClassification);
expect(twGameDetails.playerActionRequired).toHaveBeenCalledWith(game, playerMD5);
});
angular.forEach(['Declined', 'Quit', 'NextRoundStarted'], function (phase) {
it('classification for phase ' + phase, function () {
var game = {gamePhase: phase};
twGameDetails.playerActionRequired.and.returnValue(false);
expect(service.getClassification(game)).toEqual(expectedOlderGameClassification);
});
});
}); | |
cis-bed.d.ts | export declare const cisBed: string[]; |
||
file.py | # Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/Python/Utility/file.py
from os import listdir, makedirs
from os.path import exists, isfile, join, splitext
import shutil
import re
def sorted_alphanum(file_list_ordered):
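# Natural (alphanumeric) sort: names are split into digit and non-digit
# chunks so that e.g. "frame2.png" sorts before "frame10.png".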
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(file_list_ordered, key=alphanum_key)
def get_file_list(path, extension=None):
if extension is None:
file_list = [path + f for f in listdir(path) if isfile(join(path, f))]
else:
file_list = [path + f for f in listdir(path)
if isfile(join(path, f)) and splitext(f)[1] == extension]
file_list = sorted_alphanum(file_list)
return file_list
def | (path_dataset, folder_names):
for folder_name in folder_names:
if exists(join(path_dataset, folder_name)):
path = join(path_dataset, folder_name)
return path
def get_rgbd_folders(path_dataset):
path_color = add_if_exists(path_dataset, ["image/", "rgb/", "color/"])
path_depth = join(path_dataset, "depth/")
return path_color, path_depth
def get_rgbd_file_lists(path_dataset):
path_color, path_depth = get_rgbd_folders(path_dataset)
color_files = get_file_list(path_color, ".jpg") + \
get_file_list(path_color, ".png")
depth_files = get_file_list(path_depth, ".png")
return color_files, depth_files
def make_clean_folder(path_folder):
if not exists(path_folder):
makedirs(path_folder)
else:
shutil.rmtree(path_folder)
makedirs(path_folder)
def check_folder_structure(path_dataset):
path_color, path_depth = get_rgbd_folders(path_dataset)
assert exists(path_depth), \
"Path %s does not exist!" % path_depth
assert exists(path_color), \
"Path %s does not exist!" % path_color
| add_if_exists |
basic.py | """
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
WARNING
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
This directory is for the internal of Theano.
You are strongly advised not to use it, unless you know
what you are doing!
If you want to use a scalar variable in a Theano graph,
you probably want to use theano.tensor.[c,z,f,d,b,w,i,l,]scalar!
"""
import math
import warnings
from copy import copy
from itertools import imap
from textwrap import dedent
import numpy
import theano
from theano.compat import PY3
from theano import gof
from theano.gof import (Op, utils, Variable, Constant, Type, Apply,
FunctionGraph)
from theano.gof.python25 import partial, all, any
from theano.configparser import config
from theano.gradient import DisconnectedType
from theano.gradient import grad_undefined
builtin_complex = complex
builtin_int = int
builtin_float = float
class ComplexError(Exception):
"""Raised if complex numbers are used in an unsupported operation."""
pass
class IntegerDivisionError(Exception):
"""Raised if someone tries to divide integers with '/' instead of '//'."""
pass
def upcast(dtype, *dtypes):
# Should we try to keep float32 instead of float64? This is used so that
# for instance mixing int64 with float32 yields float32 instead of float64.
# Note that we store this boolean as a one-element list so that it can be
# modified within `make_array`.
keep_float32 = [(config.cast_policy == 'numpy+floatX' and
config.floatX == 'float32')]
def make_array(dt):
if dt == 'float64':
# There is an explicit float64 dtype: we cannot keep float32.
keep_float32[0] = False
return numpy.zeros((), dtype=dt)
z = make_array(dtype)
for dt in dtypes:
z = z + make_array(dt=dt)
rval = str(z.dtype)
if rval == 'float64' and keep_float32[0]:
return 'float32'
else:
return rval
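# Illustration of the rule above: upcast('int64', 'float32') follows numpy and
# gives 'float64', unless cast_policy is 'numpy+floatX' and floatX is
# 'float32', in which case 'float32' is returned instead.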
def as_scalar(x, name=None):
if isinstance(x, gof.Apply):
if len(x.outputs) != 1:
raise ValueError("It is ambiguous which output of a multi-output"
" Op has to be fetched.", x)
else:
x = x.outputs[0]
if isinstance(x, Variable):
if not isinstance(x.type, Scalar):
raise TypeError("Variable type field must be a Scalar.", x, x.type)
return x
try:
return constant(x)
except TypeError:
raise TypeError("Cannot convert %s to Scalar" % x, type(x))
def constant(x):
# pass through numpy scalars, since they are already typed on
# purpose typically.
if hasattr(x, 'dtype'):
assert x.ndim == 0
return ScalarConstant(Scalar(str(x.dtype)), x)
if isinstance(x, builtin_float):
for dtype in ['float32', 'float64']:
x_ = theano._asarray(x, dtype=dtype)
if numpy.all(x == x_):
break
x_ = None
assert x_ is not None
return ScalarConstant(Scalar(str(x_.dtype)), x)
if isinstance(x, builtin_int):
for dtype in ['int8', 'int16', 'int32', 'int64']:
x_ = theano._asarray(x, dtype=dtype)
if numpy.all(x == x_):
break
x_ = None
assert x_ is not None
return ScalarConstant(Scalar(str(x_.dtype)), x)
if isinstance(x, builtin_complex):
#TODO: We have added the complex type, so this should be tested
raise NotImplementedError()
raise TypeError(x)
#return ScalarConstant(float64, float(x))
class Scalar(Type):
"""
Internal class, should not be used by clients
Primarily used by tensor.elemwise and tensor.reduce
Analogous to TensorType, but for zero-dimensional objects
Maps directly to C primitives
TODO: refactor to be named ScalarType for consistency with TensorType
"""
ndim = 0
def __init__(self, dtype):
if dtype == 'floatX':
dtype = config.floatX
self.dtype = dtype
self.dtype_specs() # error checking
def filter(self, data, strict=False, allow_downcast=None):
py_type = self.dtype_specs()[0]
if strict and not isinstance(data, py_type):
raise TypeError("%s expected a %s, got %s of type %s" % (
self, py_type, data, type(data)), data)
try:
converted_data = py_type(data)
if (allow_downcast or
(allow_downcast is None and
type(data) is float and
self.dtype == theano.config.floatX) or
data == converted_data):
return py_type(data)
else:
raise TypeError('Value cannot accurately be converted to dtype'
' (%s) and allow_downcast is not True' % self.dtype)
except Exception, e:
raise TypeError("Could not convert %s (value=%s) to %s" % (
type(data), data, self.dtype), e)
def values_eq_approx(self, a, b, tolerance=1e-4):
return abs(a - b) <= ((abs(a) + abs(b)) * tolerance)
def c_headers(self):
l = ['<math.h>']
l.append('<numpy/arrayscalars.h>')
if config.lib.amdlibm:
l += ['<amdlibm.h>']
return l
def c_libraries(self):
l = []
if config.lib.amdlibm:
l += ['amdlibm']
return l
def c_compile_args(self):
if config.lib.amdlibm:
return ['-DREPLACE_WITH_AMDLIBM']
else:
return []
def __eq__(self, other):
return type(self) == type(other) and other.dtype == self.dtype
def __hash__(self):
return hash('theano.scalar.Scalar') ^ hash(self.dtype)
def dtype_specs(self):
try:
return { # dtype: (py_type, c_type, cls_name)
'float32': (numpy.float32, 'npy_float32', 'Float32'),
'float64': (numpy.float64, 'npy_float64', 'Float64'),
'complex128': (numpy.complex128, 'theano_complex128',
'Complex128'),
'complex64': (numpy.complex64, 'theano_complex64',
'Complex64'),
'uint8': (numpy.uint8, 'npy_uint8', 'UInt8'),
'int8': (numpy.int8, 'npy_int8', 'Int8'),
'uint16': (numpy.uint16, 'npy_uint16', 'UInt16'),
'int16': (numpy.int16, 'npy_int16', 'Int16'),
'uint32': (numpy.uint32, 'npy_uint32', 'UInt32'),
'int32': (numpy.int32, 'npy_int32', 'Int32'),
'uint64': (numpy.uint64, 'npy_uint64', 'UInt64'),
'int64': (numpy.int64, 'npy_int64', 'Int64')
}[self.dtype]
except KeyError:
raise TypeError("Unsupported dtype for %s: %s" % (
self.__class__.__name__, self.dtype))
def upcast(self, *others):
return upcast(*[x.dtype for x in [self] + list(others)])
def make_variable(self, name=None):
return ScalarVariable(self, name=name)
def __str__(self):
return str(self.dtype)
def __repr__(self):
return "Scalar(%s)" % self.dtype
def c_literal(self, data):
if 'complex' in self.dtype:
raise NotImplementedError("No literal for complex values.")
return str(data)
def c_declare(self, name, sub):
return """
%(dtype)s %(name)s;
typedef %(dtype)s %(name)s_dtype;
""" % dict(name=name, dtype=self.dtype_specs()[1])
def c_init(self, name, sub):
return """
%(name)s = 0;
""" % locals()
def c_extract(self, name, sub):
specs = self.dtype_specs()
return """
if (!PyObject_TypeCheck(py_%(name)s, &%(pyarr_type)s))
{
PyErr_Format(PyExc_ValueError,
"Scalar check failed (%(dtype)s)");
%(fail)s
}
PyArray_ScalarAsCtype(py_%(name)s, &%(name)s);
""" % dict(sub,
name=name,
dtype=specs[1],
pyarr_type='Py%sArrType_Type' % specs[2])
def c_sync(self, name, sub):
specs = self.dtype_specs()
return """
Py_XDECREF(py_%(name)s);
py_%(name)s = PyArrayScalar_New(%(cls)s);
if (!py_%(name)s)
{
Py_XINCREF(Py_None);
py_%(name)s = Py_None;
PyErr_Format(PyExc_MemoryError,
"Instantiation of new Python scalar failed (%(dtype)s)");
%(fail)s
}
PyArrayScalar_ASSIGN(py_%(name)s, %(cls)s, %(name)s);
""" % dict(sub,
name=name,
dtype=specs[1],
cls=specs[2])
def c_cleanup(self, name, sub):
return ""
def c_support_code(self):
if self.dtype.startswith('complex'):
cplx_types = ['theano_complex64', 'theano_complex128']
real_types = ['npy_int8', 'npy_int16', 'npy_int32', 'npy_int64',
'npy_float32', 'npy_float64']
# If the 'int' C type is not exactly the same as an existing
# 'npy_intX', some C code may not compile, e.g. when assigning
# the value 0 (cast to 'int' in C) to a theano_complex64.
if (numpy.dtype('intc').num not in
[numpy.dtype(d[4:]).num for d in real_types]):
# In that case we add the 'int' type to the real types.
real_types.append('int')
template = """
struct theano_complex%(nbits)s : public npy_complex%(nbits)s
{
typedef theano_complex%(nbits)s complex_type;
typedef npy_float%(half_nbits)s scalar_type;
complex_type operator +(const complex_type &y) const {
complex_type ret;
ret.real = this->real + y.real;
ret.imag = this->imag + y.imag;
return ret;
}
complex_type operator -() const {
complex_type ret;
ret.real = -this->real;
ret.imag = -this->imag;
return ret;
}
bool operator ==(const complex_type &y) const {
return (this->real == y.real) && (this->imag == y.imag);
}
bool operator ==(const npy_float%(nbits)s &y) const {
return (this->real == y) && (this->imag == 0);
}
complex_type operator -(const complex_type &y) const {
complex_type ret;
ret.real = this->real - y.real;
ret.imag = this->imag - y.imag;
return ret;
}
complex_type operator *(const complex_type &y) const {
complex_type ret;
ret.real = this->real * y.real - this->imag * y.imag;
ret.imag = this->real * y.imag + this->imag * y.real;
return ret;
}
complex_type operator /(const complex_type &y) const {
complex_type ret;
scalar_type y_norm_square = y.real * y.real + y.imag * y.imag;
ret.real = (this->real * y.real + this->imag * y.imag) / y_norm_square;
ret.imag = (this->imag * y.real - this->real * y.imag) / y_norm_square;
return ret;
}
template <typename T>
complex_type& operator =(const T& y);
theano_complex%(nbits)s() {}
template <typename T>
theano_complex%(nbits)s(const T& y) { *this = y; }
template <typename TR, typename TI>
theano_complex%(nbits)s(const TR& r, const TI& i) { this->real=r; this->imag=i; }
};
"""
def operator_eq_real(mytype, othertype):
return '''
template <> %(mytype)s & %(mytype)s::operator=<%(othertype)s>(const %(othertype)s & y)
{ this->real=y; this->imag=0; return *this; }
''' % dict(mytype=mytype, othertype=othertype)
def operator_eq_cplx(mytype, othertype):
return '''
template <> %(mytype)s & %(mytype)s::operator=<%(othertype)s>(const %(othertype)s & y)
{ this->real=y.real; this->imag=y.imag; return *this; }
''' % dict(mytype=mytype, othertype=othertype)
operator_eq = ''.join(operator_eq_real(ctype, rtype)
for ctype in cplx_types
for rtype in real_types) \
+ ''.join(operator_eq_cplx(ctype1, ctype2)
for ctype1 in cplx_types
for ctype2 in cplx_types)
# We are not using C++ generic templating here, because this would
# generate two different functions for adding a complex64 and a
# complex128, one returning a complex64, the other a complex128,
# and the compiler complains it is ambiguous.
# Instead, we generate code for known and safe types only.
def operator_plus_real(mytype, othertype):
return '''
const %(mytype)s operator+(const %(mytype)s &x, const %(othertype)s &y)
{ return %(mytype)s(x.real+y, x.imag); }
const %(mytype)s operator+(const %(othertype)s &y, const %(mytype)s &x)
{ return %(mytype)s(x.real+y, x.imag); }
''' % dict(mytype=mytype, othertype=othertype)
operator_plus = ''.join(operator_plus_real(ctype, rtype)
for ctype in cplx_types
for rtype in real_types)
def operator_minus_real(mytype, othertype):
return '''
const %(mytype)s operator-(const %(mytype)s &x, const %(othertype)s &y)
{ return %(mytype)s(x.real-y, x.imag); }
const %(mytype)s operator-(const %(othertype)s &y, const %(mytype)s &x)
{ return %(mytype)s(y-x.real, -x.imag); }
''' % dict(mytype=mytype, othertype=othertype)
operator_minus = ''.join(operator_minus_real(ctype, rtype)
for ctype in cplx_types
for rtype in real_types)
def operator_mul_real(mytype, othertype):
return '''
const %(mytype)s operator*(const %(mytype)s &x, const %(othertype)s &y)
{ return %(mytype)s(x.real*y, x.imag*y); }
const %(mytype)s operator*(const %(othertype)s &y, const %(mytype)s &x)
{ return %(mytype)s(x.real*y, x.imag*y); }
''' % dict(mytype=mytype, othertype=othertype)
operator_mul = ''.join(operator_mul_real(ctype, rtype)
for ctype in cplx_types
for rtype in real_types)
return template % dict(nbits=64, half_nbits=32) \
+ template % dict(nbits=128, half_nbits=64) \
+ operator_eq \
+ operator_plus \
+ operator_minus \
+ operator_mul
else:
return ""
def c_code_cache_version(self):
# Use the correct type checking and conversion functions
return (10, numpy.__version__)
# Make operators work with 64 and 128 arguments at the same time
return (9, numpy.__version__)
# put const around operators and added unary '-' operator
return (8, numpy.__version__)
# no need to put lib.amdlibm here as c_compile_args() are put
# in the key.
return (7,) # make complex c code optional
return (6,) # added implementations of operators that work
# with scalar arguments
return (5,) # added constructors to theano_complex class
return (4,) # explicit T given in specialization of operator=
# lines. This makes it compile with open64
def get_shape_info(self, obj):
|
def get_size(self, shape_info):
return shape_info
# Register C code for ViewOp on Scalars.
theano.compile.register_view_op_c_code(
Scalar,
"""
%(oname)s = %(iname)s;
""",
1)
int8 = Scalar('int8')
int16 = Scalar('int16')
int32 = Scalar('int32')
int64 = Scalar('int64')
uint8 = Scalar('uint8')
uint16 = Scalar('uint16')
uint32 = Scalar('uint32')
uint64 = Scalar('uint64')
float32 = Scalar('float32')
float64 = Scalar('float64')
complex64 = Scalar('complex64')
complex128 = Scalar('complex128')
int_types = int8, int16, int32, int64
uint_types = uint8, uint16, uint32, uint64
float_types = float32, float64
complex_types = complex64, complex128
discrete_types = int_types + uint_types
continuous_types = float_types + complex_types
all_types = discrete_types + continuous_types
class _scalar_py_operators:
# So that we can simplify checking code when we have a mixture of Scalar
# variables and Tensor variables
ndim = 0
dtype = property(lambda self: self.type.dtype)
""" The dtype of this scalar. """
#UNARY
def __abs__(self):
return abs_(self)
def __neg__(self):
return neg(self)
#CASTS
#def __int__(self): return AsInt(self).out
#def __float__(self): return AsDouble(self).out
#def __complex__(self): return AsComplex(self).out
#BITWISE
def __invert__(self):
return invert(self)
def __and__(self, other):
return and_(self, other)
def __or__(self, other):
return or_(self, other)
def __xor__(self, other):
return xor(self, other)
def __rand__(self, other):
return and_(other, self)
def __ror__(self, other):
return or_(other, self)
def __rxor__(self, other):
return xor(other, self)
#COMPARISONS
def __lt__(self, other):
return lt(self, other)
def __le__(self, other):
return le(self, other)
def __gt__(self, other):
return gt(self, other)
def __ge__(self, other):
return ge(self, other)
#ARITHMETIC - NORMAL
def __add__(self, other):
return add(self, other)
def __sub__(self, other):
return sub(self, other)
def __mul__(self, other):
return mul(self, other)
if PY3:
def __truediv__(self, other):
return div_proxy(self, other)
else:
def __div__(self, other):
return div_proxy(self, other)
def __floordiv__(self, other):
return int_div(self, other)
def __mod__(self, other):
return mod_check(self, other)
def __pow__(self, other):
return pow(self, other)
#ARITHMETIC - RIGHT-OPERAND
def __radd__(self, other):
return add(other, self)
def __rsub__(self, other):
return sub(other, self)
def __rmul__(self, other):
return mul(other, self)
def __rdiv__(self, other):
return div_proxy(other, self)
def __rmod__(self, other):
return mod(other, self)
def __rpow__(self, other):
return pow(other, self)
def zeros_like(self):
# The second is needed for Elemwise ops to work right
return second(self, ScalarConstant(Scalar(str(self.type.dtype)), 0))
def astype(self, dtype):
return cast(self, dtype)
class ScalarVariable(_scalar_py_operators, Variable):
pass
class ScalarConstant(_scalar_py_operators, Constant):
pass
# Register ScalarConstant as the type of Constant corresponding to Scalar
Scalar.Constant = ScalarConstant
# Easy constructors
def _multi(*fns):
def f2(f, names):
if len(names) == 1:
return f(names)
else:
return [f(name) for name in names]
if len(fns) == 1:
return partial(f2, fns[0])
else:
return [partial(f2, f) for f in fns]
ints = _multi(int64)
floats = _multi(float64)
complexs = _multi(complex128)
complexs64 = _multi(complex64)
complexs128 = _multi(complex128)
# Using a class instead of a function makes it possible to deep-copy it in
# Python 2.4.
# Note that currently only a few functions use this mechanism, because it is
# enough to make the test-suite pass with Python 2.4. However, it may prove
# necessary to use this same mechanism in other places as well in the future.
class upcast_out(object):
def __new__(self, *types):
return Scalar(dtype=Scalar.upcast(*types)),
class upgrade_to_float(object):
def __new__(self, *types):
"""
Upgrade any int types to float32 or float64 to avoid losing precision.
"""
conv = {int8: float32,
int16: float32,
int32: float64,
int64: float64,
uint8: float32,
uint16: float32,
uint32: float64,
uint64: float64}
return Scalar(Scalar.upcast(*[conv.get(type, type)
for type in types])),
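# Example (per the conv table above): upgrade_to_float(int8, int16) returns
# (Scalar('float32'),) while upgrade_to_float(int64, float32) returns
# (Scalar('float64'),).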
class same_out(object):
def __new__(self, type):
return type,
def upcast_out_no_complex(*types):
if any([type in complex_types for type in types]):
raise TypeError('complex types are not supported')
return Scalar(dtype=Scalar.upcast(*types)),
def same_out_float_only(type):
if type not in float_types:
raise TypeError('only float types are supported')
return type,
class transfer_type(gof.utils.object2):
def __init__(self, *transfer):
assert all(type(x) == int for x in transfer)
self.transfer = transfer
def __str__(self):
return 'transfer_type{%s}' % self.transfer
def __call__(self, *types):
upcast = upcast_out(*types)
retval = []
for i in self.transfer:
if i is None:
retval += [upcast]
else:
retval += [types[i]]
return retval
#return [upcast if i is None else types[i] for i in self.transfer]
def __eq__(self, other):
return type(self) == type(other) and self.transfer == other.transfer
def __hash__(self):
return hash(self.transfer)
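# Example (derived from __call__ above): transfer_type(0) forces the single
# output to reuse the type of input 0; a None entry falls back to the upcast
# of all the input types instead.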
class specific_out(gof.utils.object2):
def __init__(self, *spec):
self.spec = spec
def __call__(self, *types):
return self.spec
def __eq__(self, other):
return type(self) == type(other) and self.spec == other.spec
def __hash__(self):
return hash(self.spec)
def int_out(*types):
return int64,
def float_out(*types):
return float64,
def upgrade_to_float_no_complex(*types):
"""
Don't accept complex; otherwise, call upgrade_to_float().
"""
for type in types:
if type in complex_types:
raise TypeError('complex argument not supported')
return upgrade_to_float(*types)
def same_out_nocomplex(type):
if type in complex_types:
raise TypeError('complex argument not supported')
return type,
def int_out_nocomplex(*types):
for type in types:
if type in complex_types:
raise TypeError('complex argument not supported')
return int64,
def float_out_nocomplex(*types):
for type in types:
if type in complex_types:
raise TypeError('complex argument not supported')
return float64,
class unary_out_lookup(gof.utils.object2):
"""
Get an output_types_preference object by passing a dictionary:
unary_out_lookup({int8:int32, float32:complex128})
The result is an op that maps int8 to int32 and float32 to
complex128; other input types lead to a TypeError.
"""
def __init__(self, type_table):
self.tbl = type_table
def __call__(self, *types):
if len(types) == 1:
types = types[0]
try:
rval = self.tbl[types]
except Exception:
raise TypeError(types)
if isinstance(types, (list, tuple)):
return rval
else:
return [rval]
def __eq__(self, other):
return type(self) == type(other) and self.tbl == other.tbl
def __hash__(self):
return hash(type(self)) # ignore hash of table
def real_out(type):
if type == complex64:
return float32,
if type == complex128:
return float64,
return type,
class ScalarOp(Op):
nin = -1
nout = 1
def __init__(self, output_types_preference=None, name=None):
self.name = name
if output_types_preference is not None:
if not callable(output_types_preference):
raise TypeError(
"Expected a callable for the 'output_types_preference' argument to %s. (got: %s)" % (self.__class__, output_types_preference))
self.output_types_preference = output_types_preference
def make_node(self, *inputs):
if self.nin >= 0:
if len(inputs) != self.nin:
raise TypeError("Wrong number of inputs for %s.make_node (got %i(%s), expected %i)" \
% (self, len(inputs), str(inputs), self.nin))
inputs = [as_scalar(input) for input in inputs]
outputs = [t() for t in self.output_types([input.type for input in inputs])]
if len(outputs) != self.nout:
raise TypeError("Not the right number of outputs produced for %s(%s). Expected %s, got %s."
% (self, ", ".join(str(input) for input in inputs), self.nout, len(outputs)))
return Apply(self, inputs, outputs)
def output_types(self, types):
if hasattr(self, 'output_types_preference'):
variables = self.output_types_preference(*types)
if not isinstance(variables, (list, tuple)) or any(not isinstance(x, Type) for x in variables):
raise TypeError(
"output_types_preference should return a list or a tuple of types", self.output_types_preference, variables)
if len(variables) != self.nout:
raise TypeError("Not the right number of outputs types produced for %s(%s) by %s. Expected %s, got %s."
% (self, ", ".join(str(type) for type in variables),
self.output_types_preference, self.nout, len(variables)))
return variables
else:
raise NotImplementedError(
"Cannot calculate the output types for %s" % self)
def perform(self, node, inputs, output_storage):
if self.nout == 1:
output_storage[0][0] = self.impl(*inputs)
else:
variables = utils.from_return_values(self.impl(*inputs))
assert len(variables) == len(output_storage)
for storage, variable in zip(output_storage, variables):
storage[0] = variable
def impl(self, *inputs):
raise utils.MethodNotDefined("impl", type(self),
self.__class__.__name__)
def grad(self, inputs, output_gradients):
raise utils.MethodNotDefined("grad", type(self),
self.__class__.__name__)
def __eq__(self, other):
test = type(self) == type(other) \
and getattr(self, 'output_types_preference', None) \
== getattr(other, 'output_types_preference', None)
return test
def __hash__(self):
return hash(type(self).__name__) ^ hash(
getattr(self, 'output_types_preference', 0))
def __str__(self):
if hasattr(self, 'name') and self.name:
return self.name
else:
param = [(k, v) for k, v in self.__dict__.items()
if k not in ["name", "_op_use_c_code"]]
if param:
return "%s{%s}" % (self.__class__.__name__,
", ".join("%s=%s" % (k, v)
for k, v in param))
else:
return self.__class__.__name__
def c_code_cache_version(self):
return (4,)
def c_code_contiguous(self, node, name, inp, out, sub):
"""This function is called by Elemwise when all inputs and
outputs are c_contiguous. This allows to use the SIMD version
of this op.
The inputs are the same as c_code except that:
- inp and out must be the names of the variables associated to the
ndarrays in the C code
- node must be the elemwise node (this is needed to know
the inputs/outputs types)
"""
raise theano.gof.utils.MethodNotDefined()
class UnaryScalarOp(ScalarOp):
nin = 1
amd_float32 = None
amd_float64 = None
def c_code_contiguous(self, node, name, (x, ), (z, ), sub):
if (not theano.config.lib.amdlibm or
# We compare the dtype AND the broadcast flag
# as this function do not broadcast
node.inputs[0].type != node.outputs[0].type):
raise theano.gof.utils.MethodNotDefined()
dtype = node.inputs[0].dtype
if dtype == 'float32' and self.amd_float32 is not None:
dtype = 'float'
fct = self.amd_float32
elif dtype == 'float64' and self.amd_float64 is not None:
dtype = 'double'
fct = self.amd_float64
else:
raise theano.gof.utils.MethodNotDefined()
return """
npy_intp n = PyArray_SIZE(%(z)s);
%(dtype)s * x = (%(dtype)s*) PyArray_DATA(%(x)s);
%(dtype)s * z = (%(dtype)s*) PyArray_DATA(%(z)s);
%(fct)s(n, x, z);
""" % locals()
class BinaryScalarOp(ScalarOp):
# One may define in subclasses the following fields:
# - `identity`: for an associative operation, identity corresponds to
# the neutral element. For instance, it will be 0 for addition, 1 for
# multiplication, True for "and", False for "or".
# - `commutative`: whether op(a, b) == op(b, a)
# - `associative`: whether op(op(a, b), c) == op(a, op(b, c))
nin = 2
###############
# Comparisons
###############
class LogicalComparison(BinaryScalarOp):
def output_types(self, *input_dtypes):
return [int8]
def grad(self, inputs, output_gradients):
x, y = inputs
out = self(x, y)
assert str(out.type.dtype).find('int') != -1
return [x.zeros_like().astype(theano.config.floatX),
y.zeros_like().astype(theano.config.floatX)]
class FixedLogicalComparison(UnaryScalarOp):
"""
Comparison to a fixed value.
"""
def output_types(self, *input_dtypes):
return [int8]
def grad(self, inputs, output_gradients):
x ,= inputs
out = self(x)
assert str(out.type.dtype).find('int') != -1
return [x.zeros_like().astype(theano.config.floatX)]
class LT(LogicalComparison):
identity = False
commutative = False
associative = False
def impl(self, x, y):
# the built-in < doesn't support complex
return numpy.less(x, y)
def c_code(self, node, name, (x, y), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = (%(x)s < %(y)s);" % locals()
lt = LT()
class GT(LogicalComparison):
identity = False
commutative = False
associative = False
def impl(self, x, y):
# the built-in > doesn't support complex
return numpy.greater(x, y)
def c_code(self, node, name, (x, y), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = (%(x)s > %(y)s);" % locals()
gt = GT()
class LE(LogicalComparison):
identity = False
commutative = False
associative = False
def impl(self, x, y):
# the built-in <= doesn't support complex
return numpy.less_equal(x, y)
def c_code(self, node, name, (x, y), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = (%(x)s <= %(y)s);" % locals()
le = LE()
class GE(LogicalComparison):
identity = False
commutative = False
associative = False
def impl(self, x, y):
# the built-in >= doesn't support complex
return numpy.greater_equal(x, y)
def c_code(self, node, name, (x, y), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = (%(x)s >= %(y)s);" % locals()
ge = GE()
class EQ(LogicalComparison):
identity = False
commutative = True
associative = False
def impl(self, x, y):
return x == y
def c_code(self, node, name, (x, y), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = (%(x)s == %(y)s);" % locals()
eq = EQ()
class NEQ(LogicalComparison):
identity = False
commutative = True
associative = False
def impl(self, x, y):
return x != y
def c_code(self, node, name, (x, y), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = (%(x)s != %(y)s);" % locals()
neq = NEQ()
class IsNan(FixedLogicalComparison):
def impl(self, x):
return numpy.isnan(x)
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError()
return "%(z)s = isnan(%(x)s);" % locals()
isnan = IsNan()
class IsInf(FixedLogicalComparison):
def impl(self, x):
return numpy.isinf(x)
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError()
# Note that the C isinf returns -1 for -Inf and +1 for +Inf, while
# numpy simply returns True: we mimic numpy's behavior here, thus
# the absolute value.
return "%(z)s = abs(isinf(%(x)s));" % locals()
isinf = IsInf()
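# Illustrative sketch of why abs() is used above (assuming a libc whose
# isinf() returns -1/0/+1, as described in the comment):
#   C:     isinf(-INFINITY) == -1,  isinf(INFINITY) == 1
#   numpy: numpy.isinf(-numpy.inf) -> True,  numpy.isinf(1.0) -> False
# abs(isinf(x)) therefore yields the same 0/1 result as numpy's boolean output.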
class InRange(LogicalComparison):
nin = 3
def __init__(self, openlow, openhi):
self.openlow = openlow
self.openhi = openhi
def impl(self, x, low, hi):
if self.openlow and x <= low:
return False
elif not self.openlow and x < low:
return False
if self.openhi and x >= hi:
return False
elif not self.openhi and x > hi:
return False
return True
def c_code(self, node, name, (x, low, hi), (z, ), sub):
if self.openlow:
cmp1 = '>'
else:
cmp1 = '>='
#backport
#cmp1 = '>' if self.openlow else '>='
if self.openhi:
cmp2 = '<'
else:
cmp2 = '<='
#backport
#cmp2 = '<' if self.openhi else '<='
return ("%(z)s = %(x)s %(cmp1)s %(low)s &&"
" %(x)s %(cmp2)s %(hi)s;" % locals())
def grad(self, (x, low, hi), (gz, )):
return None, None, None
inopenrange = InRange(True, True)
inclosedrange = InRange(False, False)
class Switch(ScalarOp):
nin = 3
def impl(self, cond, ift, iff):
if cond:
return ift
else:
return iff
#backport
#return ift if cond else iff
def c_code(self, node, name, (cond, ift, iff), (z, ), sub):
return "%(z)s = %(cond)s ? %(ift)s : %(iff)s;" % locals()
def grad(self, (cond, ift, iff), (gz, )):
first_part = switch(cond, gz, 0.)
second_part = switch(cond, 0., gz)
out = self(cond, ift, iff)
if out.type.dtype in discrete_types:
first_part = 0.
second_part = 0.
# cond does affect the elements of the output so it is connected.
# For the sake of making the gradient convenient we assume that
# condition + epsilon always triggers the same branch as condition
condition_grad = cond.zeros_like().astype(theano.config.floatX)
return (condition_grad, first_part, second_part)
def output_types(self, (cond_t, ift_t, iff_t)):
return upcast_out(ift_t, iff_t)
switch = Switch()
####################
# BIT-WISE OPERATORS
####################
class UnaryBitOp(UnaryScalarOp):
def output_types(self, *input_types):
for i in input_types[0]:
if i not in (int8, int16, int32, int64):
raise TypeError('input to a BitOp must have type int8,'
' int16, int32 or int64... not %s' % i)
return upcast_out(*input_types[0])
def grad(self, inputs, output_gradients):
return [inputs[0].zeros_like().astype(theano.config.floatX)]
class BinaryBitOp(BinaryScalarOp):
def output_types(self, *input_types):
t0, t1 = input_types[0]
for i in input_types[0]:
if i not in (int8, int16, int32, int64):
raise TypeError('input to a BitOp must have type int8,'
' int16, int32 or int64... not %s' % i)
return upcast_out(*input_types[0])
def grad(self, inputs, output_gradients):
a,b = inputs
return [a.zeros_like().astype(theano.config.floatX), b.zeros_like().astype(theano.config.floatX)]
class OR(BinaryBitOp):
identity = 0
commutative = True
associative = True
def impl(self, x, y):
return x | y
def c_code(self, node, name, (x, y), (z, ), sub):
return "%(z)s = (%(x)s | %(y)s);" % locals()
or_ = OR()
class XOR(BinaryBitOp):
identity = 0
commutative = True
associative = True
def impl(self, x, y):
return x ^ y
def c_code(self, node, name, (x, y), (z, ), sub):
return "%(z)s = (%(x)s ^ %(y)s);" % locals()
xor = XOR()
class AND(BinaryBitOp):
identity = 1
commutative = True
associative = True
def impl(self, x, y):
return x & y
def c_code(self, node, name, (x, y), (z, ), sub):
return "%(z)s = (%(x)s & %(y)s);" % locals()
and_ = AND()
class Invert(UnaryBitOp):
def impl(self, x):
return ~x
def c_code(self, node, name, (x,), (z, ), sub):
return "%(z)s = (~%(x)s);" % locals()
invert = Invert()
##############
# Arithmetic
##############
class Maximum(BinaryScalarOp):
commutative = True
associative = True
def impl(self, *inputs):
# The built-in max function doesn't support complex types
return numpy.maximum(*inputs)
def c_code(self, node, name, (x, y), (z, ), sub):
if any([i.type in complex_types for i in node.inputs]):
raise NotImplementedError()
# Test for both y>x and x>=y to detect NaN
return ('%(z)s = ((%(y)s)>(%(x)s)? (%(y)s): '
'((%(x)s)>=(%(y)s)? (%(x)s): nan("")));' % locals())
def grad(self, (x, y), (gz, )):
assert gz.type not in complex_types
# max is not defined for complex_types
output = self(x, y)
if output.type in discrete_types:
return [x.zeros_like().astype(theano.config.floatX),
y.zeros_like().astype(theano.config.floatX)]
gx = eq(output, x) * gz
gy = eq(output, y) * gz
return (gx, gy)
maximum = Maximum(upcast_out, name='maximum')
class Minimum(BinaryScalarOp):
commutative = True
associative = True
def impl(self, *inputs):
# The built-in min function doesn't support complex types
return numpy.minimum(*inputs)
def c_code(self, node, name, (x, y), (z, ), sub):
if any([i.type in complex_types for i in node.inputs]):
raise NotImplementedError()
return ('%(z)s = ((%(y)s)<(%(x)s)? (%(y)s): '
'((%(x)s)<=(%(y)s)? (%(x)s): nan("")));' % locals())
def grad(self, (x, y), (gz, )):
assert gz.type not in complex_types
# min is not defined for complex_types
output = minimum(x, y)
if output.type in discrete_types:
return [x.zeros_like().astype(theano.config.floatX),
y.zeros_like().astype(theano.config.floatX)]
gx = eq(output, x) * gz
gy = eq(output, y) * gz
return (gx, gy)
minimum = Minimum(upcast_out, name='minimum')
class Add(ScalarOp):
identity = 0
commutative = True
associative = True
def impl(self, *inputs):
return sum(inputs)
def c_code(self, node, name, inputs, (z, ), sub):
if not inputs:
return z + " = 0;"
else:
return z + " = " + " + ".join(inputs) + ";"
def grad(self, inputs, (gz, )):
if gz.type in complex_types:
raise NotImplementedError()
if self(*inputs).type in discrete_types:
assert gz is not None
retval = []
for ii, inp in enumerate(inputs):
if hasattr(inp, 'zeros_like'):
retval.append(
inp.zeros_like().astype(theano.config.floatX))
else:
retval.append(grad_undefined(self, ii, inp))
else:
retval = []
for i in inputs:
retval += [gz]
return retval
add = Add(upcast_out, name='add')
class Mul(ScalarOp):
identity = 1
commutative = True
associative = True
def impl(self, *inputs):
return numpy.product(inputs)
def c_code(self, node, name, inputs, (z, ), sub):
if not inputs:
return z + " = 1;"
else:
return z + " = " + " * ".join(inputs) + ";"
def grad(self, inputs, (gz, )):
retval = []
# The following 3 lines verify that gz is complex when the
# output is complex. The rest of this function makes this assumption.
output_type = self.output_types([i.type for i in inputs])[0]
if output_type in complex_types:
if not gz.type in complex_types:
raise TypeError('Mul with output_type ' + str(output_type) +\
' expected gz type to be complex, got gz with type ' +\
str(gz.type))
if output_type in discrete_types:
return [ipt.zeros_like().astype(theano.config.floatX)
for ipt in inputs]
for input in inputs:
if gz.type in complex_types:
# zr+zi = (xr + xi)(yr + yi)
# zr+zi = (xr*yr - xi*yi) + (xr yi + xi yr )
otherprod = mul(*(utils.difference(inputs, [input])))
yr = real(otherprod)
yi = imag(otherprod)
if input.type in complex_types:
retval += [complex(yr * real(gz) + yi * imag(gz),
yr * imag(gz) - yi * real(gz))]
else:
retval += [yr * real(gz) + yi * imag(gz)]
else:
retval += [mul(*([gz] + utils.difference(inputs,
[input])))]
return retval
mul = Mul(upcast_out, name='mul')
class Sub(BinaryScalarOp):
def impl(self, x, y):
return x - y
def c_code(self, node, name, (x, y), (z, ), sub):
return "%(z)s = %(x)s - %(y)s;" % locals()
def grad(self, (x, y), (gz, )):
if gz.type in complex_types:
raise NotImplementedError()
if (x - y).type in discrete_types:
return [x.zeros_like().astype(theano.config.floatX),
y.zeros_like().astype(theano.config.floatX)]
first_part = gz
second_part = -gz
return first_part, second_part
sub = Sub(upcast_out, name='sub')
def int_or_true_div(x_discrete, y_discrete):
"""
Return 'int' or 'true' depending on the type of division used for x / y.
:param x_discrete: True if `x` is discrete ([unsigned] integer).
:param y_discrete: True if `y` is discrete ([unsigned] integer).
:returns: 'int' if `x / y` should be an integer division, or `true` if it
should be a true division.
Raises an IntegerDivisionError if both `x_discrete` and `y_discrete` are
True and `config.int_division` is set to 'raise'.
This function is used by both scalar/basic.py and tensor/basic.py.
"""
if (x_discrete and y_discrete):
if config.int_division == 'raise':
raise IntegerDivisionError(
"With `config.int_division` set to 'raise', dividing two "
"integer types with '/' is forbidden to avoid confusion "
"between integer and floating point divisions. Please "
"use // for integer division, or if you want a float result "
"either cast one of the arguments to a float or directly call "
"`x.__truediv__(y)`.")
elif config.int_division == 'int':
warnings.warn(
"Division of two integer types with x / y is deprecated, "
"please use x // y for an integer division.",
DeprecationWarning,
stacklevel=4)
return 'int'
elif config.int_division == 'floatX':
return 'true'
else:
raise NotImplementedError(config.int_division)
else:
return 'true'
def div_proxy(x, y):
"""Proxy for either true_div or int_div, depending on types of x, y."""
f = eval('%s_div' % int_or_true_div(as_scalar(x).type in discrete_types,
as_scalar(y).type in discrete_types))
return f(x, y)
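# Illustrative sketch of how the selection above resolves (the discrete/discrete
# case depends on config.int_division, as documented in int_or_true_div):
#   int_or_true_div(False, False) -> 'true'   # float / float
#   int_or_true_div(True, False)  -> 'true'   # int / float
#   int_or_true_div(True, True)   -> 'int' (config 'int'), 'true' (config
#                                    'floatX'), or IntegerDivisionError ('raise')
# div_proxy(x, y) then dispatches to int_div or true_div accordingly.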
class TrueDiv(BinaryScalarOp):
def output_types(self, types):
if all(t in discrete_types for t in types):
return [Scalar(config.floatX)]
else:
return super(TrueDiv, self).output_types(types)
def impl(self, x, y):
x = numpy.asarray(x)
y = numpy.asarray(y)
if all(a.dtype in discrete_types for a in (x, y)):
return numpy.array(float(x) / y, dtype=config.floatX)
else:
return x / y
def c_code(self, node, name, (x, y), (z, ), sub):
# we generate good c code only when both are complex!
if sum([node.inputs[0].type in complex_types,
node.inputs[1].type in complex_types]) == 1:
raise NotImplementedError('type not supported', type)
if (node.inputs[0].type in discrete_types and
node.inputs[1].type in discrete_types):
return "%(z)s = ((double)%(x)s) / %(y)s;" % locals()
return "%(z)s = %(x)s / %(y)s;" % locals()
def grad(self, (x, y), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
# If the output of this op is discrete, then it
# is locally flat everywhere, so the gradient
# through it is 0.
# This is different from it not being connected
# to the output; x/y is still a function of x
# and y; it's just a step function.
if (x / y).type in discrete_types:
return [x.zeros_like(), y.zeros_like()]
first_part = gz / y
if y.type in complex_types:
raise NotImplementedError()
second_part = -(gz * x) / (y * y)
return first_part, second_part
true_div = TrueDiv(upcast_out, name='true_div')
class IntDiv(BinaryScalarOp):
complex_error = ComplexError(
"Theano does not support integer division (//) on "
"complex numbers, since numpy deprecated it.")
def impl(self, x, y):
return x // y
def c_support_code(self):
# We use a macro as Python uses % as a special string character,
# and the output of c_code may be run through another level
# of string formatting.
return "#define THEANO_MACRO_MOD(x,y) (x % y)"
def c_code(self, node, name, (x, y), (z,), sub):
t = node.inputs[0].type.upcast(*[i.type for i in node.inputs[1:]])
if t in imap(str, discrete_types):
x_div_y_pp = '(%(x)s / %(y)s)' % locals()
x_div_y_mp = '((-%(x)s) / %(y)s)' % locals()
x_mod_y_mp = 'THEANO_MACRO_MOD((-%(x)s), %(y)s)' % locals()
x_div_y_pm = '(%(x)s / (-%(y)s))' % locals()
x_mod_y_pm = 'THEANO_MACRO_MOD(%(x)s, (-%(y)s))' % locals()
x_div_y_mm = '((-%(x)s) / (-%(y)s))' % locals()
elif t in imap(str, float_types):
# We need to call different functions of math.h
# depending on the type
if t == 'float32':
floor = 'floorf'
fmod = 'fmodf'
elif t == 'float64':
floor = 'floor'
fmod = 'fmod'
else:
raise NotImplementedError('type not supported', t)
x_div_y_pp = '%(floor)s(%(x)s / %(y)s)' % locals()
x_div_y_mp = '%(floor)s((-%(x)s) / %(y)s)' % locals()
x_mod_y_mp = '%(fmod)s((-%(x)s), %(y)s)' % locals()
x_div_y_pm = '%(floor)s(%(x)s / (-%(y)s))' % locals()
x_mod_y_pm = '%(fmod)s(%(x)s, (-%(y)s))' % locals()
x_div_y_mm = '%(floor)s((-%(x)s) / (-%(y)s))' % locals()
elif t in complex_types:
raise self.complex_error
else:
raise NotImplementedError('type not supported', t)
return dedent("""
if (%(x)s < 0) {
if (%(y)s < 0) {
%(z)s = %(x_div_y_mm)s;
} else {
%(z)s = - %(x_div_y_mp)s - ((%(x_mod_y_mp)s == 0) ? 0 : 1);
}
} else {
if (%(y)s < 0) {
%(z)s = - %(x_div_y_pm)s - ((%(x_mod_y_pm)s == 0) ? 0 : 1);
} else {
%(z)s = %(x_div_y_pp)s;
}
}
""") % locals()
def c_code_cache_version(self):
return (2,)
def grad(self, inputs, g_output):
return [None] * len(inputs)
int_div = IntDiv(upcast_out, name='int_div')
floor_div = int_div
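# Illustrative note on the sign handling in the generated C code above:
# Python's floor division rounds toward negative infinity, while C99 integer
# division truncates toward zero, e.g.
#   Python: -7 // 2 == -4
#   C99:    -7 / 2  == -3
# so for mixed-sign operands with a non-zero remainder the C code subtracts 1
# to reproduce Python's result.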
def mod_check(x, y):
if (as_scalar(x).type in complex_types or
as_scalar(y).type in complex_types):
# Currently forbidden.
raise Mod.complex_error
else:
return mod(x, y)
class Mod(BinaryScalarOp):
complex_error = ComplexError(
"Theano does not support the mod operator (%) on "
"complex numbers, since numpy deprecated it.")
def impl(self, x, y):
if isinstance(x, numpy.complex) or isinstance(y, numpy.complex):
raise self.complex_error
return x % y
def c_code_cache_version(self):
return (5,)
def c_support_code(self):
# We use a macro as Python uses % as a special string character,
# and the output of c_code may be run through another level
# of string formatting.
return "#define THEANO_MACRO_MOD(x,y) (x % y)"
def c_code(self, node, name, (x, y), (z, ), sub):
"""
We want the result to have the same sign as Python's %, not the sign
convention used by C's modulo operator.
"""
# raise NotImplementedError("Unlike Python, C's modulo returns negative
# modulo on negative dividend (to implement)")
t = node.inputs[0].type.upcast(*[i.type for i in node.inputs[1:]])
if (str(t) in imap(str, discrete_types) or
t in ['uint8', 'int8', 'uint16', 'int16'] or
t in ['uint32', 'int32', 'uint64', 'int64'] or
t in discrete_types):
# The above or's should not be needed anymore. However, for now we
# keep them for safety, and verify they are useless with an
# assert.
assert str(t) in imap(str, discrete_types)
x_mod_y = "THEANO_MACRO_MOD(%(x)s, %(y)s)" % locals()
x_mod_ymm = "THEANO_MACRO_MOD(-%(x)s, -%(y)s)" % locals()
x_mod_ypm = "THEANO_MACRO_MOD(%(x)s, -%(y)s)" % locals()
x_mod_ymp = "THEANO_MACRO_MOD(-%(x)s, %(y)s)" % locals()
elif (str(t) in imap(str, float_types) or
t in ['float32', 'float64'] or
t in float_types):
# The above or's should not be needed anymore. However, for now we
# keep them for safety, and verify they are useless with an
# assert.
assert str(t) in imap(str, float_types)
x_mod_y = "fmod(%(x)s,%(y)s)" % locals()
x_mod_ymm = "fmod(-%(x)s,-%(y)s)" % locals()
x_mod_ypm = "fmod(%(x)s,-%(y)s)" % locals()
x_mod_ymp = "fmod(-%(x)s,%(y)s)" % locals()
elif str(t) in imap(str, complex_types):
raise self.complex_error
else:
raise NotImplementedError('type not supported', t)
return dedent("""
if (%(x)s < 0){
if (%(y)s < 0){
%(z)s = -(%(x_mod_ymm)s);
}else{
%(z)s = - %(x_mod_ymp)s + (%(x_mod_ymp)s != 0 ? %(y)s : 0);
}
}else if (%(y)s < 0){
%(z)s = (%(x_mod_ypm)s) + (%(x_mod_ypm)s != 0 ? %(y)s : 0);
}else{
%(z)s = %(x_mod_y)s;
}
""") % locals()
def grad(self, (x, y), (gz, )):
return None, None
mod = Mod(upcast_out, name='mod')
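# Illustrative note on the sign corrections in the generated C code above.
# Python's % follows the sign of the divisor, C's % and fmod follow the sign
# of the dividend:
#   Python: -7 % 3 == 2      7 % -3 == -2
#   C:      -7 % 3 == -1     fmod(7, -3) == 1
# hence the C code adds %(y)s back whenever the mixed-sign remainder is
# non-zero, reproducing Python's convention.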
class Pow(BinaryScalarOp):
def impl(self, x, y):
return x ** y
def c_code(self, node, name, (x, y), (z, ), sub):
if (node.inputs[0].type in complex_types or
node.inputs[1].type in complex_types):
raise NotImplementedError('type not supported', type)
return "%(z)s = pow(%(x)s, %(y)s);" % locals()
def grad(self, (x, y), (gz, )):
if gz.type in complex_types:
raise NotImplementedError()
if self(x, y).type in discrete_types:
return [x.zeros_like().astype(theano.config.floatX),
y.zeros_like().astype(theano.config.floatX)]
first_part = gz * y * x ** (y - 1)
second_part = gz * log(x) * x ** y
return (first_part, second_part)
def c_code_contiguous(self, node, name, (x, y), (z, ), sub):
if not theano.config.lib.amdlibm:
raise theano.gof.utils.MethodNotDefined()
# We compare the dtype AND the broadcast flag
# as this function does not broadcast
if (node.inputs[0].type == node.outputs[0].type and
node.inputs[1].type == node.outputs[0].type and
# amdlibm 3.0 do not have a float64 version of this SIMD function
node.inputs[0].dtype == 'float32' and
node.inputs[1].dtype == 'float32'):
dtype = 'float'
fct = "amd_vrsa_powf"
return """
npy_intp n = PyArray_SIZE(%(z)s);
%(dtype)s * x = (%(dtype)s*) PyArray_DATA(%(x)s);
%(dtype)s * y = (%(dtype)s*) PyArray_DATA(%(y)s);
%(dtype)s * z = (%(dtype)s*) PyArray_DATA(%(z)s);
%(fct)s(n, x, y, z);
""" % locals()
# We compare the dtype and check we broadcast a scalar
elif (node.inputs[0].type == node.outputs[0].type and
node.inputs[1].dtype == node.outputs[0].dtype and
all(node.inputs[1].broadcastable) and
# amdlibm 3.0 do not have a float64 version of this SIMD function
node.inputs[0].dtype == 'float32' and
node.inputs[1].dtype == 'float32'):
dtype = 'float'
fct = "amd_vrsa_powxf"
return """
npy_intp n = PyArray_SIZE(%(z)s);
%(dtype)s * x = (%(dtype)s*) PyArray_DATA(%(x)s);
%(dtype)s * y = (%(dtype)s*) PyArray_DATA(%(y)s);
%(dtype)s * z = (%(dtype)s*) PyArray_DATA(%(z)s);
%(fct)s(n, x, *y, z);
""" % locals()
raise theano.gof.utils.MethodNotDefined()
pow = Pow(upcast_out, name='pow')
class Clip(ScalarOp):
nin = 3
def impl(self, x, min, max):
if x < min:
return min
elif x > max:
return max
else:
return x
def c_code(self, node, name, (x, min, max), (z, ), sub):
return "%(z)s = %(x)s < %(min)s ? %(min)s : %(x)s > %(max)s ? %(max)s : %(x)s;" % locals()
def grad(self, (x, mn, mx), (gz, )):
assert gz.type not in complex_types
gx = ((x > mn) & (x < mx)) * gz
gmn = (x < mn) * gz
gmx = (x > mx) * gz
out = self(x, mn, mx)
def handle_int(v):
if out.type in int_types:
return v.zeros_like().astype(config.floatX)
return v
return map(handle_int, [gx, gmn, gmx])
# Don't allow complex even if numpy does
# As there is no mathematical reason for this function on complex
clip = Clip(upcast_out_no_complex, name='clip')
class Second(BinaryScalarOp):
def impl(self, x, y):
return y
def c_code(self, node, name, (x, y), (z, ), sub):
return "%(z)s = %(y)s;" % locals()
def connection_pattern(self, node):
# x is never connected because its elements are never used
# y is connected because its elements are copied over
return [[False], [True]]
def grad(self, (x, y), (gz, )):
if y.type in continuous_types:
# x is disconnected because the elements of x are not used
return DisconnectedType()(), gz
else:
#when y is discrete, we assume the function can be extended
#to deal with real-valued inputs by rounding them to the
#nearest integer. f(x+eps) thus equals f(x) so the gradient
#is zero, not disconnected or undefined
return DisconnectedType()(), y.zeros_like()
second = Second(transfer_type(1), name='second')
class Identity(UnaryScalarOp):
def impl(self, input):
return input
def c_code(self, node, name, (x, ), (z, ), sub):
return "%(z)s = %(x)s;" % locals()
def grad(self, (x, ), (gz, )):
if x.type in continuous_types:
return gz,
else:
return None,
identity = Identity(same_out, name='identity')
#### CASTING OPERATIONS
class Cast(UnaryScalarOp):
def __init__(self, o_type, name=None):
if not isinstance(o_type, Scalar):
raise TypeError(o_type)
super(Cast, self).__init__(specific_out(o_type), name=name)
self.o_type = o_type
self.ctor = getattr(numpy, o_type.dtype)
def __str__(self):
return '%s{%s}' % (self.__class__.__name__, self.o_type.dtype)
def impl(self, input):
return self.ctor(input)
def c_code(self, node, name, (x, ), (z, ), sub):
return "%s = (%s)%s;" % (z, node.outputs[0].type.dtype_specs()[1], x)
def grad(self, (x, ), (gz, )):
if self.o_type in continuous_types:
return [gz]
else:
return [x.zeros_like().astype(theano.config.floatX)]
def c_code_cache_version(self):
s = super(Cast, self).c_code_cache_version()
if s:
return (3,) + s
else:
return s
convert_to_int8 = Cast(int8, name='convert_to_int8')
convert_to_int16 = Cast(int16, name='convert_to_int16')
convert_to_int32 = Cast(int32, name='convert_to_int32')
convert_to_int64 = Cast(int64, name='convert_to_int64')
convert_to_uint8 = Cast(uint8, name='convert_to_uint8')
convert_to_uint16 = Cast(uint16, name='convert_to_uint16')
convert_to_uint32 = Cast(uint32, name='convert_to_uint32')
convert_to_uint64 = Cast(uint64, name='convert_to_uint64')
convert_to_float32 = Cast(float32, name='convert_to_float32')
convert_to_float64 = Cast(float64, name='convert_to_float64')
convert_to_complex64 = Cast(complex64, name='convert_to_complex64')
convert_to_complex128 = Cast(complex128, name='convert_to_complex128')
_cast_mapping = {
'int8': convert_to_int8,
'int16': convert_to_int16,
'int32': convert_to_int32,
'int64': convert_to_int64,
'uint8': convert_to_uint8,
'uint16': convert_to_uint16,
'uint32': convert_to_uint32,
'uint64': convert_to_uint64,
'float32': convert_to_float32,
'float64': convert_to_float64,
'complex64': convert_to_complex64,
'complex128': convert_to_complex128}
def cast(x, dtype):
"""Symbolically cast `x` to a Scalar of given `dtype`."""
if dtype == 'floatX':
dtype = config.floatX
_x = as_scalar(x)
if _x.type.dtype == dtype:
return _x
if _x.type.dtype.startswith('complex') and not dtype.startswith('complex'):
raise TypeError('Casting from complex to real is ambiguous: consider'
' real(), imag(), angle() or abs()')
return _cast_mapping[dtype](_x)
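# Illustrative sketch of the behaviour defined above:
#   cast(x, 'floatX')        # 'floatX' is first resolved to config.floatX
#   cast(x, x.type.dtype)    # returns x unchanged (already the right dtype)
#   cast(z, 'float64')       # TypeError if z is complex: use real()/imag()/abs()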
class Abs(UnaryScalarOp):
def make_node(self, x):
inputs = [as_scalar(input) for input in [x]]
if inputs[0].type == complex64:
outputs = [float32()]
elif inputs[0].type == complex128:
outputs = [float64()]
else:
outputs = [t() for t in self.output_types(
[input.type for input in inputs])]
return Apply(self, inputs, outputs)
def impl(self, x):
return numpy.abs(x)
def grad(self, (x, ), (gz, )):
if x.type in float_types + complex_types:
return gz * x / abs(x), # formula works for complex and real
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
type = node.inputs[0].type
if type in int_types:
return "%(z)s = abs(%(x)s);" % locals()
if type in float_types:
return "%(z)s = fabs(%(x)s);" % locals()
if type in complex_types:
return "%(z)s = sqrt(%(x)s.real*%(x)s.real + %(x)s.imag*%(x)s.imag);" % locals()
raise NotImplementedError('type not supported', type)
abs_ = Abs(same_out)
class Sgn(UnaryScalarOp):
def impl(self, x):
#casting to output type is handled by filter
return numpy.sign(x)
def grad(self, (x, ), (gz, )):
rval = x.zeros_like()
if rval.type.dtype in discrete_types:
rval = rval.astype(theano.config.floatX)
return [rval]
def c_code(self, node, name, (x, ), (z, ), sub):
#casting is done by compiler
#TODO: use copysign
type = node.inputs[0].type
if type in float_types:
return "%(z)s = (%(x)s >= 0) ? (%(x)s == 0) ? 0.0 : 1.0 : -1.0;" % locals()
if type in int_types:
return "%(z)s = (%(x)s >= 0) ? (%(x)s == 0) ? 0 : 1 : -1;" % locals()
raise TypeError() # complex has no sgn
def c_code_cache_version(self):
s = super(Sgn, self).c_code_cache_version()
if s:
return (3,) + s
else: # if parent is unversioned, we are too
return s
sgn = Sgn(same_out_nocomplex, name='sgn')
class Ceil(UnaryScalarOp):
def impl(self, x):
return numpy.ceil(x)
def grad(self, (x,), (gz,)):
rval = x.zeros_like()
if rval.type.dtype in discrete_types:
rval = rval.astype(theano.config.floatX)
return [rval]
def c_code(self, node, name, (x,), (z,), sub):
return "%(z)s = ceil(%(x)s);" % locals()
ceil = Ceil(same_out_nocomplex, name='ceil')
class Floor(UnaryScalarOp):
def impl(self, x):
return numpy.floor(x)
def grad(self, (x,), (gz,)):
rval = x.zeros_like()
if rval.type.dtype in discrete_types:
rval = rval.astype(theano.config.floatX)
return [rval]
def c_code(self, node, name, (x,), (z,), sub):
return "%(z)s = floor(%(x)s);" % locals()
floor = Floor(same_out_nocomplex, name='floor')
class Trunc(UnaryScalarOp):
def impl(self, x):
return numpy.trunc(x)
def grad(self, (x,), (gz,)):
return [x.zeros_like().astype(theano.config.floatX)]
def c_code(self, node, name, (x,), (z,), sub):
return "%(z)s = %(x)s >= 0? floor(%(x)s): -floor(-%(x)s);" % locals()
trunc = Trunc(same_out_nocomplex, name='trunc')
class RoundHalfToEven(UnaryScalarOp):
"""
This function implements the same rounding as numpy: round half to even.
The C/C++ round() function IS DIFFERENT!
See http://en.wikipedia.org/wiki/Rounding for more detail.
"""
def impl(self, x):
return numpy.round(x)
def c_code___(self, node, name, (x, ), (z, ), sub):
typ = node.outputs[0].type.dtype
if not node.outputs[0].type.dtype in ['float32', 'float64']:
Exception("The output should be float32 or float64")
return dedent("""
#ifndef ROUNDING_EPSILON
#define ROUNDING_EPSILON 0.0000001
#endif
if (%(x)s < 0.0){
// We implement the else part like that: -else( -%(x)s);
%(typ)s i;
std::modf( -%(x)s, &i );
// If %(x)s is exactly halfway between two integers
if ((-%(x)s -(i +0.5)) < epsilon){
// If 'i' is even then return 'i'
if (std::fmod( i, 2.0 ) < epsilon){
%(z)s = - i;
}else{
// Else return the nearest even integer
%(z)s = - ceil( i +0.5 );
}
}else{
// round to closest
%(z)s = - round(%(x)s+5);
}
}else{
%(typ)s i;
std::modf( %(x)s, &i );
// If %(x)s is exactly halfway between two integers
if ((%(x)s -(i +0.5)) < epsilon){
// If 'i' is even then return 'i'
if (std::fmod( i, 2.0 ) < epsilon){
%(z)s = i;
}else{
// Else return the nearest even integer
%(z)s = ceil( i +0.5 );
}
}else{
// round to closest
%(z)s = round(%(x)s+5);
}
}
#undef ROUNDING_EPSILON
""")
round_half_to_even = RoundHalfToEven(same_out_float_only)
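# Illustration of the difference described in the docstring above:
#   numpy.round(0.5) == 0.0, numpy.round(1.5) == 2.0, numpy.round(2.5) == 2.0
# whereas the C round() function rounds halves away from zero:
#   round(0.5) == 1.0,       round(1.5) == 2.0,       round(2.5) == 3.0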
def round_half_away_from_zero_(a):
if a > 0:
return numpy.floor(a + 0.5)
else:
return numpy.ceil(a - 0.5)
round_half_away_from_zero_vec64 = numpy.vectorize(
round_half_away_from_zero_,
doc='round_half_away_from_zero_vec64')
round_half_away_from_zero_vec32 = numpy.vectorize(
round_half_away_from_zero_,
doc='round_half_away_from_zero_vec32',
otypes=['float32'])
def round_half_away_from_zero_vec(a):
if getattr(a, 'dtype', None) == numpy.float32:
return round_half_away_from_zero_vec32(a)
return round_half_away_from_zero_vec64(a)
class RoundHalfAwayFromZero(UnaryScalarOp):
"""
Implements the same rounding algorithm as the C round() function.
numpy.round IS DIFFERENT!
See http://en.wikipedia.org/wiki/Rounding for more detail.
"""
def impl(self, x):
return round_half_away_from_zero_vec(x)
def c_code(self, node, name, (x, ), (z, ), sub):
if node.outputs[0].type.dtype in ['float32', 'float64']:
return "%(z)s = round(%(x)s);" % locals()
else:
Exception("The output should be float32 or float64")
round_half_away_from_zero = RoundHalfAwayFromZero(same_out_float_only)
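# Illustration, following the impl above (floor(a + 0.5) for a > 0,
# ceil(a - 0.5) otherwise):
#   round_half_away_from_zero_(0.5) == 1.0,  round_half_away_from_zero_(-0.5) == -1.0
#   round_half_away_from_zero_(2.5) == 3.0,  round_half_away_from_zero_(-2.5) == -3.0
# matching C round() rather than numpy.round's half-to-even rule.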
class Neg(UnaryScalarOp):
def impl(self, x):
return -x
def grad(self, (x,), (gz,)):
if x.type in continuous_types:
return -gz,
else:
return None,
def c_code(self, node, name, (x,), (z,), sub):
return "%(z)s = -%(x)s;" % locals()
neg = Neg(same_out, name='neg')
class Inv(UnaryScalarOp):
""" multiplicative inverse. Also called reciprocal"""
def impl(self, x):
return 1.0 / x
def grad(self, (x,), (gz,)):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return -gz / (x * x),
else:
return None,
def c_code(self, node, name, (x,), (z,), sub):
return "%(z)s = 1.0 / %(x)s;" % locals()
inv = Inv(upgrade_to_float, name='inv')
class Log(UnaryScalarOp):
""" log base e """
amd_float32 = "amd_vrsa_logf"
amd_float64 = "amd_vrda_log"
def impl(self, x):
return numpy.log(x)
def grad(self, (x,), (gz,)):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz / x,
else:
return None,
def c_code(self, node, name, (x,), (z,), sub):
#todo: the version using log2 seems to be very slightly faster
# on some machines for some reason, check if it's worth switching
#return "%(z)s = log2(%(x)s) * 0.69314718055994529;" % locals()
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = log(%(x)s);" % locals()
log = Log(upgrade_to_float, name='log')
class Log2(UnaryScalarOp):
""" log base 2 """
amd_float32 = "amd_vrsa_log2f"
amd_float64 = "amd_vrda_log2"
def impl(self, x):
return numpy.log2(x)
def grad(self, (x,), (gz,)):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz / (x * math.log(2.0)),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = log2(%(x)s);" % locals()
log2 = Log2(upgrade_to_float, name='log2')
class Log10(UnaryScalarOp):
""" log base 10 """
amd_float32 = "amd_vrsa_log10f"
amd_float64 = "amd_vrda_log10"
def impl(self, x):
return numpy.log10(x)
def grad(self, (x,), (gz,)):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz / (x * numpy.log(10.0)),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = log10(%(x)s);" % locals()
log10 = Log10(upgrade_to_float, name='log10')
class Log1p(UnaryScalarOp):
""" log(1+x) """
def impl(self, x):
return numpy.log1p(x)
def grad(self, (x,), (gz,)):
if gz.type in complex_types:
raise NotImplementedError()
if gz.type in float_types:
return [gz / (1 + x)]
return [None]
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = log1p(%(x)s);" % locals()
log1p = Log1p(upgrade_to_float, name='log1p')
class Exp(UnaryScalarOp):
amd_float32 = "amd_vrsa_expf"
amd_float64 = "amd_vrda_exp"
def impl(self, x):
return numpy.exp(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
elif x.type in float_types:
return gz * exp(x),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = exp(%(x)s);" % locals()
exp = Exp(upgrade_to_float, name='exp')
class Exp2(UnaryScalarOp):
def impl(self, x):
return numpy.exp2(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
elif x.type in float_types:
return gz * exp2(x) * log(numpy.cast[x.type](2)),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = exp2(%(x)s);" % locals()
exp2 = Exp2(upgrade_to_float, name='exp2')
class Expm1(UnaryScalarOp):
def impl(self, x):
return numpy.expm1(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
elif x.type in float_types:
return gz * exp(x),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = exp(%(x)s) - 1;" % locals()
expm1 = Expm1(upgrade_to_float, name='expm1')
class Sqr(UnaryScalarOp):
def impl(self, x):
return x * x
def grad(self, (x, ), (gz, )):
if gz.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz * x * 2,
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
return "%(z)s = %(x)s * %(x)s;" % locals()
sqr = Sqr(same_out, name='sqr')
class Sqrt(UnaryScalarOp):
def impl(self, x):
return numpy.sqrt(x)
def grad(self, (x,), (gz,)):
if gz.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return (gz * 0.5) / sqrt(x),
else:
return None,
def c_code(self, node, name, (x,), (z,), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = sqrt(%(x)s);" % locals()
sqrt = Sqrt(upgrade_to_float, name='sqrt')
class Deg2Rad(UnaryScalarOp):
def impl(self, x):
return numpy.deg2rad(x)
def grad(self, (x,), (gz,)):
if gz.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz * numpy.asarray(numpy.pi / 180, gz.type),
else:
return None,
def c_code(self, node, name, (x,), (z,), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = %(x)s * (M_PI / 180.0);" % locals()
deg2rad = Deg2Rad(upgrade_to_float, name='deg2rad')
class Rad2Deg(UnaryScalarOp):
def impl(self, x):
return numpy.rad2deg(x)
def grad(self, (x,), (gz,)):
if gz.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz * numpy.asarray(180. / numpy.pi, gz.type),
else:
return None,
def c_code(self, node, name, (x,), (z,), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = %(x)s * (180.0 / M_PI);" % locals()
rad2deg = Rad2Deg(upgrade_to_float, name='rad2deg')
class Cos(UnaryScalarOp):
amd_float32 = "amd_vrsa_cosf"
amd_float64 = "amd_vrda_cos"
def impl(self, x):
return numpy.cos(x)
def grad(self, (x, ), (gz, )):
if gz.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return -gz * sin(x),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = cos(%(x)s);" % locals()
cos = Cos(upgrade_to_float, name='cos')
class ArcCos(UnaryScalarOp):
def impl(self, x):
return numpy.arccos(x)
def grad(self, (x,), (gz,)):
if gz.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return - gz / sqrt(numpy.cast[x.type](1) - sqr(x)),
else:
return None,
def c_code(self, node, name, (x,), (z,), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = acos(%(x)s);" % locals()
arccos = ArcCos(upgrade_to_float, name='arccos')
class Sin(UnaryScalarOp):
amd_float32 = "amd_vrsa_sinf"
amd_float64 = "amd_vrda_sin"
def impl(self, x):
return numpy.sin(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz * cos(x),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = sin(%(x)s);" % locals()
sin = Sin(upgrade_to_float, name='sin')
class ArcSin(UnaryScalarOp):
def impl(self, x):
return numpy.arcsin(x)
def grad(self, (x,), (gz,)):
if gz.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz / sqrt(numpy.cast[x.type](1) - sqr(x)),
else:
return None,
def c_code(self, node, name, (x,), (z,), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = asin(%(x)s);" % locals()
arcsin = ArcSin(upgrade_to_float, name='arcsin')
class Tan(UnaryScalarOp):
def impl(self, x):
return numpy.tan(x)
def grad(self, (x,), (gz,)):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz / sqr(cos(x)),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = tan(%(x)s);" % locals()
tan = Tan(upgrade_to_float, name='tan')
class ArcTan(UnaryScalarOp):
def impl(self, x):
return numpy.arctan(x)
def grad(self, (x,), (gz,)):
if gz.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz / (numpy.cast[x.type](1) + sqr(x)),
else:
return None,
def c_code(self, node, name, (x,), (z,), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = atan(%(x)s);" % locals()
arctan = ArcTan(upgrade_to_float, name='arctan')
class ArcTan2(BinaryScalarOp):
def impl(self, y, x):
return numpy.arctan2(y, x)
def grad(self, (y, x), (gz,)):
if gz.type in complex_types:
raise NotImplementedError()
if x.type in float_types and y.type in float_types:
return [gz * x / (sqr(x) + sqr(y)),
gz * neg(y) / (sqr(x) + sqr(y))]
else:
return None,
def c_code(self, node, name, (y, x), (z,), sub):
if (node.inputs[0].type in complex_types or
node.inputs[1].type in complex_types):
raise NotImplementedError('type not supported', type)
return "%(z)s = atan2(%(y)s, %(x)s);" % locals()
arctan2 = ArcTan2(upgrade_to_float, name='arctan2')
class Cosh(UnaryScalarOp):
"""
cosh(x) = (exp(x) + exp(-x)) / 2
"""
def impl(self, x):
return numpy.cosh(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz * sinh(x),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = cosh(%(x)s);" % locals()
cosh = Cosh(upgrade_to_float, name='cosh')
class ArcCosh(UnaryScalarOp):
def impl(self, x):
return numpy.arccosh(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz / sqrt(sqr(x) - numpy.cast[x.type](1)),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = acosh(%(x)s);" % locals()
arccosh = ArcCosh(upgrade_to_float, name='arccosh')
class Sinh(UnaryScalarOp):
"""
sinh(x) = (exp(x) - exp(-x)) / 2
"""
def impl(self, x):
return numpy.sinh(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz * cosh(x),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = sinh(%(x)s);" % locals()
sinh = Sinh(upgrade_to_float, name='sinh')
class ArcSinh(UnaryScalarOp):
def impl(self, x):
return numpy.arcsinh(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz / sqrt(sqr(x) + numpy.cast[x.type](1)),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = asinh(%(x)s);" % locals()
arcsinh = ArcSinh(upgrade_to_float, name='arcsinh')
class Tanh(UnaryScalarOp):
"""
tanh(x) = sinh(x) / cosh(x)
= (exp(2*x) - 1) / (exp(2*x) + 1)
"""
def impl(self, x):
return numpy.tanh(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz * (1 - sqr(tanh(x))),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = tanh(%(x)s);" % locals()
tanh = Tanh(upgrade_to_float, name='tanh')
class ArcTanh(UnaryScalarOp):
def impl(self, x):
return numpy.arctanh(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
raise NotImplementedError()
if x.type in float_types:
return gz / (numpy.cast[x.type](1) - sqr(x)),
else:
return None,
def c_code(self, node, name, (x, ), (z, ), sub):
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
return "%(z)s = atanh(%(x)s);" % locals()
arctanh = ArcTanh(upgrade_to_float, name='arctanh')
class Real(UnaryScalarOp):
"""Extract the real coordinate of a complex number. """
def impl(self, x):
return numpy.real(x)
def grad(self, (x, ), (gz, )):
return [complex(gz, 0)]
real = Real(real_out, name='real')
class Imag(UnaryScalarOp):
def impl(self, x):
return numpy.imag(x)
def grad(self, (x, ), (gz, )):
if x.type in complex_types:
return [complex(0, gz)]
elif x.type in float_types:
return [second(x, 0)]
else:
return [None]
imag = Imag(real_out, name='imag')
class Angle(UnaryScalarOp):
def impl(self, x):
return numpy.angle(x)
def grad(self, (c, ), (gtheta, )):
# y = x.imag
# r = sqrt(y**2 + x.real**2)
# g = y/r
# if x == 0 and y == 0:
# theta = 0
# elif x >= 0:
# theta = numpy.arcsin(g)
# else:
# theta = -numpy.arcsin(g)+numpy.pi
x = real(c)
y = imag(c)
r = abs(c)
gr = -gtheta * y / (r ** 2 * sqrt(1 - (y / r) ** 2))
gx = gr * x / r
gy = gr * y / r
if c in complex_types:
return [cast(complex(gx, gy), x.type.dtype)]
elif c in float_types:
return [cast(second(x, 0), x.type.dtype)]
else:
return [None]
angle = Angle(specific_out(float64), name='angle')
class Complex(BinaryScalarOp):
@staticmethod
def output_types_preference(x, y):
if x in complex_types:
raise TypeError(x)
if y in complex_types:
raise TypeError(y)
up = Scalar.upcast(x, y)
if up in ('float64', 'int64', 'uint64', 'int32', 'uint32'):
return [complex128]
else:
return [complex64]
def impl(self, x, y):
return numpy.complex(x, y)
def grad(self, (x, y), (gz,)):
return [cast(real(gz), x.type.dtype),
cast(imag(gz), y.type.dtype)]
complex = Complex(name='complex')
class Conj(UnaryScalarOp):
def impl(self, x):
return numpy.conj(x)
conj = Conj(same_out, name='conj')
class ComplexFromPolar(BinaryScalarOp):
@staticmethod
def output_types_preference(x, y):
return Complex.output_types_preference(x, y)
def impl(self, r, theta):
if r < 0:
raise ValueError('polar radius must be non-negative', r)
x = r * numpy.cos(theta)
y = r * numpy.sin(theta)
if x.dtype == 'float32':
return numpy.complex64(numpy.complex(x, y))
else:
return numpy.complex128(numpy.complex(x, y))
def grad(self, (r, theta), (gz,)):
gr = gz * complex_from_polar(1, theta)
gtheta = gz * complex_from_polar(r, -theta)
return [gr, gtheta]
complex_from_polar = ComplexFromPolar(name='complex_from_polar')
class Composite(ScalarOp):
"""
Composite is an Op that takes a graph of scalar operations and
produces c code for the whole graph. Its purpose is to implement loop
fusion.
Composite depends on all the Ops in its graph having C code.
"""
def __str__(self):
return self.name
def make_new_inplace(self, output_types_preference=None, name=None):
"""
This Op's __init__ does not take the same parameters as other scalar
Ops, which breaks the insert_inplace_optimizer optimization.
This method provides a way to work around that.
"""
out = self.__class__(self.inputs, self.outputs)
if name:
out.name = name
else:
name = out.name
super(Composite, out).__init__(output_types_preference, name)
return out
def init_c_code(self):
"""Return the C code for this Composite Op. """
subd = dict(
zip(self.fgraph.inputs,
["%%(i%i)s" % i for i in xrange(len(self.fgraph.inputs))])
+ zip(self.fgraph.outputs,
["%%(o%i)s" % i for i in xrange(len(self.fgraph.outputs))]))
for orphan in self.fgraph.variables: # fgraph.orphans:
if orphan.owner is None and orphan not in self.fgraph.inputs:
if isinstance(orphan, Constant):
subd[orphan] = orphan.type.c_literal(orphan.data)
else:
raise ValueError(
"All orphans in the fgraph to Composite must"
" be Constant instances.")
_c_code = "{\n"
i = 0
j = 0
self.nodenames = ["%(nodename)s_" + ('subnode%i' % j)
for j, n in enumerate(self.fgraph.toposort())]
for j, node in enumerate(self.fgraph.toposort()):
for output in node.outputs:
if output not in subd:
i += 1
name = "V%%(id)s_tmp%i" % i
subd[output] = name
_c_code += "%s %s;\n" % (
output.type.dtype_specs()[1], name)
s = node.op.c_code(node,
self.nodenames[j],
[subd[input] for input in node.inputs],
[subd[output] for output in node.outputs],
dict(fail="%(fail)s",
id="%%(id)s_%i" % j))
_c_code += s
_c_code += "\n"
_c_code += "}\n"
self._c_code = _c_code
def init_py_impls(self):
"""Return a list of functions that compute each output of self
"""
def compose_impl(r):
# this is not optimal at all eg in add(*1 -> mul(x, y), *1)
# it will calculate *1 twice
# it also doesn't follow fgraph.toposort but that's (presumably)
# still correct since we only have scalar ops
if r in self.fgraph.inputs:
idx = self.fgraph.inputs.index(r)
return lambda inputs: inputs[idx]
elif r.owner is None: # in fgraph.orphans:
return lambda inputs: r.data
node = r.owner
producers = [compose_impl(input) for input in node.inputs]
return lambda inputs: node.op.impl(*[p(inputs) for p in producers])
self._impls = [compose_impl(r) for r in self.fgraph.outputs]
def init_name(self):
"""Return a readable string representation of self.fgraph
"""
try:
rval = self.name
except AttributeError:
if 0:
l = []
for n in self.fgraph.toposort():
if hasattr(n.op, "name") and n.op.name is not None:
v = n.op.name
if v.startswith("Composite"):
v = v[len("Composite"):]
else:
v = n.op.__class__.__name__
l.append(v)
rval = "Composite{" + ",".join(l) + "}"
else:
for i, r in enumerate(self.fgraph.inputs):
r.name = 'i%i' % i
for i, r in enumerate(self.fgraph.outputs):
r.name = 'o%i' % i
io = set(self.fgraph.inputs + self.fgraph.outputs)
for i, r in enumerate(self.fgraph.variables):
if r not in io and len(r.clients) > 1:
r.name = 't%i' % i
rval = "Composite{%s}" % str(self.fgraph)
self.name = rval
def init_fgraph(self):
fgraph = FunctionGraph(*gof.graph.clone(self.inputs, self.outputs))
gof.MergeOptimizer().optimize(fgraph)
for node in fgraph.apply_nodes:
if not isinstance(node.op, ScalarOp):
raise ValueError("The fgraph to Composite must be exclusively"
" composed of ScalarOp instances.")
self.fgraph = fgraph
def __init__(self, inputs, outputs):
self.inputs = copy(inputs)
self.outputs = copy(outputs)
self.inputs_type = tuple([input.type for input in inputs])
self.outputs_type = tuple([output.type for output in outputs])
self.nin = len(inputs)
self.nout = len(outputs)
self.init_fgraph() # self.fgraph
self.init_name() # self.name
self.init_c_code() # self._c_code and self.nodenames
self.init_py_impls() # self._impls
def output_types(self, input_types):
if tuple(input_types) != self.inputs_type:
raise TypeError("Wrong types for Composite. Expected %s, got %s."
% (self.inputs_type, tuple(input_types)))
return self.outputs_type
def make_node(self, *inputs):
if (tuple([i.type for i in self.inputs]) ==
tuple([i.type for i in inputs])):
return super(Composite, self).make_node(*inputs)
else:
# Make a new op with the right input type.
assert len(inputs) == self.nin
res = theano.compile.rebuild_collect_shared(
self.outputs,
replace=dict(zip(self.inputs, inputs)),
rebuild_strict=False)
# After rebuild_collect_shared, the Variable in inputs
# are not necessarily in the graph represented by res.
# res[2][0] is a dict that map from the original variable to the
# cloned variable.
cloned_inputs = [res[2][0][i] for i in inputs]
node = Composite(cloned_inputs, res[1]).make_node(*inputs)
return node
def perform(self, node, inputs, output_storage):
for storage, impl in zip(output_storage, self._impls):
storage[0] = impl(inputs)
def impl(self, *inputs):
output_storage = [[None] for i in xrange(self.nout)]
self.perform(None, inputs, output_storage)
return utils.to_return_values([storage[0] for storage in
output_storage])
def grad(self, inputs, output_grads):
raise NotImplementedError("grad is not implemented for Composite")
def c_code(self, node, nodename, inames, onames, sub):
d = dict(zip(["i%i" % i for i in xrange(len(inames))],
inames) +
zip(["o%i" % i for i in xrange(len(onames))],
onames),
**sub)
d['nodename'] = nodename
if not 'id' in sub:
#The use of a dummy id is safe as the code is in a separate block.
#It won't generate conflicting variable name.
d['id'] = '_DUMMY_ID_'
return self._c_code % d
def c_code_cache_version(self):
rval = [3]
for x in self.fgraph.toposort():
xv = x.op.c_code_cache_version()
if xv:
rval.append(xv)
else:
return ()
return tuple(rval)
def c_support_code(self):
rval = []
for subnode in self.fgraph.toposort():
try:
rval.append(subnode.op.c_support_code())
except gof.utils.MethodNotDefined:
pass
# remove duplicate code blocks
return "\n".join(sorted(set(rval)))
def c_support_code_apply(self, node, name):
rval = []
for subnode, subnodename in zip(self.fgraph.toposort(), self.nodenames):
try:
subnode_support_code = subnode.op.c_support_code_apply(
subnode,
subnodename % dict(nodename=name))
if subnode_support_code:
rval.append(subnode_support_code)
except gof.utils.MethodNotDefined:
pass
# there should be no need to remove duplicate code blocks because
# each block should have been specialized for the given nodename.
# Any block that isn't specialized should be returned via
# c_support_code instead of c_support_code_apply.
return "\n".join(rval)
def __eq__(self, other):
if self is other:
return True
if (type(self) != type(other)
or self.nin != other.nin
or self.nout != other.nout):
return False
# see __hash__ for comment on why there is no mention of fgraph
# or module cache key here.
return (self._c_code == other._c_code)
def __hash__(self):
rval = hash((type(self),
self.nin,
self.nout,
self._c_code))
# Note that in general, the configparser settings at the time
# of code generation (__init__) affect the semantics of this Op.
# This function assumes that all relevant info about the configparser
# is embodied in _c_code. So the _c_code, rather than self.fgraph,
# is the signature of the semantics of this Op.
# _c_code is preserved through unpickling, so the Op will not change
# semantics when it is reloaded with different configparser
# settings.
return rval
def __getstate__(self):
rval = dict(self.__dict__)
del rval['_impls']
del rval['fgraph']
return rval
def __setstate__(self, d):
self.__dict__.update(d)
# We must call init to set fgraph and _impls again, as otherwise
# self.perform will not work.
self.init_fgraph()
self.init_py_impls()
assert self._c_code
| return obj.itemsize |
_newclient.py | # -*- test-case-name: twisted.web.test.test_newclient -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An U{HTTP 1.1<http://www.w3.org/Protocols/rfc2616/rfc2616.html>} client.
The way to use the functionality provided by this module is to:
- Connect a L{HTTP11ClientProtocol} to an HTTP server
- Create a L{Request} with the appropriate data
- Pass the request to L{HTTP11ClientProtocol.request}
- The returned Deferred will fire with a L{Response} object
- Create a L{IProtocol} provider which can handle the response body
- Connect it to the response with L{Response.deliverBody}
- When the protocol's C{connectionLost} method is called, the response is
complete. See L{Response.deliverBody} for details.
Various other classes in this module support this usage:
- HTTPParser is the basic HTTP parser. It can handle the parts of HTTP which
are symmetric between requests and responses.
- HTTPClientParser extends HTTPParser to handle response-specific parts of
HTTP. One instance is created for each request to parse the corresponding
response.
"""
__metaclass__ = type
from zope.interface import implements
from twisted.python import log
from twisted.python.components import proxyForInterface
from twisted.python.reflect import fullyQualifiedName
from twisted.python.failure import Failure
from twisted.internet.interfaces import IConsumer, IPushProducer
from twisted.internet.error import ConnectionDone
from twisted.internet.defer import Deferred, succeed, fail, maybeDeferred
from twisted.internet.defer import CancelledError
from twisted.internet.protocol import Protocol
from twisted.protocols.basic import LineReceiver
from twisted.web.iweb import UNKNOWN_LENGTH, IResponse, IClientRequest
from twisted.web.http_headers import Headers
from twisted.web.http import NO_CONTENT, NOT_MODIFIED
from twisted.web.http import _DataLoss, PotentialDataLoss
from twisted.web.http import _IdentityTransferDecoder, _ChunkedTransferDecoder
# States HTTPParser can be in
STATUS = 'STATUS'
HEADER = 'HEADER'
BODY = 'BODY'
DONE = 'DONE'
class BadHeaders(Exception):
"""
Headers passed to L{Request} were in some way invalid.
"""
class ExcessWrite(Exception):
"""
The body L{IBodyProducer} for a request tried to write data after
indicating it had finished writing data.
"""
class ParseError(Exception):
"""
Some received data could not be parsed.
@ivar data: The string which could not be parsed.
"""
def __init__(self, reason, data):
Exception.__init__(self, reason, data)
self.data = data
class BadResponseVersion(ParseError):
"""
The version string in a status line was unparsable.
"""
class _WrapperException(Exception):
"""
L{_WrapperException} is the base exception type for exceptions which
include one or more other exceptions as the low-level causes.
@ivar reasons: A list of exceptions. See subclass documentation for more
details.
"""
def __init__(self, reasons):
Exception.__init__(self, reasons)
self.reasons = reasons
class RequestGenerationFailed(_WrapperException):
"""
There was an error while creating the bytes which make up a request.
@ivar reasons: A C{list} of one or more L{Failure} instances giving the
reasons the request generation was considered to have failed.
"""
class RequestTransmissionFailed(_WrapperException):
"""
There was an error while sending the bytes which make up a request.
@ivar reasons: A C{list} of one or more L{Failure} instances giving the
reasons the request transmission was considered to have failed.
"""
class ConnectionAborted(Exception):
"""
The connection was explicitly aborted by application code.
"""
class WrongBodyLength(Exception):
"""
An L{IBodyProducer} declared the number of bytes it was going to
produce (via its C{length} attribute) and then produced a different number
of bytes.
"""
class ResponseDone(Exception):
"""
L{ResponseDone} may be passed to L{IProtocol.connectionLost} on the
protocol passed to L{Response.deliverBody} and indicates that the entire
response has been delivered.
"""
class ResponseFailed(_WrapperException):
"""
L{ResponseFailed} indicates that all of the response to a request was not
received for some reason.
@ivar reasons: A C{list} of one or more L{Failure} instances giving the
reasons the response was considered to have failed.
@ivar response: If specified, the L{Response} received from the server (and
in particular the status code and the headers).
"""
def __init__(self, reasons, response=None):
_WrapperException.__init__(self, reasons)
self.response = response
class ResponseNeverReceived(ResponseFailed):
"""
A L{ResponseFailed} that knows no response bytes at all have been received.
"""
class RequestNotSent(Exception):
"""
L{RequestNotSent} indicates that an attempt was made to issue a request but
for reasons unrelated to the details of the request itself, the request
could not be sent. For example, this may indicate that an attempt was made
to send a request using a protocol which is no longer connected to a
server.
"""
def _callAppFunction(function):
"""
Call C{function}. If it raises an exception, log it with a minimal
description of the source.
@return: C{None}
"""
try:
function()
except:
log.err(None, "Unexpected exception from %s" % (
fullyQualifiedName(function),))
class HTTPParser(LineReceiver):
"""
L{HTTPParser} handles the parsing side of HTTP processing. With a suitable
subclass, it can parse either the client side or the server side of the
connection.
@ivar headers: All of the non-connection control message headers yet
received.
@ivar state: State indicator for the response parsing state machine. One
of C{STATUS}, C{HEADER}, C{BODY}, C{DONE}.
@ivar _partialHeader: C{None} or a C{list} of the lines of a multiline
header while that header is being received.
"""
# NOTE: According to HTTP spec, we're supposed to eat the
# 'Proxy-Authenticate' and 'Proxy-Authorization' headers also, but that
# doesn't sound like a good idea to me, because it makes it impossible to
# have a non-authenticating transparent proxy in front of an authenticating
# proxy. An authenticating proxy can eat them itself. -jknight
#
# Further, quoting
# http://homepages.tesco.net/J.deBoynePollard/FGA/web-proxy-connection-header.html
# regarding the 'Proxy-Connection' header:
#
# The Proxy-Connection: header is a mistake in how some web browsers
# use HTTP. Its name is the result of a false analogy. It is not a
# standard part of the protocol. There is a different standard
# protocol mechanism for doing what it does. And its existence
# imposes a requirement upon HTTP servers such that no proxy HTTP
# server can be standards-conforming in practice.
#
# -exarkun
# Some servers (like http://news.ycombinator.com/) return status lines and
# HTTP headers delimited by \n instead of \r\n.
delimiter = '\n'
CONNECTION_CONTROL_HEADERS = set([
'content-length', 'connection', 'keep-alive', 'te', 'trailers',
'transfer-encoding', 'upgrade', 'proxy-connection'])
def connectionMade(self):
self.headers = Headers()
self.connHeaders = Headers()
self.state = STATUS
self._partialHeader = None
def switchToBodyMode(self, decoder):
"""
Switch to body parsing mode - interpret any more bytes delivered as
part of the message body and deliver them to the given decoder.
"""
if self.state == BODY:
raise RuntimeError("already in body mode")
self.bodyDecoder = decoder
self.state = BODY
self.setRawMode()
def lineReceived(self, line):
"""
Handle one line from a response.
"""
# Handle the normal CR LF case.
if line[-1:] == '\r':
line = line[:-1]
if self.state == STATUS:
self.statusReceived(line)
self.state = HEADER
elif self.state == HEADER:
if not line or line[0] not in ' \t':
if self._partialHeader is not None:
header = ''.join(self._partialHeader)
name, value = header.split(':', 1)
value = value.strip()
self.headerReceived(name, value)
if not line:
# Empty line means the header section is over.
self.allHeadersReceived()
else:
# Line not beginning with LWS is another header.
self._partialHeader = [line]
else:
# A line beginning with LWS is a continuation of a header
# begun on a previous line.
self._partialHeader.append(line)
def rawDataReceived(self, data):
"""
Pass data from the message body to the body decoder object.
"""
self.bodyDecoder.dataReceived(data)
def isConnectionControlHeader(self, name):
"""
Return C{True} if the given lower-cased name is the name of a
connection control header (rather than an entity header).
According to RFC 2616, section 14.10, the tokens in the Connection
header are probably relevant here. However, I am not sure what the
practical consequences of either implementing or ignoring that are.
So I leave it unimplemented for the time being.
"""
return name in self.CONNECTION_CONTROL_HEADERS
def statusReceived(self, status):
"""
Callback invoked whenever the first line of a new message is received.
Override this.
@param status: The first line of an HTTP request or response message
without trailing I{CR LF}.
@type status: C{str}
"""
def headerReceived(self, name, value):
"""
Store the given header in C{self.headers}.
"""
name = name.lower()
if self.isConnectionControlHeader(name):
headers = self.connHeaders
else:
headers = self.headers
headers.addRawHeader(name, value)
def allHeadersReceived(self):
"""
Callback invoked after the last header is passed to C{headerReceived}.
Override this to change to the C{BODY} or C{DONE} state.
"""
self.switchToBodyMode(None)
class HTTPClientParser(HTTPParser):
"""
An HTTP parser which only handles HTTP responses.
@ivar request: The request with which the expected response is associated.
@type request: L{Request}
@ivar NO_BODY_CODES: A C{set} of response codes which B{MUST NOT} have a
body.
@ivar finisher: A callable to invoke when this response is fully parsed.
@ivar _responseDeferred: A L{Deferred} which will be called back with the
response when all headers in the response have been received.
Thereafter, C{None}.
@ivar _everReceivedData: C{True} if any bytes have been received.
"""
NO_BODY_CODES = set([NO_CONTENT, NOT_MODIFIED])
_transferDecoders = {
'chunked': _ChunkedTransferDecoder,
}
bodyDecoder = None
def __init__(self, request, finisher):
self.request = request
self.finisher = finisher
self._responseDeferred = Deferred()
self._everReceivedData = False
def dataReceived(self, data):
"""
Override so that we know if any response has been received.
"""
self._everReceivedData = True
HTTPParser.dataReceived(self, data)
def parseVersion(self, strversion):
"""
Parse version strings of the form Protocol '/' Major '.' Minor. E.g.
'HTTP/1.1'. Returns (protocol, major, minor). Will raise ValueError
on bad syntax.
"""
try:
proto, strnumber = strversion.split('/')
major, minor = strnumber.split('.')
major, minor = int(major), int(minor)
        except ValueError as e:
raise BadResponseVersion(str(e), strversion)
if major < 0 or minor < 0:
raise BadResponseVersion("version may not be negative", strversion)
return (proto, major, minor)
def statusReceived(self, status):
"""
Parse the status line into its components and create a response object
to keep track of this response's state.
"""
parts = status.split(' ', 2)
if len(parts) != 3:
raise ParseError("wrong number of parts", status)
try:
statusCode = int(parts[1])
except ValueError:
raise ParseError("non-integer status code", status)
self.response = Response._construct(
self.parseVersion(parts[0]),
statusCode,
parts[2],
self.headers,
self.transport,
self.request)
def _finished(self, rest):
"""
Called to indicate that an entire response has been received. No more
bytes will be interpreted by this L{HTTPClientParser}. Extra bytes are
passed up and the state of this L{HTTPClientParser} is set to I{DONE}.
@param rest: A C{str} giving any extra bytes delivered to this
L{HTTPClientParser} which are not part of the response being
parsed.
"""
self.state = DONE
self.finisher(rest)
def isConnectionControlHeader(self, name):
"""
Content-Length in the response to a HEAD request is an entity header,
not a connection control header.
"""
if self.request.method == 'HEAD' and name == 'content-length':
return False
return HTTPParser.isConnectionControlHeader(self, name)
def allHeadersReceived(self):
"""
Figure out how long the response body is going to be by examining
headers and stuff.
"""
if (self.response.code in self.NO_BODY_CODES
or self.request.method == 'HEAD'):
self.response.length = 0
# The order of the next two lines might be of interest when adding
# support for pipelining.
self._finished(self.clearLineBuffer())
self.response._bodyDataFinished()
else:
transferEncodingHeaders = self.connHeaders.getRawHeaders(
'transfer-encoding')
if transferEncodingHeaders:
# This could be a KeyError. However, that would mean we do not
# know how to decode the response body, so failing the request
# is as good a behavior as any. Perhaps someday we will want
# to normalize/document/test this specifically, but failing
# seems fine to me for now.
transferDecoder = self._transferDecoders[transferEncodingHeaders[0].lower()]
# If anyone ever invents a transfer encoding other than
# chunked (yea right), and that transfer encoding can predict
# the length of the response body, it might be sensible to
# allow the transfer decoder to set the response object's
# length attribute.
else:
contentLengthHeaders = self.connHeaders.getRawHeaders('content-length')
if contentLengthHeaders is None:
contentLength = None
elif len(contentLengthHeaders) == 1:
contentLength = int(contentLengthHeaders[0])
self.response.length = contentLength
else:
# "HTTP Message Splitting" or "HTTP Response Smuggling"
# potentially happening. Or it's just a buggy server.
raise ValueError(
"Too many Content-Length headers; response is invalid")
if contentLength == 0:
self._finished(self.clearLineBuffer())
transferDecoder = None
else:
transferDecoder = lambda x, y: _IdentityTransferDecoder(
contentLength, x, y)
if transferDecoder is None:
self.response._bodyDataFinished()
else:
# Make sure as little data as possible from the response body
# gets delivered to the response object until the response
# object actually indicates it is ready to handle bytes
# (probably because an application gave it a way to interpret
# them).
self.transport.pauseProducing()
self.switchToBodyMode(transferDecoder(
self.response._bodyDataReceived,
self._finished))
# This must be last. If it were first, then application code might
# change some state (for example, registering a protocol to receive the
# response body). Then the pauseProducing above would be wrong since
# the response is ready for bytes and nothing else would ever resume
# the transport.
self._responseDeferred.callback(self.response)
del self._responseDeferred
def connectionLost(self, reason):
if self.bodyDecoder is not None:
try:
try:
self.bodyDecoder.noMoreData()
except PotentialDataLoss:
self.response._bodyDataFinished(Failure())
except _DataLoss:
self.response._bodyDataFinished(
Failure(ResponseFailed([reason, Failure()],
self.response)))
else:
self.response._bodyDataFinished()
except:
# Handle exceptions from both the except suites and the else
# suite. Those functions really shouldn't raise exceptions,
# but maybe there's some buggy application code somewhere
# making things difficult.
log.err()
elif self.state != DONE:
if self._everReceivedData:
exceptionClass = ResponseFailed
else:
exceptionClass = ResponseNeverReceived
self._responseDeferred.errback(Failure(exceptionClass([reason])))
del self._responseDeferred
class Request:
"""
A L{Request} instance describes an HTTP request to be sent to an HTTP
server.
@ivar method: See L{__init__}.
@ivar uri: See L{__init__}.
@ivar headers: See L{__init__}.
@ivar bodyProducer: See L{__init__}.
@ivar persistent: See L{__init__}.
@ivar _parsedURI: Parsed I{URI} for the request, or C{None}.
@type _parsedURI: L{_URI}
"""
implements(IClientRequest)
def __init__(self, method, uri, headers, bodyProducer, persistent=False):
"""
        @param method: The HTTP method for this request, ex: 'GET', 'HEAD',
'POST', etc.
@type method: L{str}
@param uri: The relative URI of the resource to request. For example,
C{'/foo/bar?baz=quux'}.
@type uri: L{str}
@param headers: Headers to be sent to the server. It is important to
note that this object does not create any implicit headers. So it
is up to the HTTP Client to add required headers such as 'Host'.
@type headers: L{twisted.web.http_headers.Headers}
@param bodyProducer: C{None} or an L{IBodyProducer} provider which
produces the content body to send to the remote HTTP server.
@param persistent: Set to C{True} when you use HTTP persistent
connection, defaults to C{False}.
@type persistent: L{bool}
"""
self.method = method
self.uri = uri
self.headers = headers
self.bodyProducer = bodyProducer
self.persistent = persistent
self._parsedURI = None
@classmethod
def _construct(cls, method, uri, headers, bodyProducer, persistent=False,
parsedURI=None):
"""
Private constructor.
@param method: See L{__init__}.
@param uri: See L{__init__}.
@param headers: See L{__init__}.
@param bodyProducer: See L{__init__}.
@param persistent: See L{__init__}.
@param parsedURI: See L{Request._parsedURI}.
@return: L{Request} instance.
"""
request = cls(method, uri, headers, bodyProducer, persistent)
request._parsedURI = parsedURI
return request
@property
def absoluteURI(self):
"""
The absolute URI of the request as C{bytes}, or C{None} if the
absolute URI cannot be determined.
"""
return getattr(self._parsedURI, 'toBytes', lambda: None)()
def _writeHeaders(self, transport, TEorCL):
hosts = self.headers.getRawHeaders('host', ())
if len(hosts) != 1:
raise BadHeaders("Exactly one Host header required")
# In the future, having the protocol version be a parameter to this
# method would probably be good. It would be nice if this method
        # weren't limited to issuing HTTP/1.1 requests.
requestLines = []
requestLines.append(
'%s %s HTTP/1.1\r\n' % (self.method, self.uri))
if not self.persistent:
requestLines.append('Connection: close\r\n')
if TEorCL is not None:
requestLines.append(TEorCL)
for name, values in self.headers.getAllRawHeaders():
requestLines.extend(['%s: %s\r\n' % (name, v) for v in values])
requestLines.append('\r\n')
transport.writeSequence(requestLines)
def _writeToChunked(self, transport):
"""
Write this request to the given transport using chunked
transfer-encoding to frame the body.
"""
self._writeHeaders(transport, 'Transfer-Encoding: chunked\r\n')
encoder = ChunkedEncoder(transport)
encoder.registerProducer(self.bodyProducer, True)
d = self.bodyProducer.startProducing(encoder)
def cbProduced(ignored):
encoder.unregisterProducer()
def ebProduced(err):
encoder._allowNoMoreWrites()
# Don't call the encoder's unregisterProducer because it will write
# a zero-length chunk. This would indicate to the server that the
# request body is complete. There was an error, though, so we
# don't want to do that.
transport.unregisterProducer()
return err
d.addCallbacks(cbProduced, ebProduced)
return d
def _writeToContentLength(self, transport):
"""
Write this request to the given transport using content-length to frame
the body.
"""
self._writeHeaders(
transport,
'Content-Length: %d\r\n' % (self.bodyProducer.length,))
# This Deferred is used to signal an error in the data written to the
# encoder below. It can only errback and it will only do so before too
# many bytes have been written to the encoder and before the producer
# Deferred fires.
finishedConsuming = Deferred()
# This makes sure the producer writes the correct number of bytes for
# the request body.
encoder = LengthEnforcingConsumer(
self.bodyProducer, transport, finishedConsuming)
transport.registerProducer(self.bodyProducer, True)
finishedProducing = self.bodyProducer.startProducing(encoder)
def combine(consuming, producing):
# This Deferred is returned and will be fired when the first of
# consuming or producing fires. If it's cancelled, forward that
# cancellation to the producer.
def cancelConsuming(ign):
finishedProducing.cancel()
ultimate = Deferred(cancelConsuming)
# Keep track of what has happened so far. This initially
# contains None, then an integer uniquely identifying what
# sequence of events happened. See the callbacks and errbacks
# defined below for the meaning of each value.
state = [None]
def ebConsuming(err):
if state == [None]:
# The consuming Deferred failed first. This means the
# overall writeTo Deferred is going to errback now. The
# producing Deferred should not fire later (because the
# consumer should have called stopProducing on the
# producer), but if it does, a callback will be ignored
# and an errback will be logged.
state[0] = 1
ultimate.errback(err)
else:
# The consuming Deferred errbacked after the producing
# Deferred fired. This really shouldn't ever happen.
# If it does, I goofed. Log the error anyway, just so
# there's a chance someone might notice and complain.
log.err(
err,
"Buggy state machine in %r/[%d]: "
"ebConsuming called" % (self, state[0]))
def cbProducing(result):
if state == [None]:
# The producing Deferred succeeded first. Nothing will
# ever happen to the consuming Deferred. Tell the
# encoder we're done so it can check what the producer
# wrote and make sure it was right.
state[0] = 2
try:
encoder._noMoreWritesExpected()
except:
# Fail the overall writeTo Deferred - something the
# producer did was wrong.
ultimate.errback()
else:
# Success - succeed the overall writeTo Deferred.
ultimate.callback(None)
# Otherwise, the consuming Deferred already errbacked. The
# producing Deferred wasn't supposed to fire, but it did
# anyway. It's buggy, but there's not really anything to be
# done about it. Just ignore this result.
def ebProducing(err):
if state == [None]:
# The producing Deferred failed first. This means the
# overall writeTo Deferred is going to errback now.
# Tell the encoder that we're done so it knows to reject
# further writes from the producer (which should not
# happen, but the producer may be buggy).
state[0] = 3
encoder._allowNoMoreWrites()
ultimate.errback(err)
else:
# The producing Deferred failed after the consuming
# Deferred failed. It shouldn't have, so it's buggy.
# Log the exception in case anyone who can fix the code
# is watching.
log.err(err, "Producer is buggy")
consuming.addErrback(ebConsuming)
producing.addCallbacks(cbProducing, ebProducing)
return ultimate
d = combine(finishedConsuming, finishedProducing)
def f(passthrough):
# Regardless of what happens with the overall Deferred, once it
# fires, the producer registered way up above the definition of
# combine should be unregistered.
transport.unregisterProducer()
return passthrough
d.addBoth(f)
return d
def writeTo(self, transport):
"""
Format this L{Request} as an HTTP/1.1 request and write it to the given
transport. If bodyProducer is not None, it will be associated with an
L{IConsumer}.
@return: A L{Deferred} which fires with C{None} when the request has
been completely written to the transport or with a L{Failure} if
there is any problem generating the request bytes.
"""
if self.bodyProducer is not None:
if self.bodyProducer.length is UNKNOWN_LENGTH:
return self._writeToChunked(transport)
else:
return self._writeToContentLength(transport)
else:
self._writeHeaders(transport, None)
return succeed(None)
def stopWriting(self):
"""
Stop writing this request to the transport. This can only be called
after C{writeTo} and before the L{Deferred} returned by C{writeTo}
fires. It should cancel any asynchronous task started by C{writeTo}.
The L{Deferred} returned by C{writeTo} need not be fired if this method
is called.
"""
# If bodyProducer is None, then the Deferred returned by writeTo has
# fired already and this method cannot be called.
_callAppFunction(self.bodyProducer.stopProducing)
class LengthEnforcingConsumer:
"""
An L{IConsumer} proxy which enforces an exact length requirement on the
total data written to it.
@ivar _length: The number of bytes remaining to be written.
@ivar _producer: The L{IBodyProducer} which is writing to this
consumer.
@ivar _consumer: The consumer to which at most C{_length} bytes will be
forwarded.
@ivar _finished: A L{Deferred} which will be fired with a L{Failure} if too
many bytes are written to this consumer.
"""
def __init__(self, producer, consumer, finished):
self._length = producer.length
self._producer = producer
self._consumer = consumer
self._finished = finished
def _allowNoMoreWrites(self):
"""
Indicate that no additional writes are allowed. Attempts to write
after calling this method will be met with an exception.
"""
self._finished = None
def write(self, bytes):
"""
Write C{bytes} to the underlying consumer unless
C{_noMoreWritesExpected} has been called or there are/have been too
many bytes.
"""
if self._finished is None:
# No writes are supposed to happen any more. Try to convince the
# calling code to stop calling this method by calling its
# stopProducing method and then throwing an exception at it. This
# exception isn't documented as part of the API because you're
# never supposed to expect it: only buggy code will ever receive
# it.
self._producer.stopProducing()
raise ExcessWrite()
if len(bytes) <= self._length:
self._length -= len(bytes)
self._consumer.write(bytes)
else:
# No synchronous exception is raised in *this* error path because
# we still have _finished which we can use to report the error to a
# better place than the direct caller of this method (some
# arbitrary application code).
_callAppFunction(self._producer.stopProducing)
self._finished.errback(WrongBodyLength("too many bytes written"))
self._allowNoMoreWrites()
def _noMoreWritesExpected(self):
"""
Called to indicate no more bytes will be written to this consumer.
Check to see that the correct number have been written.
@raise WrongBodyLength: If not enough bytes have been written.
"""
if self._finished is not None:
self._allowNoMoreWrites()
if self._length:
raise WrongBodyLength("too few bytes written")
def makeStatefulDispatcher(name, template):
"""
Given a I{dispatch} name and a function, return a function which can be
used as a method and which, when called, will call another method defined
on the instance and return the result. The other method which is called is
determined by the value of the C{_state} attribute of the instance.
@param name: A string which is used to construct the name of the subsidiary
method to invoke. The subsidiary method is named like C{'_%s_%s' %
(name, _state)}.
@param template: A function object which is used to give the returned
function a docstring.
@return: The dispatcher function.
"""
def dispatcher(self, *args, **kwargs):
func = getattr(self, '_' + name + '_' + self._state, None)
if func is None:
raise RuntimeError(
"%r has no %s method in state %s" % (self, name, self._state))
return func(*args, **kwargs)
dispatcher.__doc__ = template.__doc__
return dispatcher
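# For example, the classes below define one method per state and then replace
# the public method with the dispatcher so that calls are routed on self._state:
#
#     def deliverBody(self, protocol):
#         """docstring used as the template"""
#     deliverBody = makeStatefulDispatcher('deliverBody', deliverBody)
#
# A call to deliverBody() is then dispatched to _deliverBody_INITIAL,
# _deliverBody_CONNECTED, and so on.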
class Response:
"""
A L{Response} instance describes an HTTP response received from an HTTP
server.
L{Response} should not be subclassed or instantiated.
@ivar _transport: See L{__init__}.
@ivar _bodyProtocol: The L{IProtocol} provider to which the body is
delivered. C{None} before one has been registered with
C{deliverBody}.
@ivar _bodyBuffer: A C{list} of the strings passed to C{bodyDataReceived}
before C{deliverBody} is called. C{None} afterwards.
@ivar _state: Indicates what state this L{Response} instance is in,
particularly with respect to delivering bytes from the response body
        to an application-supplied protocol object. This may be one of
C{'INITIAL'}, C{'CONNECTED'}, C{'DEFERRED_CLOSE'}, or C{'FINISHED'},
with the following meanings:
- INITIAL: This is the state L{Response} objects start in. No
protocol has yet been provided and the underlying transport may
still have bytes to deliver to it.
- DEFERRED_CLOSE: If the underlying transport indicates all bytes
have been delivered but no application-provided protocol is yet
available, the L{Response} moves to this state. Data is
buffered and waiting for a protocol to be delivered to.
- CONNECTED: If a protocol is provided when the state is INITIAL,
the L{Response} moves to this state. Any buffered data is
delivered and any data which arrives from the transport
subsequently is given directly to the protocol.
- FINISHED: If a protocol is provided in the DEFERRED_CLOSE state,
the L{Response} moves to this state after delivering all
buffered data to the protocol. Otherwise, if the L{Response} is
in the CONNECTED state, if the transport indicates there is no
more data, the L{Response} moves to this state. Nothing else
can happen once the L{Response} is in this state.
"""
implements(IResponse)
length = UNKNOWN_LENGTH
_bodyProtocol = None
_bodyFinished = False
def __init__(self, version, code, phrase, headers, _transport):
"""
@param version: HTTP version components protocol, major, minor. E.g.
C{('HTTP', 1, 1)} to mean C{'HTTP/1.1'}.
@param code: HTTP status code.
@type code: L{int}
@param phrase: HTTP reason phrase, intended to give a short description
of the HTTP status code.
@param headers: HTTP response headers.
@type headers: L{twisted.web.http_headers.Headers}
@param _transport: The transport which is delivering this response.
"""
self.version = version
self.code = code
self.phrase = phrase
self.headers = headers
self._transport = _transport
self._bodyBuffer = []
self._state = 'INITIAL'
self.request = None
self.previousResponse = None
@classmethod
def _construct(cls, version, code, phrase, headers, _transport, request):
"""
Private constructor.
@param version: See L{__init__}.
@param code: See L{__init__}.
@param phrase: See L{__init__}.
@param headers: See L{__init__}.
@param _transport: See L{__init__}.
@param request: See L{IResponse.request}.
@return: L{Response} instance.
"""
response = Response(version, code, phrase, headers, _transport)
response.request = proxyForInterface(IClientRequest)(request)
return response
def setPreviousResponse(self, previousResponse):
self.previousResponse = previousResponse
def deliverBody(self, protocol):
"""
        Dispatch the given L{IProtocol} depending on the current state of the
response.
"""
deliverBody = makeStatefulDispatcher('deliverBody', deliverBody)
def _deliverBody_INITIAL(self, protocol):
"""
Deliver any buffered data to C{protocol} and prepare to deliver any
future data to it. Move to the C{'CONNECTED'} state.
"""
# Now that there's a protocol to consume the body, resume the
# transport. It was previously paused by HTTPClientParser to avoid
# reading too much data before it could be handled.
self._transport.resumeProducing()
protocol.makeConnection(self._transport)
self._bodyProtocol = protocol
for data in self._bodyBuffer:
self._bodyProtocol.dataReceived(data)
self._bodyBuffer = None
self._state = 'CONNECTED'
def _deliverBody_CONNECTED(self, protocol):
"""
It is invalid to attempt to deliver data to a protocol when it is
already being delivered to another protocol.
"""
raise RuntimeError(
"Response already has protocol %r, cannot deliverBody "
"again" % (self._bodyProtocol,))
def _deliverBody_DEFERRED_CLOSE(self, protocol):
"""
Deliver any buffered data to C{protocol} and then disconnect the
protocol. Move to the C{'FINISHED'} state.
"""
# Unlike _deliverBody_INITIAL, there is no need to resume the
# transport here because all of the response data has been received
# already. Some higher level code may want to resume the transport if
# that code expects further data to be received over it.
protocol.makeConnection(self._transport)
for data in self._bodyBuffer:
protocol.dataReceived(data)
self._bodyBuffer = None
protocol.connectionLost(self._reason)
self._state = 'FINISHED'
def _deliverBody_FINISHED(self, protocol):
"""
It is invalid to attempt to deliver data to a protocol after the
response body has been delivered to another protocol.
"""
raise RuntimeError(
"Response already finished, cannot deliverBody now.")
def _bodyDataReceived(self, data):
"""
Called by HTTPClientParser with chunks of data from the response body.
They will be buffered or delivered to the protocol passed to
deliverBody.
"""
_bodyDataReceived = makeStatefulDispatcher('bodyDataReceived',
_bodyDataReceived)
def _bodyDataReceived_INITIAL(self, data):
"""
Buffer any data received for later delivery to a protocol passed to
        C{deliverBody}.
        Little or no data should be buffered by this method, since the
transport has been paused and will not be resumed until a protocol
is supplied.
"""
self._bodyBuffer.append(data)
def _bodyDataReceived_CONNECTED(self, data):
"""
Deliver any data received to the protocol to which this L{Response}
is connected.
"""
self._bodyProtocol.dataReceived(data)
def _bodyDataReceived_DEFERRED_CLOSE(self, data):
"""
It is invalid for data to be delivered after it has been indicated
that the response body has been completely delivered.
"""
raise RuntimeError("Cannot receive body data after _bodyDataFinished")
def _bodyDataReceived_FINISHED(self, data):
"""
It is invalid for data to be delivered after the response body has
been delivered to a protocol.
"""
raise RuntimeError("Cannot receive body data after protocol disconnected")
def _bodyDataFinished(self, reason=None):
"""
Called by HTTPClientParser when no more body data is available. If the
optional reason is supplied, this indicates a problem or potential
problem receiving all of the response body.
"""
_bodyDataFinished = makeStatefulDispatcher('bodyDataFinished',
_bodyDataFinished)
def _bodyDataFinished_INITIAL(self, reason=None):
"""
Move to the C{'DEFERRED_CLOSE'} state to wait for a protocol to
which to deliver the response body.
"""
self._state = 'DEFERRED_CLOSE'
if reason is None:
reason = Failure(ResponseDone("Response body fully received"))
self._reason = reason
def _bodyDataFinished_CONNECTED(self, reason=None):
"""
Disconnect the protocol and move to the C{'FINISHED'} state.
"""
if reason is None:
reason = Failure(ResponseDone("Response body fully received"))
self._bodyProtocol.connectionLost(reason)
self._bodyProtocol = None
self._state = 'FINISHED'
def _bodyDataFinished_DEFERRED_CLOSE(self):
"""
It is invalid to attempt to notify the L{Response} of the end of the
response body data more than once.
"""
raise RuntimeError("Cannot finish body data more than once")
def _bodyDataFinished_FINISHED(self):
"""
It is invalid to attempt to notify the L{Response} of the end of the
response body data more than once.
"""
raise RuntimeError("Cannot finish body data after protocol disconnected")
class ChunkedEncoder:
"""
Helper object which exposes L{IConsumer} on top of L{HTTP11ClientProtocol}
for streaming request bodies to the server.
"""
implements(IConsumer)
def __init__(self, transport):
self.transport = transport
def _allowNoMoreWrites(self):
"""
Indicate that no additional writes are allowed. Attempts to write
after calling this method will be met with an exception.
"""
self.transport = None
def registerProducer(self, producer, streaming):
"""
Register the given producer with C{self.transport}.
"""
self.transport.registerProducer(producer, streaming)
def write(self, data):
"""
Write the given request body bytes to the transport using chunked
encoding.
@type data: C{str}
"""
if self.transport is None:
raise ExcessWrite()
self.transport.writeSequence(("%x\r\n" % len(data), data, "\r\n"))
def unregisterProducer(self):
"""
Indicate that the request body is complete and finish the request.
"""
self.write('')
self.transport.unregisterProducer()
self._allowNoMoreWrites()
class TransportProxyProducer:
"""
An L{IPushProducer} implementation which wraps another such thing and
proxies calls to it until it is told to stop.
@ivar _producer: The wrapped L{IPushProducer} provider or C{None} after
this proxy has been stopped.
"""
implements(IPushProducer)
# LineReceiver uses this undocumented attribute of transports to decide
# when to stop calling lineReceived or rawDataReceived (if it finds it to
# be true, it doesn't bother to deliver any more data). Set disconnecting
# to False here and never change it to true so that all data is always
# delivered to us and so that LineReceiver doesn't fail with an
# AttributeError.
disconnecting = False
def __init__(self, producer):
self._producer = producer
def _stopProxying(self):
"""
Stop forwarding calls of L{IPushProducer} methods to the underlying
        L{IPushProducer} provider.
"""
self._producer = None
def stopProducing(self):
"""
Proxy the stoppage to the underlying producer, unless this proxy has
been stopped.
"""
if self._producer is not None:
self._producer.stopProducing()
def resumeProducing(self):
"""
Proxy the resumption to the underlying producer, unless this proxy has
been stopped.
"""
if self._producer is not None:
self._producer.resumeProducing()
def pauseProducing(self):
"""
Proxy the pause to the underlying producer, unless this proxy has been
stopped.
"""
if self._producer is not None:
self._producer.pauseProducing()
class HTTP11ClientProtocol(Protocol):
"""
L{HTTP11ClientProtocol} is an implementation of the HTTP 1.1 client
protocol. It supports as few features as possible.
@ivar _parser: After a request is issued, the L{HTTPClientParser} to
which received data making up the response to that request is
delivered.
@ivar _finishedRequest: After a request is issued, the L{Deferred} which
will fire when a L{Response} object corresponding to that request is
available. This allows L{HTTP11ClientProtocol} to fail the request
if there is a connection or parsing problem.
@ivar _currentRequest: After a request is issued, the L{Request}
instance used to make that request. This allows
L{HTTP11ClientProtocol} to stop request generation if necessary (for
example, if the connection is lost).
@ivar _transportProxy: After a request is issued, the
L{TransportProxyProducer} to which C{_parser} is connected. This
allows C{_parser} to pause and resume the transport in a way which
L{HTTP11ClientProtocol} can exert some control over.
@ivar _responseDeferred: After a request is issued, the L{Deferred} from
C{_parser} which will fire with a L{Response} when one has been
received. This is eventually chained with C{_finishedRequest}, but
only in certain cases to avoid double firing that Deferred.
@ivar _state: Indicates what state this L{HTTP11ClientProtocol} instance
is in with respect to transmission of a request and reception of a
response. This may be one of the following strings:
- QUIESCENT: This is the state L{HTTP11ClientProtocol} instances
start in. Nothing is happening: no request is being sent and no
response is being received or expected.
- TRANSMITTING: When a request is made (via L{request}), the
instance moves to this state. L{Request.writeTo} has been used
to start to send a request but it has not yet finished.
          - TRANSMITTING_AFTER_RECEIVING_RESPONSE: The server has returned a
            complete response but the request has not yet been fully sent.
            The instance will remain in this state until the request is
            fully sent.
          - GENERATION_FAILED: There was an error while generating the
            request. The request was not fully sent to the network.
- WAITING: The request was fully sent to the network. The
instance is now waiting for the response to be fully received.
- ABORTING: Application code has requested that the HTTP connection
be aborted.
- CONNECTION_LOST: The connection has been lost.
@ivar _abortDeferreds: A list of C{Deferred} instances that will fire when
the connection is lost.
"""
_state = 'QUIESCENT'
_parser = None
_finishedRequest = None
_currentRequest = None
_transportProxy = None
_responseDeferred = None
def __init__(self, quiescentCallback=lambda c: None):
self._quiescentCallback = quiescentCallback
self._abortDeferreds = []
@property
def state(self):
return self._state
def request(self, request):
"""
Issue C{request} over C{self.transport} and return a L{Deferred} which
will fire with a L{Response} instance or an error.
@param request: The object defining the parameters of the request to
issue.
@type request: L{Request}
@rtype: L{Deferred}
@return: The deferred may errback with L{RequestGenerationFailed} if
the request was not fully written to the transport due to a local
error. It may errback with L{RequestTransmissionFailed} if it was
not fully written to the transport due to a network error. It may
errback with L{ResponseFailed} if the request was sent (not
necessarily received) but some or all of the response was lost. It
may errback with L{RequestNotSent} if it is not possible to send
any more requests using this L{HTTP11ClientProtocol}.
"""
if self._state != 'QUIESCENT':
return fail(RequestNotSent())
self._state = 'TRANSMITTING'
_requestDeferred = maybeDeferred(request.writeTo, self.transport)
def cancelRequest(ign):
# Explicitly cancel the request's deferred if it's still trying to
# write when this request is cancelled.
if self._state in (
'TRANSMITTING', 'TRANSMITTING_AFTER_RECEIVING_RESPONSE'):
_requestDeferred.cancel()
else:
self.transport.abortConnection()
self._disconnectParser(Failure(CancelledError()))
self._finishedRequest = Deferred(cancelRequest)
# Keep track of the Request object in case we need to call stopWriting
# on it.
self._currentRequest = request
self._transportProxy = TransportProxyProducer(self.transport)
self._parser = HTTPClientParser(request, self._finishResponse)
self._parser.makeConnection(self._transportProxy)
self._responseDeferred = self._parser._responseDeferred
        def cbRequestWritten(ignored):
if self._state == 'TRANSMITTING':
self._state = 'WAITING'
self._responseDeferred.chainDeferred(self._finishedRequest)
def ebRequestWriting(err):
if self._state == 'TRANSMITTING':
self._state = 'GENERATION_FAILED'
self.transport.abortConnection()
self._finishedRequest.errback(
Failure(RequestGenerationFailed([err])))
else:
log.err(err, 'Error writing request, but not in valid state '
'to finalize request: %s' % self._state)
        _requestDeferred.addCallbacks(cbRequestWritten, ebRequestWriting)
return self._finishedRequest
def _finishResponse(self, rest):
"""
Called by an L{HTTPClientParser} to indicate that it has parsed a
complete response.
@param rest: A C{str} giving any trailing bytes which were given to
the L{HTTPClientParser} which were not part of the response it
was parsing.
"""
_finishResponse = makeStatefulDispatcher('finishResponse', _finishResponse)
def _finishResponse_WAITING(self, rest):
# Currently the rest parameter is ignored. Don't forget to use it if
# we ever add support for pipelining. And maybe check what trailers
# mean.
if self._state == 'WAITING':
self._state = 'QUIESCENT'
else:
# The server sent the entire response before we could send the
# whole request. That sucks. Oh well. Fire the request()
# Deferred with the response. But first, make sure that if the
# request does ever finish being written that it won't try to fire
# that Deferred.
self._state = 'TRANSMITTING_AFTER_RECEIVING_RESPONSE'
self._responseDeferred.chainDeferred(self._finishedRequest)
# This will happen if we're being called due to connection being lost;
# if so, no need to disconnect parser again, or to call
# _quiescentCallback.
if self._parser is None:
return
reason = ConnectionDone("synthetic!")
connHeaders = self._parser.connHeaders.getRawHeaders('connection', ())
if (('close' in connHeaders) or self._state != "QUIESCENT" or
not self._currentRequest.persistent):
self._giveUp(Failure(reason))
else:
# Just in case we had paused the transport, resume it before
# considering it quiescent again.
self.transport.resumeProducing()
# We call the quiescent callback first, to ensure connection gets
# added back to connection pool before we finish the request.
try:
self._quiescentCallback(self)
except:
# If callback throws exception, just log it and disconnect;
# keeping persistent connections around is an optimisation:
log.err()
self.transport.loseConnection()
self._disconnectParser(reason)
_finishResponse_TRANSMITTING = _finishResponse_WAITING
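    # The server may send its entire response while the request is still being
    # transmitted, so the TRANSMITTING state shares this handler with WAITING;
    # the handler checks self._state to tell the two cases apart.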
def _disconnectParser(self, reason):
"""
If there is still a parser, call its C{connectionLost} method with the
given reason. If there is not, do nothing.
@type reason: L{Failure}
"""
if self._parser is not None:
parser = self._parser
self._parser = None
self._currentRequest = None
self._finishedRequest = None
self._responseDeferred = None
# The parser is no longer allowed to do anything to the real
# transport. Stop proxying from the parser's transport to the real
# transport before telling the parser it's done so that it can't do
# anything.
self._transportProxy._stopProxying()
self._transportProxy = None
parser.connectionLost(reason)
def _giveUp(self, reason):
"""
Lose the underlying connection and disconnect the parser with the given
L{Failure}.
Use this method instead of calling the transport's loseConnection
method directly otherwise random things will break.
"""
self.transport.loseConnection()
self._disconnectParser(reason)
def dataReceived(self, bytes):
"""
        Handle data received from the transport by delivering it to the
        response parser, giving up on the request if parsing fails.
"""
try:
self._parser.dataReceived(bytes)
except:
self._giveUp(Failure())
def connectionLost(self, reason):
"""
The underlying transport went away. If appropriate, notify the parser
object.
"""
connectionLost = makeStatefulDispatcher('connectionLost', connectionLost)
def _connectionLost_QUIESCENT(self, reason):
"""
Nothing is currently happening. Move to the C{'CONNECTION_LOST'}
state but otherwise do nothing.
"""
self._state = 'CONNECTION_LOST'
def _connectionLost_GENERATION_FAILED(self, reason):
"""
The connection was in an inconsistent state. Move to the
C{'CONNECTION_LOST'} state but otherwise do nothing.
"""
self._state = 'CONNECTION_LOST'
def _connectionLost_TRANSMITTING(self, reason):
"""
Fail the L{Deferred} for the current request, notify the request
object that it does not need to continue transmitting itself, and
move to the C{'CONNECTION_LOST'} state.
"""
self._state = 'CONNECTION_LOST'
self._finishedRequest.errback(
Failure(RequestTransmissionFailed([reason])))
del self._finishedRequest
# Tell the request that it should stop bothering now.
self._currentRequest.stopWriting()
def _connectionLost_TRANSMITTING_AFTER_RECEIVING_RESPONSE(self, reason):
"""
Move to the C{'CONNECTION_LOST'} state.
"""
self._state = 'CONNECTION_LOST'
def _connectionLost_WAITING(self, reason):
"""
Disconnect the response parser so that it can propagate the event as
necessary (for example, to call an application protocol's
C{connectionLost} method, or to fail a request L{Deferred}) and move
to the C{'CONNECTION_LOST'} state.
"""
self._disconnectParser(reason)
self._state = 'CONNECTION_LOST'
def _connectionLost_ABORTING(self, reason):
"""
Disconnect the response parser with a L{ConnectionAborted} failure, and
move to the C{'CONNECTION_LOST'} state.
"""
self._disconnectParser(Failure(ConnectionAborted()))
self._state = 'CONNECTION_LOST'
for d in self._abortDeferreds:
d.callback(None)
self._abortDeferreds = []
def abort(self):
"""
Close the connection and cause all outstanding L{request} L{Deferred}s
to fire with an error.
"""
if self._state == "CONNECTION_LOST":
return succeed(None)
self.transport.loseConnection()
self._state = 'ABORTING'
d = Deferred()
self._abortDeferreds.append(d)
        return d
cache.go | // Copyright Project Contour Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package status holds pieces for handling status updates propagated from
// the DAG back to Kubernetes
package status
import (
contour_api_v1 "github.com/projectcontour/contour/apis/projectcontour/v1"
"github.com/projectcontour/contour/internal/k8s"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
gatewayapi_v1alpha1 "sigs.k8s.io/gateway-api/apis/v1alpha1"
)
// ConditionType is used to ensure we only use a limited set of possible values
// for DetailedCondition types. It's cast back to a string before sending off to
// HTTPProxy structs, as those use upstream types which we can't alias easily.
type ConditionType string
// ValidCondition is the ConditionType for Valid.
const ValidCondition ConditionType = "Valid"
// NewCache creates a new Cache for holding status updates.
func NewCache(gateway types.NamespacedName) Cache {
return Cache{
proxyUpdates: make(map[types.NamespacedName]*ProxyUpdate),
gatewayRef: gateway,
routeUpdates: make(map[types.NamespacedName]*ConditionsUpdate),
entries: make(map[string]map[types.NamespacedName]CacheEntry),
}
}
type CacheEntry interface {
AsStatusUpdate() k8s.StatusUpdate
ConditionFor(ConditionType) *contour_api_v1.DetailedCondition
}
// Cache holds status updates from the DAG back towards Kubernetes.
// It holds a per-Kind cache, and is intended to be accessed with a
// KindAccessor.
type Cache struct {
proxyUpdates map[types.NamespacedName]*ProxyUpdate
gatewayRef types.NamespacedName
routeUpdates map[types.NamespacedName]*ConditionsUpdate
// Map of cache entry maps, keyed on Kind.
entries map[string]map[types.NamespacedName]CacheEntry
}
// Get returns a pointer to the cache entry if it exists, nil
// otherwise. The return value is shared between all callers, who
// should take care to cooperate.
func (c *Cache) Get(obj metav1.Object) CacheEntry {
kind := k8s.KindOf(obj)
	if _, ok := c.entries[kind]; !ok {
		c.entries[kind] = make(map[types.NamespacedName]CacheEntry)
	}
return c.entries[kind][k8s.NamespacedNameOf(obj)]
}
// Put returns an entry to the cache.
func (c *Cache) Put(obj metav1.Object, e CacheEntry) {
kind := k8s.KindOf(obj)
if _, ok := c.entries[kind]; !ok {
c.entries[kind] = make(map[types.NamespacedName]CacheEntry)
}
c.entries[kind][k8s.NamespacedNameOf(obj)] = e
}
// GetStatusUpdates returns a slice of StatusUpdates, ready to be sent off
// to the StatusUpdater by the event handler.
// As more kinds are handled by Cache, we'll update this method.
func (c *Cache) GetStatusUpdates() []k8s.StatusUpdate {
var flattened []k8s.StatusUpdate
for fullname, pu := range c.proxyUpdates {
update := k8s.StatusUpdate{
NamespacedName: fullname,
Resource: contour_api_v1.HTTPProxyGVR,
Mutator: pu,
}
flattened = append(flattened, update)
}
for fullname, routeUpdate := range c.routeUpdates {
update := k8s.StatusUpdate{
NamespacedName: fullname,
Resource: schema.GroupVersionResource{
Group: gatewayapi_v1alpha1.GroupVersion.Group,
Version: gatewayapi_v1alpha1.GroupVersion.Version,
Resource: routeUpdate.Resource,
},
Mutator: routeUpdate,
}
flattened = append(flattened, update)
}
for _, byKind := range c.entries {
for _, e := range byKind {
flattened = append(flattened, e.AsStatusUpdate())
}
}
return flattened
}
// GetProxyUpdates gets the underlying ProxyUpdate objects
// from the cache, used by various things (`internal/contour/metrics.go` and `internal/dag/status_test.go`)
// to retrieve info they need.
// TODO(youngnick)#2969: This could conceivably be replaced with a Walk pattern.
func (c *Cache) GetProxyUpdates() []*ProxyUpdate {
var allUpdates []*ProxyUpdate
for _, pu := range c.proxyUpdates {
allUpdates = append(allUpdates, pu)
}
return allUpdates
}
// GetRouteUpdates gets the underlying ConditionsUpdate objects from the cache.
func (c *Cache) GetRouteUpdates() []*ConditionsUpdate {
var allUpdates []*ConditionsUpdate
for _, conditionsUpdate := range c.routeUpdates {
allUpdates = append(allUpdates, conditionsUpdate)
}
return allUpdates
}
notifications.rs | use crate::ui::components::container::*;
use oxygengine::user_interface::raui::{core::prelude::*, material::prelude::*};
use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
const DEFAULT_DURATION: Scalar = 2.0;
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct NotificationShow {
pub text: String,
pub side: bool,
pub height: Option<Scalar>,
pub duration: Option<Scalar>,
}
#[derive(MessageData, Debug, Clone)]
pub enum NotificationSignal {
None,
Register,
Unregister,
Show(NotificationShow),
}
impl Default for NotificationSignal {
fn default() -> Self {
Self::None
}
}
#[derive(PropsData, Debug, Default, Clone, Serialize, Deserialize)]
pub struct NotificationsState(pub VecDeque<NotificationShow>);
#[derive(PropsData, Debug, Clone, Serialize, Deserialize)]
pub struct NotificationsProps {
#[serde(default)]
pub side_margin: Scalar,
#[serde(default)]
pub side_external_margin: Scalar,
#[serde(default)]
pub side_internal_margin: Scalar,
#[serde(default)]
pub side_default_height: Scalar,
#[serde(default)]
pub external_margin: Scalar,
#[serde(default)]
pub internal_margin: Scalar,
#[serde(default)]
pub default_height: Scalar,
}
impl Default for NotificationsProps {
fn default() -> Self {
Self {
side_margin: 128.0,
side_external_margin: 16.0,
side_internal_margin: 4.0,
side_default_height: 26.0,
external_margin: 64.0,
internal_margin: 16.0,
default_height: 48.0,
}
}
}
fn make_animation(duration: Scalar) -> Animation {
Animation::Sequence(vec![
Animation::Value(AnimatedValue {
name: "fade-in".to_owned(),
duration: 0.25,
}),
Animation::Value(AnimatedValue {
name: "delay".to_owned(),
duration,
}),
Animation::Value(AnimatedValue {
name: "fade-out".to_owned(),
duration: 0.25,
}),
Animation::Message("next".to_owned()),
])
}
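// The notification lifecycle is driven entirely by this animation sequence:
// fade in, hold for `duration`, fade out, then emit a "next" message which
// pops the queue and, if more notifications are pending, starts the next cycle.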
fn use_notifications(context: &mut WidgetContext) {
context.life_cycle.mount(|context| {
drop(context.state.write(NotificationsState::default()));
context.signals.write(NotificationSignal::Register);
});
context.life_cycle.unmount(|context| {
context.signals.write(NotificationSignal::Unregister);
});
context.life_cycle.change(|context| {
        for msg in context.messenger.messages {
            if let Some(NotificationSignal::Show(data)) = msg.as_any().downcast_ref() {
                let mut state = context.state.read_cloned_or_default::<NotificationsState>();
                state.0.push_back(data.clone());
                if !context.animator.has("") {
                    let duration = state
.0
.front()
.unwrap()
.duration
.unwrap_or(DEFAULT_DURATION);
drop(context.animator.change("", Some(make_animation(duration))));
}
drop(context.state.write(state));
} else if let Some(AnimationMessage(msg)) = msg.as_any().downcast_ref() {
if msg == "next" {
let mut state = context.state.read_cloned_or_default::<NotificationsState>();
state.0.pop_front();
if !state.0.is_empty() {
let duration = state
.0
.front()
.unwrap()
.duration
.unwrap_or(DEFAULT_DURATION);
drop(context.animator.change("", Some(make_animation(duration))));
}
drop(context.state.write(state));
}
}
}
});
}
#[pre_hooks(use_notifications)]
pub fn notifications(mut context: WidgetContext) -> WidgetNode {
let WidgetContext {
key,
props,
state,
animator,
..
} = context;
let NotificationsProps {
side_margin,
side_external_margin,
side_internal_margin,
side_default_height,
external_margin,
internal_margin,
default_height,
} = props.read_cloned_or_default();
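    // `phase` ramps from 0 to 1 while the "fade-in" value animates and back to
    // 0 during "fade-out"; it drives both the slide-in layout offset and the
    // widget alpha below.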
let phase = {
let a = animator.value_progress_factor_or_zero("", "fade-in");
let b = animator.value_progress_factor_or_zero("", "fade-out");
a - b
};
let item = if let Ok(state) = state.read::<NotificationsState>() {
if let Some(state) = state.0.front() {
let height = state.height.unwrap_or_else(|| {
if state.side {
side_default_height
} else {
default_height
}
});
let mut props = Props::new(ContainerProps {
variant: "dark".to_owned(),
canvas_color: None,
internal_margin: if state.side {
(0.0, 40.0).into()
} else {
0.0.into()
},
..Default::default()
});
if state.side {
props.write(ContentBoxItemLayout {
anchors: Rect {
left: lerp(1.0, 0.0, phase),
right: 1.0,
top: 0.0,
bottom: 0.0,
},
offset: Vec2 {
x: 0.0,
y: side_external_margin,
},
align: Vec2 { x: 1.0, y: 0.0 },
margin: Rect {
left: side_margin,
right: 0.0,
top: 0.0,
bottom: -height,
},
..Default::default()
});
} else {
props.write(ContentBoxItemLayout {
anchors: Rect {
left: 0.0,
right: 1.0,
top: 1.0,
bottom: 1.0,
},
offset: Vec2 {
x: 0.0,
y: -external_margin * phase,
},
align: Vec2 { x: 0.5, y: 1.0 },
margin: Rect {
left: external_margin,
right: external_margin,
top: -height,
..Default::default()
},
..Default::default()
});
}
let size_props = SizeBoxProps {
width: SizeBoxSizeValue::Fill,
height: SizeBoxSizeValue::Fill,
margin: if state.side {
Rect {
left: side_internal_margin,
right: side_internal_margin,
top: side_internal_margin,
bottom: side_internal_margin,
}
} else {
Rect {
left: internal_margin,
right: internal_margin,
top: internal_margin,
bottom: internal_margin,
}
},
..Default::default()
};
let text_props = TextPaperProps {
text: state.text.to_owned(),
variant: "3".to_owned(),
use_main_color: true,
..Default::default()
};
widget! {
(#{"item"} container: {props} | {WidgetAlpha(phase)} [
(#{"wrapper"} size_box: {size_props} {
content = (#{"text"} text_paper: {text_props})
})
])
}
} else {
widget! {()}
}
} else {
widget! {()}
};
widget! {
(#{key} content_box: {props.clone()} [
{item}
])
}
}
auth.module.ts | import { Module } from '@nestjs/common';
import { AuthController } from './auth.controller';
import { AuthService } from './auth.service';
import { TypeOrmModule } from '@nestjs/typeorm';
import { UserRepository } from './user.repository';
import { JwtModule } from '@nestjs/jwt';
import { PassportModule } from '@nestjs/passport';
import { JwtStrategy } from './jwt-strategy';
@Module({
imports: [
PassportModule.register({ defaultStrategy: 'jwt' }),
JwtModule.register({
secret: 'topSecret',
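      // NOTE: a hardcoded secret keeps the example simple; a real deployment
      // would load it from configuration (e.g. an environment variable).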
signOptions: {
expiresIn: 3600,
},
}),
TypeOrmModule.forFeature([UserRepository]), // now we can consume this in the service
],
controllers: [AuthController],
providers: [AuthService, JwtStrategy],
exports: [JwtStrategy, PassportModule], // now this can be used in other modules
})
export class AuthModule {}
nav.go | package main
import (
"bufio"
"errors"
"fmt"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
times "gopkg.in/djherbis/times.v1"
)
type linkState byte
const (
notLink linkState = iota
working
broken
)
type file struct {
os.FileInfo
linkState linkState
path string
dirCount int
accessTime time.Time
changeTime time.Time
ext string
}
func readdir(path string) ([]*file, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
names, err := f.Readdirnames(-1)
f.Close()
files := make([]*file, 0, len(names))
for _, fname := range names {
fpath := filepath.Join(path, fname)
lstat, err := os.Lstat(fpath)
if os.IsNotExist(err) {
continue
}
if err != nil {
return files, err
}
var linkState linkState
if lstat.Mode()&os.ModeSymlink != 0 {
stat, err := os.Stat(fpath)
if err == nil {
linkState = working
lstat = stat
} else {
linkState = broken
}
}
ts := times.Get(lstat)
at := ts.AccessTime()
var ct time.Time
// from times docs: ChangeTime() panics unless HasChangeTime() is true
if ts.HasChangeTime() {
ct = ts.ChangeTime()
} else {
// fall back to ModTime if ChangeTime cannot be determined
ct = lstat.ModTime()
}
// returns an empty string if extension could not be determined
// i.e. directories, filenames without extensions
ext := filepath.Ext(fpath)
files = append(files, &file{
FileInfo: lstat,
linkState: linkState,
path: fpath,
dirCount: -1,
accessTime: at,
changeTime: ct,
ext: ext,
})
}
return files, err
}
type dir struct {
loading bool // directory is loading from disk
loadTime time.Time // current loading or last load time
ind int // index of current entry in files
pos int // position of current entry in ui
path string // full path of directory
files []*file // displayed files in directory including or excluding hidden ones
allFiles []*file // all files in directory including hidden ones (same array as files)
sortType sortType // sort method and options from last sort
noPerm bool // whether lf has no permission to open the directory
}
func newDir(path string) *dir {
time := time.Now()
files, err := readdir(path)
if err != nil {
log.Printf("reading directory: %s", err)
}
return &dir{
loadTime: time,
path: path,
files: files,
allFiles: files,
noPerm: os.IsPermission(err),
}
}
func (dir *dir) sort() {
dir.sortType = gOpts.sortType
dir.files = dir.allFiles
switch gOpts.sortType.method {
case naturalSort:
sort.SliceStable(dir.files, func(i, j int) bool {
return naturalLess(strings.ToLower(dir.files[i].Name()), strings.ToLower(dir.files[j].Name()))
})
case nameSort:
sort.SliceStable(dir.files, func(i, j int) bool {
return strings.ToLower(dir.files[i].Name()) < strings.ToLower(dir.files[j].Name())
})
case sizeSort:
sort.SliceStable(dir.files, func(i, j int) bool {
return dir.files[i].Size() < dir.files[j].Size()
})
case timeSort:
sort.SliceStable(dir.files, func(i, j int) bool {
return dir.files[i].ModTime().Before(dir.files[j].ModTime())
})
case atimeSort:
sort.SliceStable(dir.files, func(i, j int) bool {
return dir.files[i].accessTime.Before(dir.files[j].accessTime)
})
case ctimeSort:
sort.SliceStable(dir.files, func(i, j int) bool {
return dir.files[i].changeTime.Before(dir.files[j].changeTime)
})
case extSort:
sort.SliceStable(dir.files, func(i, j int) bool {
leftExt := strings.ToLower(dir.files[i].ext)
rightExt := strings.ToLower(dir.files[j].ext)
// if the extension could not be determined (directories, files without)
// use a zero byte so that these files can be ranked higher
if leftExt == "" {
leftExt = "\x00"
}
if rightExt == "" {
rightExt = "\x00"
}
// in order to also have natural sorting with the filenames
// combine the name with the ext but have the ext at the front
left := leftExt + strings.ToLower(dir.files[i].Name())
right := rightExt + strings.ToLower(dir.files[j].Name())
return left < right
})
}
if gOpts.sortType.option&reverseSort != 0 {
for i, j := 0, len(dir.files)-1; i < j; i, j = i+1, j-1 {
dir.files[i], dir.files[j] = dir.files[j], dir.files[i]
}
}
if gOpts.sortType.option&dirfirstSort != 0 {
sort.SliceStable(dir.files, func(i, j int) bool {
if dir.files[i].IsDir() == dir.files[j].IsDir() {
return i < j
}
return dir.files[i].IsDir()
})
}
// when hidden option is disabled, we move hidden files to the
// beginning of our file list and then set the beginning of displayed
// files to the first non-hidden file in the list
if gOpts.sortType.option&hiddenSort == 0 {
sort.SliceStable(dir.files, func(i, j int) bool {
if isHidden(dir.files[i]) && isHidden(dir.files[j]) {
return i < j
}
return isHidden(dir.files[i])
})
for i, f := range dir.files {
if !isHidden(f) {
dir.files = dir.files[i:]
return
}
}
dir.files = dir.files[len(dir.files):]
}
}
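// Illustrative example of the sorting rules above (not exhaustive): with
// extSort, a file named "Notes.TXT" is compared using the key ".txtnotes.txt",
// while an extensionless entry such as a directory named "src" uses "\x00src",
// so entries without an extension come first. With the hidden option disabled,
// hidden files are shifted to the front and then sliced off, leaving
// dir.files to start at the first visible entry.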
func (dir *dir) name() string {
if len(dir.files) == 0 {
return ""
}
return dir.files[dir.ind].Name()
}
func (dir *dir) sel(name string, height int) {
if len(dir.files) == 0 {
dir.ind, dir.pos = 0, 0
return
}
dir.ind = min(dir.ind, len(dir.files)-1)
if dir.files[dir.ind].Name() != name {
for i, f := range dir.files {
if f.Name() == name {
dir.ind = i
break
}
}
}
edge := min(min(height/2, gOpts.scrolloff), len(dir.files)-dir.ind-1)
dir.pos = min(dir.ind, height-edge-1)
}
type nav struct {
dirs []*dir
copyBytes int64
copyTotal int64
copyUpdate int
moveCount int
moveTotal int
moveUpdate int
deleteCount int
deleteTotal int
deleteUpdate int
copyBytesChan chan int64
copyTotalChan chan int64
moveCountChan chan int
moveTotalChan chan int
deleteCountChan chan int
deleteTotalChan chan int
dirChan chan *dir
regChan chan *reg
dirCache map[string]*dir
regCache map[string]*reg
saves map[string]bool
marks map[string]string
renameCache []string
selections map[string]int
selectionInd int
height int
find string
findBack bool
search string
searchBack bool
searchInd int
searchPos int
}
func (nav *nav) loadDir(path string) *dir {
d, ok := nav.dirCache[path]
if !ok {
go func() {
d := newDir(path)
d.sort()
d.ind, d.pos = 0, 0
nav.dirChan <- d
}()
d := &dir{loading: true, path: path, sortType: gOpts.sortType}
nav.dirCache[path] = d
return d
}
s, err := os.Stat(d.path)
if err != nil {
return d
}
switch {
case s.ModTime().After(d.loadTime):
go func() {
d.loadTime = time.Now()
nd := newDir(path)
nd.sort()
nd.sel(d.name(), nav.height)
nav.dirChan <- nd
}()
case d.sortType != gOpts.sortType:
go func() {
d.loading = true
name := d.name()
d.sort()
d.sel(name, nav.height)
d.loading = false
nav.dirChan <- d
}()
}
return d
}
func (nav *nav) getDirs(wd string) {
var dirs []*dir
for curr, base := wd, ""; !isRoot(base); curr, base = filepath.Dir(curr), filepath.Base(curr) {
dir := nav.loadDir(curr)
dir.sel(base, nav.height)
dirs = append(dirs, dir)
}
for i, j := 0, len(dirs)-1; i < j; i, j = i+1, j-1 {
dirs[i], dirs[j] = dirs[j], dirs[i]
}
nav.dirs = dirs
}
func newNav(height int) *nav {
wd, err := os.Getwd()
if err != nil {
log.Printf("getting current directory: %s", err)
}
nav := &nav{
copyBytesChan: make(chan int64, 1024),
copyTotalChan: make(chan int64, 1024),
moveCountChan: make(chan int, 1024),
moveTotalChan: make(chan int, 1024),
deleteCountChan: make(chan int, 1024),
deleteTotalChan: make(chan int, 1024),
dirChan: make(chan *dir),
regChan: make(chan *reg),
dirCache: make(map[string]*dir),
regCache: make(map[string]*reg),
saves: make(map[string]bool),
marks: make(map[string]string),
renameCache: make([]string, 2),
selections: make(map[string]int),
selectionInd: 0,
height: height,
}
nav.getDirs(wd)
return nav
}
func (nav *nav) renew() {
for _, d := range nav.dirs {
go func(d *dir) {
s, err := os.Stat(d.path)
if err != nil {
log.Printf("getting directory info: %s", err)
return
}
if d.loadTime.After(s.ModTime()) {
return
}
d.loadTime = time.Now()
nd := newDir(d.path)
nd.sort()
nav.dirChan <- nd
}(d)
}
for m := range nav.selections {
if _, err := os.Stat(m); os.IsNotExist(err) {
delete(nav.selections, m)
}
}
if len(nav.selections) == 0 {
nav.selectionInd = 0
}
}
func (nav *nav) reload() error {
nav.dirCache = make(map[string]*dir)
nav.regCache = make(map[string]*reg)
wd, err := os.Getwd()
if err != nil {
return fmt.Errorf("getting current directory: %s", err)
}
curr, err := nav.currFile()
nav.getDirs(wd)
if err == nil {
last := nav.dirs[len(nav.dirs)-1]
last.files = append(last.files, curr)
}
return nil
}
func (nav *nav) position() {
path := nav.currDir().path
for i := len(nav.dirs) - 2; i >= 0; i-- {
nav.dirs[i].sel(filepath.Base(path), nav.height)
path = filepath.Dir(path)
}
}
func (nav *nav) preview() {
curr, err := nav.currFile()
if err != nil {
return
}
var reader io.Reader
if len(gOpts.previewer) != 0 {
cmd := exec.Command(gOpts.previewer, curr.path, strconv.Itoa(nav.height))
out, err := cmd.StdoutPipe()
if err != nil {
log.Printf("previewing file: %s", err)
}
if err := cmd.Start(); err != nil {
log.Printf("previewing file: %s", err)
}
defer cmd.Wait()
defer out.Close()
reader = out
} else {
f, err := os.Open(curr.path)
if err != nil {
log.Printf("opening file: %s", err)
}
defer f.Close()
reader = f
}
reg := ®{loadTime: time.Now(), path: curr.path}
buf := bufio.NewScanner(reader)
for i := 0; i < nav.height && buf.Scan(); i++ {
for _, r := range buf.Text() {
if r == 0 {
reg.lines = []string{"\033[7mbinary\033[0m"}
nav.regChan <- reg
return
}
}
reg.lines = append(reg.lines, buf.Text())
}
if buf.Err() != nil {
log.Printf("loading file: %s", buf.Err())
}
nav.regChan <- reg
}
func (nav *nav) loadReg(path string) *reg {
r, ok := nav.regCache[path]
if !ok {
go nav.preview()
r := ®{loading: true, path: path}
nav.regCache[path] = r
return r
}
s, err := os.Stat(r.path)
if err != nil {
return r
}
if s.ModTime().After(r.loadTime) {
r.loadTime = time.Now()
go nav.preview()
}
return r
}
func (nav *nav) sort() {
for _, d := range nav.dirs {
name := d.name()
d.sort()
d.sel(name, nav.height)
}
}
func (nav *nav) up(dist int) {
dir := nav.currDir()
if dir.ind == 0 {
if gOpts.wrapscroll {
nav.bottom()
}
return
}
dir.ind -= dist
dir.ind = max(0, dir.ind)
dir.pos -= dist
edge := min(min(nav.height/2, gOpts.scrolloff), dir.ind)
dir.pos = max(dir.pos, edge)
}
func (nav *nav) down(dist int) {
dir := nav.currDir()
maxind := len(dir.files) - 1
if dir.ind >= maxind {
if gOpts.wrapscroll {
nav.top()
}
return
}
dir.ind += dist
dir.ind = min(maxind, dir.ind)
dir.pos += dist
edge := min(min(nav.height/2, gOpts.scrolloff), maxind-dir.ind)
// use a smaller value when the height is even and scrolloff is maxed
// in order to stay at the same row as much as possible while moving up/down
edge = min(edge, nav.height/2+nav.height%2-1)
dir.pos = min(dir.pos, nav.height-edge-1)
dir.pos = min(dir.pos, maxind)
}
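// Worked example of the edge calculation above, with hypothetical settings:
// for height = 10 and scrolloff = 4, far from the end of the list the edge is
// min(min(10/2, 4), remaining) = 4, clamped to height/2 + height%2 - 1 = 4,
// so dir.pos is capped at 10 - 4 - 1 = 5 and four rows remain visible below
// the cursor while scrolling down.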
func (nav *nav) updir() error {
if len(nav.dirs) <= 1 {
return nil
}
dir := nav.currDir()
nav.dirs = nav.dirs[:len(nav.dirs)-1]
if err := os.Chdir(filepath.Dir(dir.path)); err != nil {
return fmt.Errorf("updir: %s", err)
}
return nil
}
func (nav *nav) open() error {
curr, err := nav.currFile()
if err != nil {
return fmt.Errorf("open: %s", err)
}
path := curr.path
dir := nav.loadDir(path)
nav.dirs = append(nav.dirs, dir)
if err := os.Chdir(path); err != nil {
return fmt.Errorf("open: %s", err)
}
return nil
}
func (nav *nav) top() {
dir := nav.currDir()
dir.ind = 0
dir.pos = 0
}
func (nav *nav) bottom() {
dir := nav.currDir()
dir.ind = len(dir.files) - 1
dir.pos = min(dir.ind, nav.height-1)
}
func (nav *nav) toggleSelection(path string) {
if _, ok := nav.selections[path]; ok {
delete(nav.selections, path)
if len(nav.selections) == 0 {
nav.selectionInd = 0
}
} else {
nav.selections[path] = nav.selectionInd
nav.selectionInd++
}
}
func (nav *nav) toggle() {
curr, err := nav.currFile()
if err != nil {
return
}
nav.toggleSelection(curr.path)
nav.down(1)
}
func (nav *nav) invert() {
last := nav.currDir()
for _, f := range last.files {
path := filepath.Join(last.path, f.Name())
nav.toggleSelection(path)
}
}
func (nav *nav) unselect() {
nav.selections = make(map[string]int)
nav.selectionInd = 0
}
func (nav *nav) save(cp bool) error {
list, err := nav.currFileOrSelections()
if err != nil {
return err
}
if err := saveFiles(list, cp); err != nil {
return err
}
nav.saves = make(map[string]bool)
for _, f := range list {
nav.saves[f] = cp
}
return nil
}
func (nav *nav) copyAsync(ui *ui, srcs []string, dstDir string) {
echo := &callExpr{"echoerr", []string{""}, 1}
_, err := os.Stat(dstDir)
if os.IsNotExist(err) {
echo.args[0] = err.Error()
ui.exprChan <- echo
return
}
total, err := copySize(srcs)
if err != nil {
echo.args[0] = err.Error()
ui.exprChan <- echo
return
}
nav.copyTotalChan <- total
nums, errs := copyAll(srcs, dstDir)
errCount := 0
loop:
for {
select {
case n := <-nums:
nav.copyBytesChan <- n
case err, ok := <-errs:
if !ok {
break loop
}
errCount++
echo.args[0] = fmt.Sprintf("[%d] %s", errCount, err)
ui.exprChan <- echo
}
}
nav.copyTotalChan <- -total
if err := remote("send load"); err != nil {
errCount++
echo.args[0] = fmt.Sprintf("[%d] %s", errCount, err)
ui.exprChan <- echo
}
}
func (nav *nav) moveAsync(ui *ui, srcs []string, dstDir string) {
echo := &callExpr{"echoerr", []string{""}, 1}
_, err := os.Stat(dstDir)
if os.IsNotExist(err) {
echo.args[0] = err.Error()
ui.exprChan <- echo
return
}
nav.moveTotalChan <- len(srcs)
errCount := 0
for _, src := range srcs {
nav.moveCountChan <- 1
srcStat, err := os.Stat(src)
if err != nil {
errCount++
echo.args[0] = fmt.Sprintf("[%d] %s", errCount, err)
ui.exprChan <- echo
continue
}
dst := filepath.Join(dstDir, filepath.Base(src))
dstStat, err := os.Stat(dst)
if os.SameFile(srcStat, dstStat) {
errCount++
echo.args[0] = fmt.Sprintf("[%d] rename %s %s: source and destination are the same file", errCount, src, dst)
ui.exprChan <- echo
continue
} else if !os.IsNotExist(err) {
var newPath string
for i := 1; !os.IsNotExist(err); i++ {
newPath = fmt.Sprintf("%s.~%d~", dst, i)
_, err = os.Stat(newPath)
}
dst = newPath
}
if err := os.Rename(src, dst); err != nil {
errCount++
echo.args[0] = fmt.Sprintf("[%d] %s", errCount, err)
ui.exprChan <- echo
}
}
nav.moveTotalChan <- -len(srcs)
if err := remote("send load"); err != nil {
errCount++
echo.args[0] = fmt.Sprintf("[%d] %s", errCount, err)
ui.exprChan <- echo
}
}
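// Collision handling sketch for the rename loop above: if the destination
// "dir/file.txt" already exists and is not the same file as the source, the
// loop probes "dir/file.txt.~1~", "dir/file.txt.~2~", ... and renames the
// source to the first name that does not exist yet.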
func (nav *nav) paste(ui *ui) error {
srcs, cp, err := loadFiles()
if err != nil {
return err
}
if len(srcs) == 0 {
return errors.New("no file in copy/cut buffer")
}
dstDir := nav.currDir().path
if cp {
go nav.copyAsync(ui, srcs, dstDir)
} else {
go nav.moveAsync(ui, srcs, dstDir)
}
if err := saveFiles(nil, false); err != nil {
return fmt.Errorf("clearing copy/cut buffer: %s", err)
}
if err := remote("send sync"); err != nil {
return fmt.Errorf("paste: %s", err)
}
return nil
}
func (nav *nav) del(ui *ui) error {
list, err := nav.currFileOrSelections()
if err != nil {
return err
}
go func() {
echo := &callExpr{"echoerr", []string{""}, 1}
errCount := 0
nav.deleteTotalChan <- len(list)
for _, path := range list {
nav.deleteCountChan <- 1
if err := os.RemoveAll(path); err != nil {
errCount++
echo.args[0] = fmt.Sprintf("[%d] %s", errCount, err)
ui.exprChan <- echo
}
}
nav.deleteTotalChan <- -len(list)
if err := remote("send load"); err != nil {
errCount++
echo.args[0] = fmt.Sprintf("[%d] %s", errCount, err)
ui.exprChan <- echo
}
}()
return nil
}
func (nav *nav) rename() error {
oldPath := nav.renameCache[0]
newPath := nav.renameCache[1]
dir, _ := filepath.Split(newPath)
os.MkdirAll(dir, os.ModePerm)
if _, err := os.Stat(newPath); err == nil { // file exists
if err := os.Remove(newPath); err != nil {
return err
}
}
if err := os.Rename(oldPath, newPath); err != nil {
return err
}
// TODO: change selection
if err := nav.sel(newPath); err != nil {
return err
}
return nil
}
func (nav *nav) sync() error {
list, cp, err := loadFiles()
if err != nil {
return err
}
nav.saves = make(map[string]bool)
for _, f := range list {
nav.saves[f] = cp
}
return nav.readMarks()
}
func (nav *nav) cd(wd string) error {
wd = strings.Replace(wd, "~", gUser.HomeDir, -1)
wd = filepath.Clean(wd)
if !filepath.IsAbs(wd) {
wd = filepath.Join(nav.currDir().path, wd)
}
if err := os.Chdir(wd); err != nil {
return fmt.Errorf("cd: %s", err)
}
nav.getDirs(wd)
return nil
}
func (nav *nav) sel(path string) error {
path = strings.Replace(path, "~", gUser.HomeDir, -1)
path = filepath.Clean(path)
lstat, err := os.Stat(path)
if err != nil {
return fmt.Errorf("select: %s", err)
}
dir := filepath.Dir(path)
if err := nav.cd(dir); err != nil {
return fmt.Errorf("select: %s", err)
}
base := filepath.Base(path)
last := nav.dirs[len(nav.dirs)-1]
if last.loading {
last.files = append(last.files, &file{FileInfo: lstat})
} else {
last.sel(base, nav.height)
}
return nil
}
func (nav *nav) globSel(pattern string, invert bool) error {
curDir := nav.currDir()
anyMatches := false
for i := 0; i < len(curDir.files); i++ {
match, err := filepath.Match(pattern, curDir.files[i].Name())
if err != nil {
return fmt.Errorf("glob-select: %s", err)
}
if match {
anyMatches = true
fpath := filepath.Join(curDir.path, curDir.files[i].Name())
if _, ok := nav.selections[fpath]; ok == invert {
nav.toggleSelection(fpath)
}
}
}
if !anyMatches {
return fmt.Errorf("glob-select: pattern not found: %s", pattern)
}
return nil
}
func findMatch(name, pattern string) bool {
if gOpts.ignorecase |
if gOpts.ignoredia {
lpattern := removeDiacritics(pattern)
if !gOpts.smartdia || lpattern == pattern {
pattern = lpattern
name = removeDiacritics(name)
}
}
if gOpts.anchorfind {
return strings.HasPrefix(name, pattern)
}
return strings.Contains(name, pattern)
}
func (nav *nav) findSingle() int {
count := 0
index := 0
last := nav.currDir()
for i := 0; i < len(last.files); i++ {
if findMatch(last.files[i].Name(), nav.find) {
count++
if count > 1 {
return count
}
index = i
}
}
if count == 1 {
if index > last.ind {
nav.down(index - last.ind)
} else {
nav.up(last.ind - index)
}
}
return count
}
func (nav *nav) findNext() bool {
last := nav.currDir()
for i := last.ind + 1; i < len(last.files); i++ {
if findMatch(last.files[i].Name(), nav.find) {
nav.down(i - last.ind)
return true
}
}
if gOpts.wrapscan {
for i := 0; i < last.ind; i++ {
if findMatch(last.files[i].Name(), nav.find) {
nav.up(last.ind - i)
return true
}
}
}
return false
}
func (nav *nav) findPrev() bool {
last := nav.currDir()
for i := last.ind - 1; i >= 0; i-- {
if findMatch(last.files[i].Name(), nav.find) {
nav.up(last.ind - i)
return true
}
}
if gOpts.wrapscan {
for i := len(last.files) - 1; i > last.ind; i-- {
if findMatch(last.files[i].Name(), nav.find) {
nav.down(i - last.ind)
return true
}
}
}
return false
}
func searchMatch(name, pattern string) (matched bool, err error) {
if gOpts.ignorecase {
lpattern := strings.ToLower(pattern)
if !gOpts.smartcase || lpattern == pattern {
pattern = lpattern
name = strings.ToLower(name)
}
}
if gOpts.ignoredia {
lpattern := removeDiacritics(pattern)
if !gOpts.smartdia || lpattern == pattern {
pattern = lpattern
name = removeDiacritics(name)
}
}
if gOpts.globsearch {
return filepath.Match(pattern, name)
}
return strings.Contains(name, pattern), nil
}
func (nav *nav) searchNext() error {
last := nav.currDir()
for i := last.ind + 1; i < len(last.files); i++ {
matched, err := searchMatch(last.files[i].Name(), nav.search)
if err != nil {
return err
}
if matched {
nav.down(i - last.ind)
return nil
}
}
if gOpts.wrapscan {
for i := 0; i < last.ind; i++ {
matched, err := searchMatch(last.files[i].Name(), nav.search)
if err != nil {
return err
}
if matched {
nav.up(last.ind - i)
return nil
}
}
}
return nil
}
func (nav *nav) searchPrev() error {
last := nav.currDir()
for i := last.ind - 1; i >= 0; i-- {
matched, err := searchMatch(last.files[i].Name(), nav.search)
if err != nil {
return err
}
if matched {
nav.up(last.ind - i)
return nil
}
}
if gOpts.wrapscan {
for i := len(last.files) - 1; i > last.ind; i-- {
matched, err := searchMatch(last.files[i].Name(), nav.search)
if err != nil {
return err
}
if matched {
nav.down(i - last.ind)
return nil
}
}
}
return nil
}
func (nav *nav) removeMark(mark string) error {
if _, ok := nav.marks[mark]; ok {
delete(nav.marks, mark)
return nil
}
return fmt.Errorf("no such mark")
}
func (nav *nav) readMarks() error {
nav.marks = make(map[string]string)
f, err := os.Open(gMarksPath)
if os.IsNotExist(err) {
return nil
}
if err != nil {
return fmt.Errorf("opening marks file: %s", err)
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
toks := strings.SplitN(scanner.Text(), ":", 2)
if _, ok := nav.marks[toks[0]]; !ok {
nav.marks[toks[0]] = toks[1]
}
}
if err := scanner.Err(); err != nil {
return fmt.Errorf("reading marks file: %s", err)
}
return nil
}
func (nav *nav) writeMarks() error {
if err := os.MkdirAll(filepath.Dir(gMarksPath), os.ModePerm); err != nil {
return fmt.Errorf("creating data directory: %s", err)
}
f, err := os.Create(gMarksPath)
if err != nil {
return fmt.Errorf("creating marks file: %s", err)
}
defer f.Close()
var keys []string
for k := range nav.marks {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
_, err = f.WriteString(fmt.Sprintf("%s:%s\n", k, nav.marks[k]))
if err != nil {
return fmt.Errorf("writing marks file: %s", err)
}
}
return nil
}
func (nav *nav) currDir() *dir {
return nav.dirs[len(nav.dirs)-1]
}
func (nav *nav) currFile() (*file, error) {
last := nav.dirs[len(nav.dirs)-1]
if len(last.files) == 0 {
return nil, fmt.Errorf("empty directory")
}
return last.files[last.ind], nil
}
type indexedSelections struct {
paths []string
indices []int
}
func (m indexedSelections) Len() int { return len(m.paths) }
func (m indexedSelections) Swap(i, j int) {
m.paths[i], m.paths[j] = m.paths[j], m.paths[i]
m.indices[i], m.indices[j] = m.indices[j], m.indices[i]
}
func (m indexedSelections) Less(i, j int) bool { return m.indices[i] < m.indices[j] }
func (nav *nav) currSelections() []string {
paths := make([]string, 0, len(nav.selections))
indices := make([]int, 0, len(nav.selections))
for path, index := range nav.selections {
paths = append(paths, path)
indices = append(indices, index)
}
sort.Sort(indexedSelections{paths: paths, indices: indices})
return paths
}
func (nav *nav) currFileOrSelections() (list []string, err error) {
if len(nav.selections) == 0 {
curr, err := nav.currFile()
if err != nil {
return nil, errors.New("no file selected")
}
return []string{curr.path}, nil
}
return nav.currSelections(), nil
}
| {
lpattern := strings.ToLower(pattern)
if !gOpts.smartcase || lpattern == pattern {
pattern = lpattern
name = strings.ToLower(name)
}
} |
32.js | (function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory(require('@carbon/icon-helpers'), require('prop-types'), require('react')) :
typeof define === 'function' && define.amd ? define(['@carbon/icon-helpers', 'prop-types', 'react'], factory) :
(global.QCU132 = factory(global.CarbonIconHelpers,global.PropTypes,global.React));
}(this, (function (iconHelpers,PropTypes,React) { 'use strict';
PropTypes = PropTypes && PropTypes.hasOwnProperty('default') ? PropTypes['default'] : PropTypes;
React = React && React.hasOwnProperty('default') ? React['default'] : React;
function _typeof(obj) { if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; } return _typeof(obj); }
function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i] != null ? arguments[i] : {}; var ownKeys = Object.keys(source); if (typeof Object.getOwnPropertySymbols === 'function') { ownKeys = ownKeys.concat(Object.getOwnPropertySymbols(source).filter(function (sym) { return Object.getOwnPropertyDescriptor(source, sym).enumerable; })); } ownKeys.forEach(function (key) { _defineProperty(target, key, source[key]); }); } return target; }
function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }
function _objectWithoutProperties(source, excluded) { if (source == null) return {}; var target = _objectWithoutPropertiesLoose(source, excluded); var key, i; if (Object.getOwnPropertySymbols) { var sourceSymbolKeys = Object.getOwnPropertySymbols(source); for (i = 0; i < sourceSymbolKeys.length; i++) { key = sourceSymbolKeys[i]; if (excluded.indexOf(key) >= 0) continue; if (!Object.prototype.propertyIsEnumerable.call(source, key)) continue; target[key] = source[key]; } } return target; }
function _objectWithoutPropertiesLoose(source, excluded) { if (source == null) return {}; var target = {}; var sourceKeys = Object.keys(source); var key, i; for (i = 0; i < sourceKeys.length; i++) { key = sourceKeys[i]; if (excluded.indexOf(key) >= 0) continue; target[key] = source[key]; } return target; }
var defaultStyle = {
"willChange": "transform"
};
var QCU132 = React.forwardRef(function (_ref, ref) {
var className = _ref.className,
children = _ref.children,
style = _ref.style,
tabIndex = _ref.tabIndex,
rest = _objectWithoutProperties(_ref, ["className", "children", "style", "tabIndex"]);
var _getAttributes = iconHelpers.getAttributes(_objectSpread({}, rest, {
tabindex: tabIndex
})),
tabindex = _getAttributes.tabindex,
props = _objectWithoutProperties(_getAttributes, ["tabindex"]);
if (className) {
props.className = className;
}
if (tabindex !== undefined && tabindex !== null) { | }
if (_typeof(style) === 'object') {
props.style = _objectSpread({}, defaultStyle, style);
} else {
props.style = defaultStyle;
}
if (ref) {
props.ref = ref;
}
return React.createElement('svg', props, children, React.createElement('path', {
d: 'M10 23H5a2 2 0 0 1-2-2v-6a2 2 0 0 1 2-2h5v2H5v6h5zm8 0h-4a2 2 0 0 1-2-2V9h2v12h4V9h2v12a2 2 0 0 1-2 2zm9-2V9.01h-5v2h3V21h-3v2h8v-2h-3z'
}));
});
QCU132.displayName = 'QCU132';
QCU132.propTypes = {
'aria-hidden': PropTypes.bool,
'aria-label': PropTypes.string,
'aria-labelledby': PropTypes.string,
className: PropTypes.string,
children: PropTypes.node,
height: PropTypes.number,
preserveAspectRatio: PropTypes.string,
tabIndex: PropTypes.string,
viewBox: PropTypes.string,
width: PropTypes.number,
xmlns: PropTypes.string
};
QCU132.defaultProps = {
width: 32,
height: 32,
viewBox: '0 0 32 32',
xmlns: 'http://www.w3.org/2000/svg',
preserveAspectRatio: 'xMidYMid meet'
};
return QCU132;
}))); | props.tabIndex = tabindex; |
math.py | import builtins
import warnings
import numpy as np
from aesara import config, printing
from aesara import scalar as aes
from aesara.gradient import DisconnectedType
from aesara.graph.basic import Apply, Variable
from aesara.graph.op import COp, Op
from aesara.graph.params_type import ParamsType
from aesara.graph.type import Generic
from aesara.misc.safe_asarray import _asarray
from aesara.printing import pprint
from aesara.scalar.basic import BinaryScalarOp
from aesara.tensor.basic import (
alloc,
arange,
as_tensor_variable,
cast,
concatenate,
constant,
patternbroadcast,
stack,
switch,
)
from aesara.tensor.elemwise import (
CAReduce,
CAReduceDtype,
DimShuffle,
Elemwise,
scalar_elemwise,
)
from aesara.tensor.shape import shape
from aesara.tensor.type import (
complex_dtypes,
continuous_dtypes,
discrete_dtypes,
int_dtypes,
integer_dtypes,
tensor,
uint_dtypes,
)
from aesara.tensor.type_other import NoneConst
from aesara.tensor.utils import as_list
from aesara.tensor.var import TensorConstant, _tensor_py_operators
# We capture the builtins that we are going to replace to follow the numpy API
_abs = builtins.abs
if int(config.tensor__cmp_sloppy) > 1:
# This config variable is a quick-and-dirty way to get low-precision
# comparisons. For a more precise setting of these tolerances set
# them explicitly in your user code by assigning, for example,
# "aesara.tensor.math.float32_atol = ..."
# When config.tensor__cmp_sloppy>1 we are even more sloppy. This is
# useful for testing the GPU, as it doesn't use extended precision and
# this causes differences bigger than the normal sloppy tolerances.
float16_atol = 1e-2
float16_rtol = 5e-2
float32_atol = 5e-4
float32_rtol = 1e-3
float64_rtol = 1e-4
float64_atol = 1e-3
elif int(config.tensor__cmp_sloppy):
float16_atol = 5e-3
float16_rtol = 1e-2
float32_atol = 1e-4
float32_rtol = 1e-3
float64_rtol = 1e-4
float64_atol = 1e-3
else:
# If you change these values in a test, don't forget to put them back
# when the test ends, including the case when the test fails.
float16_atol = 1e-3
float16_rtol = 1e-3
float32_atol = 1e-5
float32_rtol = 1e-5
# defaults in numpy.allclose
# Don't be stricter than numpy's rtol;
# that causes useless errors.
float64_rtol = 1.0000000000000001e-05
float64_atol = 1e-8
def _get_atol_rtol(a, b):
tiny = ("float16",)
narrow = ("float32", "complex64")
if (str(a.dtype) in tiny) or (str(b.dtype) in tiny):
atol = float16_atol
rtol = float16_rtol
elif (str(a.dtype) in narrow) or (str(b.dtype) in narrow):
atol = float32_atol
rtol = float32_rtol
else:
atol = float64_atol
rtol = float64_rtol
return atol, rtol
def _allclose(a, b, rtol=None, atol=None):
a = np.asarray(a)
b = np.asarray(b)
atol_, rtol_ = _get_atol_rtol(a, b)
if rtol is not None:
rtol_ = rtol
if atol is not None:
atol_ = atol
return np.allclose(a, b, atol=atol_, rtol=rtol_)
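# Usage sketch for the helpers above: tolerances default to the dtype-based
# constants picked by _get_atol_rtol (the less precise operand dtype wins),
# and explicit keyword arguments override them, e.g.
#   _allclose(np.float32(1.0), np.float64(1.0))               # float32 tolerances
#   _allclose(np.float32(1.0), np.float64(1.001), rtol=1e-2)  # looser relative tolerance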
class MaxAndArgmax(COp):
"""
Calculate the max and argmax over a given axis or over all axes.
"""
nin = 2 # tensor, axis
nout = 2 # max val, max idx
E_axis = "invalid axis"
params_type = Generic()
__props__ = ("axis",)
_f16_ok = True
def __init__(self, axis):
assert isinstance(axis, list)
self.axis = tuple(axis)
def get_params(self, node):
return self.axis
def make_node(self, x):
x = as_tensor_variable(x)
# We keep the original broadcastable flags for dimensions on which
# we do not perform the max / argmax.
all_axes = set(self.axis)
broadcastable = [
b for i, b in enumerate(x.type.broadcastable) if i not in all_axes
]
inputs = [x]
outputs = [
tensor(x.type.dtype, broadcastable, name="max"),
tensor("int64", broadcastable, name="argmax"),
]
return Apply(self, inputs, outputs)
def perform(self, node, inp, outs, params):
x = inp[0]
axes = params
max, max_idx = outs
if axes is None:
axes = tuple(range(x.ndim))
else:
axes = tuple(int(ax) for ax in axes)
max[0] = _asarray(np.max(x, axes), dtype=node.outputs[0].dtype)
# Numpy does not support multiple axes for argmax
# Work around
keep_axes = np.array([i for i in range(x.ndim) if i not in axes], dtype="int64")
# Not-reduced axes in front
transposed_x = np.transpose(x, np.concatenate((keep_axes, axes)))
kept_shape = transposed_x.shape[: len(keep_axes)]
reduced_shape = transposed_x.shape[len(keep_axes) :]
# Numpy.prod returns 1.0 when arg is empty, so we cast it to int64
# Otherwise reshape would complain citing float arg
new_shape = kept_shape + (np.prod(reduced_shape, dtype="int64"),)
reshaped_x = transposed_x.reshape(new_shape)
max_idx[0] = _asarray(np.argmax(reshaped_x, axis=-1), dtype="int64")
def c_code(self, node, name, inp, out, sub):
if len(self.axis) != 1 and len(self.axis) != node.inputs[0].ndim:
raise NotImplementedError(
"NumPy C-API can compute max and argmax only for 1 axis or for all axes."
)
x = inp[0]
axis = sub["params"]
max, argmax = out
fail = sub["fail"]
ret = """
#if PY_MAJOR_VERSION >= 3
#ifndef PyInt_AS_LONG
#define PyInt_AS_LONG PyLong_AS_LONG
#endif
#endif
int axis;
if (PyTuple_GET_SIZE(%(axis)s) == PyArray_NDIM(%(x)s)) {
axis = NPY_MAXDIMS;
} else if(PyTuple_GET_SIZE(%(axis)s) == 1) {
PyObject* axis_object = PyTuple_GET_ITEM(%(axis)s, 0);
axis = (int)PyInt_AS_LONG(axis_object);
if (axis > PyArray_NDIM(%(x)s)-1 || axis < -PyArray_NDIM(%(x)s)) {
PyErr_SetString(PyExc_ValueError,
"MaxAndArgmax: bad axis argument");
%(fail)s
}
} else {
PyErr_SetString(PyExc_NotImplementedError,
"MaxAndArgmax: NumPy C-API can compute max and argmax only for 1 axis or for all axes.");
%(fail)s
}
Py_CLEAR(%(max)s);
Py_CLEAR(%(argmax)s);//todo pass them as out parameter.
%(max)s = (PyArrayObject*)PyArray_Max(%(x)s, axis, NULL);
if (%(max)s == NULL) {
%(fail)s;
}
if (!PyArray_CheckExact(%(max)s)) {
%(max)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(max)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);
if(%(max)s == NULL){
%(fail)s;
}
}
%(argmax)s = (PyArrayObject*)PyArray_ArgMax(%(x)s, axis, NULL);
if (%(argmax)s == NULL) {
Py_CLEAR(%(max)s);
%(fail)s;
}
if (!PyArray_CheckExact(%(argmax)s)) {
%(argmax)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(argmax)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);
if(%(argmax)s == NULL){
%(fail)s;
}
}
if (PyArray_TYPE(%(argmax)s) != NPY_INT64) {
PyObject * tmp = PyArray_Cast(%(argmax)s, NPY_INT64);
if (NULL == tmp){
%(fail)s;
}
Py_DECREF(%(argmax)s);
%(argmax)s = (PyArrayObject*)tmp;
}
"""
return ret % locals()
def c_code_cache_version(self):
return (5,)
def infer_shape(self, fgraph, node, shapes):
ishape = shapes[0]
rval = tuple(
ishape[i]
for (i, b) in enumerate(node.inputs[0].type.broadcastable)
if i not in self.axis
)
return [rval, rval]
def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return [None, None]
if len(self.axis) != 1:
raise ValueError("R_op supported for arg_max only for " "one axis!")
if self.axis[0] > 1:
raise ValueError("R_op supported for arg_max only when " " axis is 0 or 1")
if inputs[0].ndim != 2:
raise ValueError(
"R_op supported for arg_max only when " " input is a matrix"
)
max_vals, max_pos = self.make_node(*inputs).outputs
if self.axis[0] == 0:
return [eval_points[0][max_pos, arange(eval_points[0].shape[1])], None]
else:
return [eval_points[0][arange(eval_points[0].shape[0]), max_pos], None]
def grad(self, inp, grads):
# The strict sense mathematical gradient of the maximum function is
# not calculated here for it is not defined at every point where some
# coordinates are identical. However, since the latter set has null
# Lebesgue measure, the result may be interpreted as a weak gradient.
# @note: This function should work correctly for L{vector}s.
# (x, y), (gz, gw)
# gz*dz/dx + gw*dw/dx, gz*dz/dy + gw*dw/dy
# gMax * dMax/dx + gArgMax * dArgMax/dx,
# gMax * dMax/daxis + gArgMax * dArgMax/daxis
# g_max has one less dimension than x, so you need to complete
# g_max to x's shape when axis=0 the broadcasting mechanism
# does it automatically
x = inp[0]
axis = as_tensor_variable(self.axis)
g_max, g_max_idx = grads
g_max_disconnected = isinstance(g_max.type, DisconnectedType)
g_max_idx_disconnected = isinstance(g_max_idx.type, DisconnectedType)
# if the op is totally disconnected, so are its inputs
if g_max_disconnected and g_max_idx_disconnected:
return [DisconnectedType()(), DisconnectedType()()]
# if the max is disconnected but the argmax is not,
# the gradient on its inputs is zero
if g_max_disconnected:
return [x.zeros_like()]
if NoneConst.equals(axis):
axis_ = list(range(x.ndim))
else:
axis_ = axis
xmax = max(x, axis_)
# Raise the g_max and xmax to the same number of dim as the input.
pattern = []
out_dim = 0
if NoneConst.equals(axis):
# We are taking the max/argmax over all dimensions.
axis = None
for i in range(x.ndim):
if axis is None or i in axis.data:
pattern.append("x")
else:
pattern.append(out_dim)
out_dim += 1
g_max_pad = DimShuffle(g_max.broadcastable, pattern)(g_max)
xmax_pad = DimShuffle(xmax.broadcastable, pattern)(xmax)
# Set the grad to the correct position.
g_x = eq(xmax_pad, x) * g_max_pad
return (g_x,)
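# Sketch of the padding step above: for a 2-D input reduced over axis=(1,),
# `pattern` becomes [0, "x"], so g_max and xmax are broadcast back to the
# input shape and the incoming gradient is routed only to the positions where
# x equals its row-wise maximum, via eq(xmax_pad, x) * g_max_pad.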
class Argmax(COp):
"""
Calculate the argmax over a given axis or over all axes.
"""
nin = 2 # tensor, axis
nout = 1
E_axis = "invalid axis"
__props__ = ("axis",)
_f16_ok = True
params_type = ParamsType(c_axis=aes.int64)
def __init__(self, axis):
if axis is not None:
axis = tuple(axis)
self.axis = axis  # may be None, meaning the argmax is taken over all axes
def get_params(self, node):
if self.axis is not None and len(self.axis) == 1:
c_axis = np.int64(self.axis[0])
else:
# The value here doesn't matter, it won't be used
c_axis = np.int64(-1)
return self.params_type.get_params(c_axis=c_axis)
def make_node(self, x, axis=None):
x = as_tensor_variable(x)
if self.axis is None:
all_axes = list(range(x.ndim))
else:
all_axes = self.axis
inputs = [x]
# We keep the original broadcastable flags for dimensions on which
# we do not perform the argmax.
broadcastable = [
b for i, b in enumerate(x.type.broadcastable) if i not in all_axes
]
outputs = [tensor("int64", broadcastable, name="argmax")]
return Apply(self, inputs, outputs)
def prepare_node(self, node, storage_map, compute_map, impl):
if len(node.inputs) == 2:
raise ValueError(
"You are trying to compile a graph with an old Argmax node. Either reoptimize your graph or rebuild it to get the new node format."
)
def perform(self, node, inp, outs, params):
(x,) = inp
axes = self.axis
(max_idx,) = outs
if axes is None:
axes = tuple(range(x.ndim))
# Numpy does not support multiple axes for argmax
# Work around
keep_axes = np.array([i for i in range(x.ndim) if i not in axes], dtype="int64")
# Not-reduced axes in front
transposed_x = np.transpose(x, np.concatenate((keep_axes, axes)))
kept_shape = transposed_x.shape[: len(keep_axes)]
reduced_shape = transposed_x.shape[len(keep_axes) :]
new_shape = kept_shape + (np.prod(reduced_shape),)
reshaped_x = transposed_x.reshape(new_shape)
max_idx[0] = _asarray(np.argmax(reshaped_x, axis=-1), dtype="int64")
def c_code(self, node, name, inp, out, sub):
(x,) = inp
(argmax,) = out
fail = sub["fail"]
params = sub["params"]
if self.axis is None:
axis_code = "axis = NPY_MAXDIMS;"
else:
if len(self.axis) > 1:
raise NotImplementedError()
# params is only used here for now
axis_code = (
"""
axis = %(params)s->c_axis;
if(axis > PyArray_NDIM(%(x)s)-1 || axis < -PyArray_NDIM(%(x)s)){
PyErr_SetString(PyExc_ValueError,
"Argmax, bad axis argument");
%(fail)s
}
"""
% locals()
)
ret = """
int axis;
Py_CLEAR(%(argmax)s);//todo pass them as out parameter.
%(axis_code)s
%(argmax)s = (PyArrayObject*)PyArray_ArgMax(%(x)s, axis, NULL);
if(%(argmax)s == NULL){
%(fail)s;
}
if(!PyArray_CheckExact(%(argmax)s)){
%(argmax)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(argmax)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);
if(%(argmax)s == NULL){
%(fail)s;
}
}
if(PyArray_TYPE(%(argmax)s) != NPY_INT64){
PyObject * tmp = PyArray_Cast(%(argmax)s, NPY_INT64);
if (NULL == tmp){
%(fail)s;
}
Py_DECREF(%(argmax)s);
%(argmax)s = (PyArrayObject*)tmp;
}
"""
return ret % locals()
def c_code_cache_version(self):
return (1,)
def infer_shape(self, fgraph, node, shapes):
(ishape,) = shapes
if self.axis is None:
return [()]
rval = tuple(
[
ishape[i]
for (i, b) in enumerate(node.inputs[0].type.broadcastable)
if i not in self.axis
]
)
return [rval]
def grad(self, inp, grads):
(x,) = inp
return [x.zeros_like()]
def makeKeepDims(x, y, axis):
"""
Reintroduces into y, with length one, the axes of x that were left out
in a prior reduction of x. With this option, the resulting tensor will
broadcast correctly against the original tensor x.
"""
x = as_tensor_variable(x)
y = as_tensor_variable(y)
if axis is None:
axis = list(range(x.type.ndim))
elif isinstance(axis, (int, np.integer)):
axis = [axis]
elif isinstance(axis, np.ndarray) and axis.ndim == 0:
axis = [int(axis)]
else:
axis = [int(a) for a in axis]
newaxis = []
for a in axis:
if not isinstance(a, int):
raise ValueError("keepdims option can be used only with constant axis")
if a < 0:
a += x.type.ndim
newaxis.append(a)
i = 0
new_dims = []
for j, _ in enumerate(x.type.broadcastable):
if j in newaxis:
new_dims.append("x")
else:
new_dims.append(i)
i += 1
return DimShuffle(y.type.broadcastable, new_dims)(y)
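# Example of the pattern built above: for a 3-D x reduced over axis=1, the
# reduced y has two dimensions and new_dims is [0, "x", 1], i.e. a DimShuffle
# that reinserts a broadcastable axis at position 1 so y broadcasts against x.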
def check_and_normalize_axes(x, axis):
"""Check axes, normalize and convert them to a Python list of integers.
Parameters
----------
x: TensorVariable
axis: int, tuple or list of integers
Returns
-------
axis: list of integers
Return an empty list if argument is None.
"""
x = as_tensor_variable(x)
if axis is None:
axis = []
elif isinstance(axis, (int, np.integer)) or (
isinstance(axis, np.ndarray) and axis.ndim == 0
):
axis = [int(axis)]
elif isinstance(axis, (tuple, list, np.ndarray)):
axis = [int(i) for i in axis]
elif isinstance(axis, Variable):
if NoneConst.equals(axis):
axis = []
elif not isinstance(axis, TensorConstant):
raise TypeError(f"Computation needs a constant axis. Got {axis}")
else:
assert axis.dtype in integer_dtypes
if isinstance(axis.data, (int, np.integer)) or (
isinstance(axis.data, np.ndarray) and axis.data.ndim == 0
):
axis = [int(axis.data)]
elif isinstance(axis.data, (list, np.ndarray)):
axis = [int(i) for i in axis.data]
else:
raise TypeError(
f"Axis must be an integer, tuple, list of integers or a TensorVariable. Got {axis}"
)
if len(axis) > 0:
for i in range(len(axis)):
if axis[i] < 0:
axis[i] += x.type.ndim
if axis[i] < 0 or axis[i] >= x.type.ndim:
raise ValueError(
f"Computation needs a valid axis number for {int(x.type.ndim)}-D tensor. Got {int(axis[i])}"
)
axis = list(set(axis))
axis.sort()
return axis
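# Examples: for a 3-D tensor x, check_and_normalize_axes(x, -1) returns [2],
# check_and_normalize_axes(x, (1, -1)) returns [1, 2] (sorted, with duplicates
# removed), and check_and_normalize_axes(x, None) returns [].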
def max_and_argmax(a, axis=None, keepdims=False):
"""
Returns maximum elements and their indices obtained by iterating over
given axis.
When axis is None (the default value), the max is performed
over the flattened tensor.
Parameters
----------
keepdims : bool
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the result
will broadcast correctly against the original tensor.
"""
# Check axis and convert it to a Python list of integers.
# Axis will be used as an op param of MaxAndArgmax.
a = as_tensor_variable(a)
axis = check_and_normalize_axes(a, axis)
if len(axis) == 0:
axis = list(range(a.type.ndim))
out, argout = MaxAndArgmax(axis)(a)
if keepdims:
out = makeKeepDims(a, out, axis)
argout = makeKeepDims(a, argout, axis)
return [out, argout]
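# Usage sketch (hypothetical graph, assuming the usual aesara.tensor API):
#   import aesara.tensor as at
#   x = at.matrix("x")
#   m, am = max_and_argmax(x, axis=1, keepdims=True)
#   # both outputs keep axis 1 as a broadcastable dimension of size one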
class NonZeroCAReduce(CAReduce):
def _c_all(self, node, name, inames, onames, sub):
decl, checks, alloc, loop, end = super()._c_all(node, name, inames, onames, sub)
# We add an additional check for zero-sized dimensions (This seems like
# something that could enabled in `elemwise_cgen.make_checks`.)
iname = inames[0]
axis = self.axis
if axis is None:
axis = list(range(len(node.inputs[0].type.broadcastable)))
pattern = [0] * len(node.inputs[0].broadcastable)
for i in axis:
pattern[i] = 1
pattern_ = str(pattern)[1:-1]
decl += f"""int tosum[]={{{pattern_}}};"""
alloc += f"""
for(int i=0;i<PyArray_NDIM({iname});i++){{
if(PyArray_DIMS({iname})[i]==0 && tosum[i]){{
PyErr_Format(PyExc_ValueError,
"Input of CAReduce{{{node.op.scalar_op}}} has zero-size on axis %%d",i);
{sub["fail"]};
}}
}}
"""
return decl, checks, alloc, loop, end
class Max(NonZeroCAReduce):
nfunc_spec = ("max", 1, 1)
def __init__(self, axis):
super().__init__(aes.scalar_maximum, axis)
class Min(NonZeroCAReduce):
nfunc_spec = ("min", 1, 1)
def __init__(self, axis):
super().__init__(aes.scalar_minimum, axis)
def max(x, axis=None, keepdims=False):
"""
Returns maximum elements obtained by iterating over given axis.
When axis is None (the default value), the max is performed
over the flattened tensor.
Parameters
----------
keepdims: bool
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the result
will broadcast correctly against the original tensor.
Notes
-----
Like numpy, we raise an error when reducing a dimension with a shape of 0.
"""
# We have a choice of implementing this call with the
# CAReduce op or the MaxAndArgmax op.
# MaxAndArgmax supports grad and Rop, so we prefer to use that.
# CAReduce is faster, but optimizations will replace MaxAndArgmax[0]
# with CAReduce at compile time, so at this stage the important
# thing is supporting all user interface features, not speed.
# Some cases can be implemented only with CAReduce.
# We thus prefer to use MaxAndArgmax, if possible. It does not
# support all axis arguments, so we may need to fall back to CAReduce.
try:
out = max_and_argmax(x, axis)[0]
except Exception:
out = Max(axis)(x)
if keepdims:
out = makeKeepDims(x, out, axis)
return out
def argmax(x, axis=None, keepdims=False):
"""
Returns indices of maximum elements obtained by iterating over given axis.
When axis is None (the default value), the argmax is performed
over the flattened tensor.
Parameters
----------
keepdims : bool
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the result
will broadcast correctly against the original tensor.
"""
argout = max_and_argmax(x, axis)[1]
if keepdims:
argout = makeKeepDims(x, argout, axis)
return argout
def min(x, axis=None, keepdims=False):
"""
Returns minimum elements obtained by iterating over given axis.
When axis is None (the default value), the min is performed
over the flattened tensor.
Parameters
----------
keepdims: bool
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the result
will broadcast correctly against the original tensor.
"""
x = as_tensor_variable(x)
str_x_type = str(x.dtype)
if str_x_type.startswith("float") or str_x_type in int_dtypes:
return -max(-x, axis=axis, keepdims=keepdims)
elif str_x_type in uint_dtypes:
itype = np.iinfo(x.dtype)
max_val = np.array(itype.max, dtype=itype.dtype)
return max_val - max(max_val - x, axis=axis, keepdims=keepdims)
elif str_x_type == "bool":
return ~max(~x, axis=axis, keepdims=keepdims)
else:
# Be careful about unsigned integers, complex
raise NotImplementedError()
def argmin(x, axis=None, keepdims=False):
"""
Returns indices of minimum elements obtained by iterating over given axis.
When axis is None (the default value), the argmin is performed
over the flattened tensor.
Parameters
----------
keepdims: bool
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the result
will broadcast correctly against the original tensor.
"""
x = as_tensor_variable(x)
str_x_type = str(x.dtype)
if str_x_type.startswith("float") or str_x_type in int_dtypes:
return argmax(-x, axis=axis, keepdims=keepdims)
elif str_x_type in uint_dtypes:
itype = np.iinfo(x.dtype)
return argmax(itype.max - x, axis=axis, keepdims=keepdims)
elif str_x_type == "bool":
return argmax(~x, axis=axis, keepdims=keepdims)
else:
# Be careful about unsigned integers, complex
raise NotImplementedError()
def smallest(*args):
"""
Return the [elementwise] smallest of a variable number of arguments.
Like python's min.
"""
if len(args) == 2:
a, b = args
return switch(a < b, a, b)
else:
return min(stack(args), axis=0)
def largest(*args):
"""
Return the [elementwise] largest of a variable number of arguments.
Like python's max.
"""
if len(args) == 2:
a, b = args
return switch(a > b, a, b)
else:
return max(stack(args), axis=0)
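# Sketch: with exactly two arguments these reduce to an elementwise switch,
# e.g. smallest(a, b) is switch(a < b, a, b); with more arguments the inputs
# are stacked along a new leading axis and reduced with min/max over axis 0.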
@scalar_elemwise
def lt(a, b):
"""a < b"""
@scalar_elemwise
def gt(a, b):
"""a > b"""
@scalar_elemwise
def le(a, b):
"""a <= b"""
@scalar_elemwise
def ge(a, b):
"""a >= b"""
@scalar_elemwise
def eq(a, b):
"""a == b"""
@scalar_elemwise
def neq(a, b):
"""a != b"""
@scalar_elemwise
def isnan(a):
"""isnan(a)"""
# Rename isnan to isnan_ so it can be bypassed when not needed.
# glibc 2.23 doesn't allow isnan on int, so we remove it from the graph.
isnan_ = isnan
def isnan(a):
"""isnan(a)"""
a = as_tensor_variable(a)
if a.dtype in discrete_dtypes:
return alloc(
np.asarray(False, dtype="bool"), *[a.shape[i] for i in range(a.ndim)]
)
return isnan_(a)
@scalar_elemwise
def isinf(a):
"""isinf(a)"""
# Rename isinf to isinf_ so it can be bypassed when not needed.
# glibc 2.23 doesn't allow isinf on int, so we remove it from the graph.
isinf_ = isinf
def isinf(a):
"""isinf(a)"""
a = as_tensor_variable(a)
if a.dtype in discrete_dtypes:
return alloc(
np.asarray(False, dtype="bool"), *[a.shape[i] for i in range(a.ndim)]
)
return isinf_(a)
def allclose(a, b, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):
"""
Implement Numpy's ``allclose`` on tensors.
``absolute(a - b) <= (atol + rtol * absolute(b))``
Parameters
----------
a : tensor
Input to compare.
b : tensor
Input to compare.
rtol : float
The relative tolerance parameter.
atol : float
The absolute tolerance parameter.
equal_nan: bool
Whether to consider nan's in the same place to be close.
Returns
-------
bool
A boolean value (of type int8 returned by the tensor elementwise `all`
function) whether all elements in a and b are in the tolerance range
defined above.
Notes
-----
Not a symmetric equation. See Numpy's documentation.
"""
return all(isclose(a, b, rtol, atol, equal_nan))
def isclose(a, b, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):
"""
Implements Numpy's ``isclose`` on tensors.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
``absolute(a - b) <= (atol + rtol * absolute(b))``
Parameters
----------
a : tensor
Input to compare.
b : tensor
Input to compare.
rtol : float
The relative tolerance parameter.
atol : float
The absolute tolerance parameter.
equal_nan : bool
Whether to consider nan's in the same place to be close
Returns
-------
int8
A boolean (int8) array where two arrays are element-wise equal
within a tolerance.
Notes
-----
Not a symmetric equation. See Numpy's documentation.
Examples
--------
>>> import aesara
>>> import numpy as np
>>> a = _asarray([1e10, 1e-7], dtype="float64")
>>> b = _asarray([1.00001e10, 1e-8], dtype="float64")
>>> aesara.tensor.isclose(a, b).eval()
array([1, 0], dtype=int8)
>>> a = _asarray([1e10, 1e-8], dtype="float64")
>>> b = _asarray([1.00001e10, 1e-9], dtype="float64")
>>> aesara.tensor.isclose(a, b).eval()
array([1, 1], dtype=int8)
>>> a = _asarray([1e10, 1e-8], dtype="float64")
>>> b = _asarray([1.0001e10, 1e-9], dtype="float64")
>>> aesara.tensor.isclose(a, b).eval()
array([0, 1], dtype=int8)
>>> a = _asarray([1.0, np.nan], dtype="float64")
>>> b = _asarray([1.0, np.nan], dtype="float64")
>>> aesara.tensor.isclose(a, b).eval()
array([1, 0], dtype=int8)
>>> a = _asarray([1.0, np.nan], dtype="float64")
>>> b = _asarray([1.0, np.nan], dtype="float64")
>>> aesara.tensor.isclose(a, b, equal_nan=True).eval()
array([1, 1], dtype=int8)
>>> a = _asarray([1.0, np.inf], dtype="float64")
>>> b = _asarray([1.0, -np.inf], dtype="float64")
>>> aesara.tensor.isclose(a, b).eval()
array([1, 0], dtype=int8)
>>> a = _asarray([1.0, np.inf], dtype="float64")
>>> b = _asarray([1.0, np.inf], dtype="float64")
>>> aesara.tensor.isclose(a, b).eval()
array([1, 1], dtype=int8)
"""
# close will be an int8 array of 1 where within tolerance
# and 0 where not within tolerance or there was a nan or inf value.
diff = _abs(a - b)
tolerance = atol + rtol * _abs(b)
close_prelim = le(diff, tolerance)
a_nan = isnan(a)
b_nan = isnan(b)
nans = bitwise_or(a_nan, b_nan)
a_inf = isinf(a)
b_inf = isinf(b)
infs = bitwise_or(a_inf, b_inf)
nans_or_infs = bitwise_or(nans, infs)
# close is now an array of 0's except where elements are not nan or inf
# and are within the tolerance.
close = bitwise_and(close_prelim, bitwise_not(nans_or_infs))
# deal with signed inf values. this will make an array inf_eq of 0's
# except where inf values have the same sign.
both_infs = bitwise_and(a_inf, b_inf)
inf_signs_eq = eq(a_inf * sgn(a), b_inf * sgn(b))
inf_eq = bitwise_and(both_infs, inf_signs_eq)
# now create the potential result combining close and inf_eq
close_with_infs = bitwise_or(close, inf_eq)
# deal with comparing nan's.
if equal_nan:
both_nans = bitwise_and(a_nan, b_nan)
return bitwise_or(close_with_infs, both_nans)
# otherwise nan's aren't considered close.
else:
return close_with_infs
##########################
# Bit-wise
##########################
@scalar_elemwise
def and_(a, b):
"""bitwise a & b"""
bitwise_and = and_ # numpy name for it
@scalar_elemwise
def or_(a, b):
"""bitwise a | b"""
bitwise_or = or_ # numpy name for it
@scalar_elemwise
def xor(a, b):
"""bitwise a ^ b"""
bitwise_xor = xor # numpy name for it
@scalar_elemwise
def invert(a):
"""bitwise ~a"""
bitwise_not = invert # numpy alias for it
##########################
# Math
##########################
@scalar_elemwise
def abs(a):
"""|`a`|"""
# These are deprecated and will be removed
abs_ = abs
pprint.assign(abs, printing.PatternPrinter(("|%(0)s|", -1000)))
@scalar_elemwise
def exp(a):
"""e^`a`"""
@scalar_elemwise
def exp2(a):
"""2^`a`"""
@scalar_elemwise
def expm1(a):
"""e^`a` - 1"""
@scalar_elemwise
def neg(a):
"""-a"""
@scalar_elemwise
def reciprocal(a):
"""1.0/a"""
# This is deprecated and will be removed
inv = reciprocal
@scalar_elemwise
def log(a):
"""base e logarithm of a"""
@scalar_elemwise
def log2(a):
"""base 2 logarithm of a"""
@scalar_elemwise
def log10(a):
"""base 10 logarithm of a"""
@scalar_elemwise
def log1p(a):
"""log(1+a)"""
@scalar_elemwise
def sgn(a):
"""sign of a"""
@scalar_elemwise
def ceil(a):
"""ceiling of a"""
@scalar_elemwise
def floor(a):
"""floor of a"""
@scalar_elemwise
def trunc(a):
"""trunc of a"""
def iround(a, mode=None):
"""cast(round(a,mode),'int64')"""
return cast(round(a, mode), "int64")
def round(a, mode=None):
"""round_mode(a) with mode in [half_away_from_zero, half_to_even].
Defaults to half_to_even."""
if mode is None:
mode = "half_to_even"
if config.warn__round:
warnings.warn(
"aesara.tensor.round() changed its default from"
" `half_away_from_zero` to `half_to_even` to have"
" the same default as NumPy. Use the Aesara flag"
" `warn__round=False` to disable this warning."
)
if mode == "half_away_from_zero":
return round_half_away_from_zero(a)
elif mode == "half_to_even":
return round_half_to_even(a)
else:
raise Exception(f"round mode {mode} is not implemented.")
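# Example of the two modes above: round(2.5, "half_to_even") evaluates to 2.0
# (ties go to the nearest even value), while round(2.5, "half_away_from_zero")
# evaluates to 3.0; both build symbolic variables, so .eval() is needed to get
# a concrete number.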
@scalar_elemwise
def round_half_to_even(a):
"""round_half_to_even(a)"""
@scalar_elemwise
def round_half_away_from_zero(a):
"""round_half_away_from_zero(a)"""
@scalar_elemwise
def sqr(a):
"""square of a"""
# alias to sqr, included to maintain similarity with numpy interface
square = sqr
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None):
"""Calculate the covariance matrix.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`m = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`. Code and docstring ported from numpy.
Parameters
==========
m : array_like
A 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column is
observations of all those variables.
y : array_like, optional
An additional set of variables and observations. `y` has the same form
as that of `m`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : bool, optional
Default normalization (False) is by ``(N - 1)``, where ``N`` is the
number of observations given (unbiased estimate). If `bias` is True, then
normalization is by ``N``. These values can be overridden by using the
keyword ``ddof``.
ddof : int, optional
If not ``None`` the default value implied by `bias` is overridden.
The default value is ``None``.
Returns
=======
out : The covariance matrix of the variables.
"""
if fweights is not None:
raise NotImplementedError("fweights are not implemented")
if aweights is not None:
raise NotImplementedError("aweights are not implemented")
if not rowvar and m.shape[0] != 1:
m = m.T
if y is not None:
if not rowvar and y.shape[0] != 1:
y = y.T
m = concatenate((m, y), axis=0)
if ddof is None:
if not bias:
ddof = 1
else:
ddof = 0
# Determine the normalization
fact = m.shape[1] - ddof
m -= m.mean(axis=1, keepdims=1)
c = m.dot(m.T)
c *= constant(1) / fact
return c.squeeze()
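# Usage sketch (hypothetical variable names):
#   import aesara.tensor as at
#   m = at.matrix("m")         # rows are variables, columns are observations
#   c = cov(m)                 # unbiased estimate, normalized by N - 1
#   c_b = cov(m, bias=True)    # normalized by N instead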
@scalar_elemwise
def sqrt(a):
"""square root of a"""
@scalar_elemwise
def deg2rad(a):
"""convert degree a to radian"""
@scalar_elemwise
def rad2deg(a):
"""convert radian a to degree"""
@scalar_elemwise
def cos(a):
"""cosine of a"""
@scalar_elemwise
def arccos(a):
"""arccosine of a"""
@scalar_elemwise
def sin(a):
"""sine of a"""
@scalar_elemwise
def arcsin(a):
"""arcsine of a"""
@scalar_elemwise
def tan(a):
"""tangent of a"""
@scalar_elemwise
def arctan(a):
"""arctangent of a"""
@scalar_elemwise
def arctan2(a, b):
"""arctangent of a / b"""
@scalar_elemwise
def cosh(a):
"""hyperbolic cosine of a"""
@scalar_elemwise
def arccosh(a):
"""hyperbolic arc cosine of a"""
@scalar_elemwise
def sinh(a):
"""hyperbolic sine of a"""
@scalar_elemwise
def arcsinh(a):
"""hyperbolic arc sine of a"""
@scalar_elemwise
def tanh(a):
"""hyperbolic tangent of a"""
@scalar_elemwise
def arctanh(a):
"""hyperbolic arc tangent of a"""
@scalar_elemwise
def erf(a):
"""error function"""
@scalar_elemwise
def erfc(a):
"""complementary error function"""
@scalar_elemwise
def erfcx(a):
"""scaled complementary error function"""
@scalar_elemwise
def erfinv(a):
"""inverse error function"""
@scalar_elemwise
def erfcinv(a):
"""inverse complementary error function"""
@scalar_elemwise
def gamma(a):
"""gamma function"""
@scalar_elemwise
def gammaln(a):
"""log gamma function"""
@scalar_elemwise
def psi(a):
"""derivative of log gamma function"""
@scalar_elemwise
def tri_gamma(a):
"""second derivative of the log gamma function"""
@scalar_elemwise
def chi2sf(x, k):
"""chi squared survival function"""
@scalar_elemwise
def gammainc(k, x):
"""Regularized lower gamma function"""
@scalar_elemwise
def gammaincc(k, x):
"""Regularized upper gamma function"""
@scalar_elemwise
def gammau(k, x):
"""Upper incomplete gamma function."""
@scalar_elemwise
def gammal(k, x):
"""Lower incomplete gamma function."""
@scalar_elemwise
def j0(x):
"""Bessel function of the first kind of order 0."""
@scalar_elemwise
def j1(x):
"""Bessel function of the first kind of order 1."""
@scalar_elemwise
def jv(v, x):
"""Bessel function of the first kind of order v (real)."""
@scalar_elemwise
def i0(x):
"""Modified Bessel function of the first kind of order 0."""
@scalar_elemwise
def i1(x):
"""Modified Bessel function of the first kind of order 1."""
@scalar_elemwise
def iv(v, x):
"""Modified Bessel function of the first kind of order v (real)."""
@scalar_elemwise
def sigmoid(x):
|
expit = sigmoid
@scalar_elemwise
def softplus(x):
"""Compute log(1 + exp(x)), also known as softplus or log1pexp"""
log1pexp = softplus
@scalar_elemwise
def log1mexp(x):
"""Compute log(1 - exp(x)), also known as log1mexp"""
@scalar_elemwise
def betainc(a, b, x):
"""Regularized incomplete beta function"""
@scalar_elemwise
def real(z):
"""Return real component of complex-valued tensor `z`"""
_tensor_py_operators.real = property(real)
@scalar_elemwise
def imag(z):
"""Return imaginary component of complex-valued tensor `z`"""
_tensor_py_operators.imag = property(imag)
@scalar_elemwise
def angle(z):
"""Return polar-coordinate angle of complex-valued tensor `z`"""
@scalar_elemwise # numpy.complex cannot build tensors
def complex(real, imag):
"""Return complex-valued tensor with `real` and `imag` components"""
@scalar_elemwise
def conj(z):
"""Return the complex conjugate of `z`."""
@scalar_elemwise
def complex_from_polar(abs, angle):
"""Return complex-valued tensor from polar coordinate specification."""
class Mean(CAReduce):
def __init__(self, axis=None):
super().__init__(aes.add, axis)
assert self.axis is None or len(self.axis) == 1
def __str__(self):
if self.axis is not None:
return "Mean{%s}" % (", ".join(str(x) for x in self.axis))
else:
return "Mean"
def _output_dtype(self, idtype):
# we want to protect against overflow
return "float64"
def perform(self, node, inp, out):
(input,) = inp
(output,) = out
if self.axis is None:
axis = None
else:
axis = self.axis[0]
# numpy.asarray is needed as otherwise we can end up with a
# numpy scalar.
output[0] = np.asarray(np.mean(input, dtype="float64", axis=axis))
def c_code(self, node, name, inames, onames, sub):
ret = super().c_code(node, name, inames, onames, sub)
if self.axis is not None:
return ret
# TODO: the C code supports only the axis=None case
return (
ret
+ f"""
*((double *)PyArray_DATA({onames[0]})) /= PyArray_SIZE({inames[0]});
"""
)
# TODO: implement the grad. When done and tested, you can make this the default
# version.
# def grad(self, (x,), (gout,)):
# import pdb;pdb.set_trace()
# return grad(mean(x, self.axis, op=False),[x])
def mean(input, axis=None, dtype=None, op=False, keepdims=False, acc_dtype=None):
"""
Computes the mean value along the given axis(es) of a tensor `input`.
Parameters
----------
axis : None or int or (list of int) (see `Sum`)
Compute the mean along this axis of the tensor.
None means all axes (like numpy).
dtype: None or string
Dtype to cast the result of the inner summation into.
For instance, by default, a sum of a float32 tensor will be
done in float64 (acc_dtype would be float64 by default),
but that result will be cast back to float32.
keepdims: bool
If this is set to True, the axes which are reduced are
left in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original tensor.
acc_dtype: None or string
Dtype to use for the inner summation. This will not
necessarily be the dtype of the output (in particular
if it is a discrete (int/uint) dtype, the output will
be in a float type). If None, then we use the same rules as `sum()`.
Notes
-----
For gpu, if you specify dtype=float32, everything will be done on the gpu.
"""
input = as_tensor_variable(input)
if op:
if dtype not in (None, "float64"):
raise NotImplementedError(
"The Mean op does not support the dtype argument, "
"and will always use float64. If you want to specify "
"the dtype, call tensor.mean(..., op=False).",
dtype,
)
if acc_dtype not in (None, "float64"):
raise NotImplementedError(
"The Mean op does not support the acc_dtype argument, "
"and will always use float64. If you want to specify "
"acc_dtype, call tensor.mean(..., op=False).",
dtype,
)
out = Mean(axis)(input)
if keepdims:
out = makeKeepDims(input, out, axis)
return out
if dtype is not None:
# The summation will be done with the specified dtype.
# sum() will complain if it is not suitable.
sum_dtype = dtype
else:
sum_dtype = None
# float16 overflows on the cast way too often
if input.dtype == "float16":
sum_dtype = "float32"
s = sum(input, axis=axis, dtype=sum_dtype, keepdims=keepdims, acc_dtype=acc_dtype)
shp = shape(input)
# Cast shp into a float type
# TODO Once we have a consistent casting policy, we could simply
# use true_div.
if s.dtype in ("float16", "float32", "complex64"):
shp = cast(shp, "float32")
else:
shp = cast(shp, "float64")
if axis is None:
axis = list(range(input.ndim))
elif isinstance(axis, (int, np.integer)):
axis = [axis]
elif isinstance(axis, np.ndarray) and axis.ndim == 0:
axis = [int(axis)]
else:
axis = [int(a) for a in axis]
# This sequential division will possibly be optimized by Aesara:
for i in axis:
s = true_div(s, shp[i])
# This can happen when axis is an empty list/tuple
if s.dtype != shp.dtype and s.dtype in discrete_dtypes:
s = cast(s, shp.dtype)
if dtype == "float16" or (dtype is None and input.dtype == "float16"):
s = cast(s, "float16")
s.name = "mean"
return s
def var(input, axis=None, ddof=0, keepdims=False, corrected=False):
"""
Computes the variance along the given axis(es) of a tensor `input`.
Parameters
----------
axis: None or int or (list of int) (see `Sum`)
Compute the variance along this axis of the tensor.
None means all axes (like numpy).
ddof: Degrees of freedom; 0 would compute the ML estimate, 1 would compute
the unbiased estimate.
keepdims : bool
If this is set to True, the axes which are reduced are
left in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original tensor.
corrected : bool
If this is set to True, the 'corrected_two_pass' algorithm is
used to compute the variance.
Refer : http://www.cs.yale.edu/publications/techreports/tr222.pdf
Notes
-----
Default uses the two-pass algorithm (reference below).
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
Also supports 'corrected_two_pass' algorithm (using the 'corrected' flag)
which is numerically more stable. There exist other implementations that
offer better stability, but they are probably slower.
"""
if isinstance(ddof, (bool)):
raise ValueError(
"Parameter keepdims is now at index 3: (input, \
axis=None, ddof=0, keepdims=False, corrected=False)"
)
input_ndim = input.type.ndim
if axis is None:
axis = list(range(input_ndim))
elif isinstance(axis, (int, np.integer)):
axis = [axis]
elif isinstance(axis, np.ndarray) and axis.ndim == 0:
axis = [int(axis)]
else:
axis = [int(a) for a in axis]
# compute the axis-wise mean
mean_input = mean(input, axis, keepdims=True)
# center the input
centered_input = input - mean_input
# return the mean sqr
two = constant(2, dtype=centered_input.dtype)
if ddof == 0:
v = mean((centered_input ** two), axis, keepdims=keepdims)
else:
shp = shape(input) - ddof
v = sum((centered_input ** two), axis=axis, keepdims=keepdims)
for i in axis:
v = true_div(v, shp[i])
# use 'corrected_two_pass' algorithm
if corrected:
if ddof == 0:
error = mean(centered_input, axis, keepdims=keepdims) ** 2
else:
shp = shape(input) - ddof
shp_inp = shape(input)
error = sum(centered_input, axis=axis, keepdims=keepdims) ** 2
for i in axis:
error = true_div(error, shp[i] * shp_inp[i])
v = v - error
v.name = "var"
return v
def std(input, axis=None, ddof=0, keepdims=False, corrected=False):
"""
Computes the standard deviation along the given axis(es) of a tensor `input`.
Parameters
----------
axis: None or int or (list of int) (see `Sum`)
Compute the variance along this axis of the tensor.
None means all axes (like numpy).
ddof: Degrees of freedom; 0 would compute the ML estimate, 1 would compute
the unbiased estimate.
keepdims : bool
If this is set to True, the axes which are reduced are
left in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original tensor.
corrected : bool
If this is set to True, the 'corrected_two_pass' algorithm is
used to compute the variance.
Refer : http://www.cs.yale.edu/publications/techreports/tr222.pdf
Notes
-----
It calls 'var()' and 'var()' uses the two-pass algorithm (reference below).
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
Function 'var()' also supports 'corrected_two_pass' algorithm (using the
'corrected' flag) which is numerically more stable. There exist other
implementations that offer better stability, but probably slower.
"""
if isinstance(ddof, (bool)):
raise ValueError(
"Parameter keepdims is now at index 3: (input, \
axis=None, ddof=0, keepdims=False, corrected=False)"
)
ret = sqrt(
var(input=input, axis=axis, ddof=ddof, keepdims=keepdims, corrected=corrected)
)
ret.name = "std"
return ret
@scalar_elemwise(symbolname="scalar_maximum")
def maximum(x, y):
"""elemwise maximum. See max for the maximum in one tensor"""
# see decorator for function body
@scalar_elemwise(symbolname="scalar_minimum")
def minimum(x, y):
"""elemwise minimum. See min for the minimum in one tensor"""
# see decorator for function body
def divmod(x, y):
"""elementvise divmod, using floor_div and mod_check"""
return floor_div(x, y), mod_check(x, y)
@scalar_elemwise
def add(a, *other_terms):
"""elementwise addition"""
# see decorator for function body
@scalar_elemwise
def sub(a, b):
"""elementwise subtraction"""
# see decorator for function body
@scalar_elemwise
def mul(a, *other_terms):
"""elementwise multiplication"""
# see decorator for function body
@scalar_elemwise
def true_div(a, b):
"""elementwise [true] division (inverse of multiplication)"""
# see decorator for function body
@scalar_elemwise
def int_div(a, b):
"""elementwise [floor] division (inverse of multiplication)"""
# see decorator for function body
# floor_div and int_div are the same thing
floor_div = int_div
def ceil_intdiv(a, b):
"""
Safely compute ceil(float_division(a, b)).
Works for all dtypes, but mostly useful when a and b are int.
"""
# If a and b are int with not many significant bits, we could
# cast them to float to avoid doing the modulo. We do not know if this
# is faster or not. But this is not safe for int64 as the cast will
# lose precision.
# e.g.: cast(cast(a, scalar.upcast(a, 'float32')) / b, aes.upcast(a, b))
# We cast for the case when a and b are uint*. Otherwise neq will
# force their upcast to int.
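# Illustrative worked example (added for clarity, not part of the original
# comments): for a = 7 and b = 2, int_div(7, 2) == 3 and 7 % 2 == 1, so
# neq(1, 0) casts to 1 and the result is 3 + 1 == 4 == ceil(7 / 2); for
# a = 6 and b = 2 the remainder is 0 and the result stays 3.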
div = int_div(a, b)
ret = cast(neq(a % b, 0), div.dtype) + div
assert ret.dtype == aes.upcast(div.owner.inputs[0], div.owner.inputs[1])
return ret
def mod_check(x, y):
"""Make sure we do not try to use complex numbers."""
if (
as_tensor_variable(x).dtype in complex_dtypes
or as_tensor_variable(y).dtype in complex_dtypes
):
# Currently forbidden.
raise aes.Mod.complex_error
else:
return mod(x, y)
@scalar_elemwise
def mod(a, b):
"""elementwise modulo"""
# see decorator for function body
@scalar_elemwise
def pow(a, b):
"""elementwise power"""
# see decorator for function body
@scalar_elemwise
def clip(x, min, max):
"""
Clip x to be between min and max.
Note that when `x` is equal to the boundaries, the output is considered
to be `x`, so at these points, the gradient of the cost wrt the output
will be propagated to `x`, not to `min` nor `max`. In other words,
on these points, the gradient wrt `x` will be equal to the gradient wrt
the output, and the gradient wrt `min` and `max` will be zero.
"""
# see decorator for function body
# for grep: clamp, bound
pprint.assign(add, printing.OperatorPrinter("+", -2, "either"))
pprint.assign(mul, printing.OperatorPrinter("*", -1, "either"))
pprint.assign(sub, printing.OperatorPrinter("-", -2, "left"))
pprint.assign(neg, printing.OperatorPrinter("-", 0, "either"))
pprint.assign(true_div, printing.OperatorPrinter("/", -1, "left"))
pprint.assign(int_div, printing.OperatorPrinter("//", -1, "left"))
pprint.assign(pow, printing.OperatorPrinter("**", 1, "right"))
class Dot(Op):
"""
Computes the dot product of two variables. For two matrices, this is
equivalent to matrix multiplication. For two vectors, this is the inner
product.
Notes
-----
Matrix-matrix products are sometimes optimized to Dot22 or Gemm ops
(see tensor.blas).
Vector-vector products are sometimes optimized to Ger or CGer (see
tensor.blas).
Matrix-vector products are sometimes optimized to Gemv, CGemv (see
tensor.blas).
"""
__props__ = ()
# the rationale for Dot22 is related to getting GEMM Ops into the
# graph. See Dot22 in tensor.blas for details.
def make_node(self, *inputs):
inputs = list(map(as_tensor_variable, inputs))
if len(inputs) != 2:
raise TypeError(f"Two arguments required, {len(inputs)} given ")
if inputs[0].ndim not in (1, 2):
raise TypeError(
"Input 0 (0-indexed) must have ndim of "
f"1 or 2, {int(inputs[0].ndim)} given. Consider calling "
"aesara.tensor.dot instead."
)
if inputs[1].ndim not in (1, 2):
raise TypeError(
"Input 1 (0-indexed) must have ndim of "
f"1 or 2, {int(inputs[1].ndim)} given. Consider calling "
"aesara.tensor.dot instead."
)
i_broadcastables = [input.type.broadcastable for input in inputs]
bx, by = i_broadcastables
if len(by) == 2: # y is a matrix
bz = bx[:-1] + by[-1:]
elif len(by) == 1: # y is vector
bz = bx[:-1]
i_dtypes = [input.type.dtype for input in inputs]
outputs = [tensor(aes.upcast(*i_dtypes), bz)]
return Apply(self, inputs, outputs)
def perform(self, node, inp, out):
x, y = inp
(z,) = out
# the asarray is here because dot between two vectors
# gives a numpy float object but we need to return a 0d
# ndarray
z[0] = np.asarray(np.dot(x, y))
def grad(self, inp, grads):
x, y = inp
(gz,) = grads
xdim, ydim, gdim = x.type.ndim, y.type.ndim, gz.type.ndim
# grad is scalar, so x is vector and y is vector
if gdim == 0:
xgrad = gz * y
ygrad = gz * x
# x is vector, y is matrix, grad is vector
elif xdim == 1 and ydim == 2:
xgrad = dot(gz, y.T)
ygrad = outer(x.T, gz)
# x is matrix, y is vector, grad is vector
elif xdim == 2 and ydim == 1:
xgrad = outer(gz, y.T)
ygrad = dot(x.T, gz)
# x is matrix, y is matrix, grad is matrix
elif xdim == ydim == 2:
xgrad = dot(gz, y.T)
ygrad = dot(x.T, gz)
# If x or y contain broadcastable dimensions but only one of
# them knows that a matching dimension is broadcastable, the
# above code doesn't always return the right broadcast pattern.
# This causes problems down the road. See gh-1461.
if xgrad.broadcastable != x.broadcastable:
xgrad = patternbroadcast(xgrad, x.broadcastable)
if ygrad.broadcastable != y.broadcastable:
ygrad = patternbroadcast(ygrad, y.broadcastable)
rval = xgrad, ygrad
for elem in rval:
assert elem.dtype.find("float") != -1
return rval
def R_op(self, inputs, eval_points):
# R_op for a \dot b evaluated at c for a and d for b is
# simply c \dot b + a \dot d
assert len(inputs) == 2
assert len(eval_points) == 2
if eval_points[0] is None and eval_points[1] is None:
return [None]
if eval_points[0]:
t1 = self(eval_points[0], inputs[1])
if eval_points[1]:
t2 = self(inputs[0], eval_points[1])
if eval_points[0] and eval_points[1]:
return [t1 + t2]
elif eval_points[0]:
return [t1]
else:
return [t2]
def infer_shape(self, fgraph, node, shapes):
xshp, yshp = shapes
x, y = node.inputs
# vector / vector
if x.ndim == 1 and y.ndim == 1:
return [()]
# matrix / vector
if x.ndim == 2 and y.ndim == 1:
return [xshp[:-1]]
# vector / matrix
if x.ndim == 1 and y.ndim == 2:
return [yshp[-1:]]
# matrix / matrix
if x.ndim == 2 and y.ndim == 2:
return [xshp[:-1] + yshp[-1:]]
raise NotImplementedError()
def __str__(self):
return "dot"
_dot = Dot()
pprint.assign(
_dot, printing.OperatorPrinter(printing.special["middle_dot"], -1, "left")
)
def dot(l, r):
"""Return a symbolic dot product.
This is designed to work with both sparse and dense tensors types.
"""
if not isinstance(l, Variable):
l = as_tensor_variable(l)
if not isinstance(r, Variable):
r = as_tensor_variable(r)
try:
res = l.__dot__(r)
if res is NotImplemented:
raise NotImplementedError
except (NotImplementedError, AttributeError, TypeError):
res = r.__rdot__(l)
if res is NotImplemented:
raise NotImplementedError()
return res
def dense_dot(a, b):
"""
Computes the dot product of two variables.
For two matrices, this is equivalent to matrix multiplication.
For two vectors, this is the inner product.
When one variable is a scalar, this is like elementwise multiplication.
For N dimensions, this is a sum product over the last axis
of the first array and the second-to-last axis of the second array:
dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
Note that this dot function does one of three things, in the following
sequence:
1. If either a or b is scalar, it returns the elementwise product
without calling the Aesara Dot op.
2. If either a or b has more than 2 dimensions, it calls Aesara's
tensordot function with appropriate axes. The tensordot function
expresses high-dimensional dot products in terms of 2D matrix
multiplications, so it may be possible to further optimize for
performance.
3. If both a and b have either 1 or 2 dimensions, it calls Aesara's
Dot op on a and b.
Notes
-----
Matrix-matrix products are sometimes optimized to Dot22 or Gemm ops
(see tensor.blas).
Vector-vector products are sometimes optimized to Ger or CGer (see
tensor.blas).
Matrix-vector products are sometimes optimized to Gemv, CGemv (see
tensor.blas).
"""
a, b = as_tensor_variable(a), as_tensor_variable(b)
if a.ndim == 0 or b.ndim == 0:
return a * b
elif a.ndim > 2 or b.ndim > 2:
return tensordot(a, b, [[a.ndim - 1], [np.maximum(0, b.ndim - 2)]])
else:
return _dot(a, b)
def _tensordot_as_dot(a, b, axes, dot, batched):
"""
Reduces a tensor dot product to a matrix or vector dot product. Based
on code from Tijmen Tieleman's gnumpy
(http://www.cs.toronto.edu/~tijmen/gnumpy.html).
Please see the documentation of tensordot for the meaning of the a, b
and axes arguments.
:param dot: a function that accepts two symbolic variables and computes
the appropriate dot product (e.g. dot, batched_dot)
:type dot: function
:param batched: whether to treat the first axis of a and b as a batch
axis. If so, this axis will be preserved in the output,
allowing this function to be used also for batched
tensor dot products.
:type batched: boolean
:returns: a tensor with shape equal to the concatenation of a's shape
(less any dimensions that were summed over) and b's shape
(less the first dimension and any dimensions that were summed
over).
:rtype: symbolic tensor
"""
a, b = as_tensor_variable(a), as_tensor_variable(b)
if not np.isscalar(axes) and len(axes) != 2:
raise ValueError(
"Axes should be an integer or a "
"list/tuple of len 2 ({axes} was provided)"
)
# if 'axes' is a number of axes to multiply and sum over (trailing axes
# of a, leading axes of b), we can just reshape and use dot.
elif np.isscalar(axes):
axes = int(axes)
for operand_name, operand in (("a", a), ("b", b)):
if axes > operand.ndim:
raise ValueError(
f"axes can not be larger than the dimension of {operand_name} "
f"({operand_name}.ndim={operand.ndim}, axes={axes})"
)
if batched and axes == operand.ndim:
raise ValueError(
"axes to sum over must not include the batch axis "
f"of {operand_name} ({operand_name}.ndim={operand.ndim}, axes={axes})"
)
batch_axes = 1 if batched else 0
a_outaxes = slice(0, a.ndim - axes)
b_outaxes = slice(batch_axes + axes, b.ndim)
outshape = concatenate([a.shape[a_outaxes], b.shape[b_outaxes]])
outbcast = a.broadcastable[a_outaxes] + b.broadcastable[b_outaxes]
outndim = len(outbcast)
a_shape = [1] * 2
b_shape = [1] * 2
# compute total size of summed axes
for i in range(0, axes):
a_shape[1] *= a.shape[-(i + 1)]
b_shape[0] *= b.shape[batch_axes + i]
# compute total size of other axes
for i in range(0, a.ndim - axes - batch_axes):
a_shape[0] *= a.shape[batch_axes + i]
for i in range(0, b.ndim - axes - batch_axes):
b_shape[1] *= b.shape[-(i + 1)]
if batched:
a_shape.insert(0, a.shape[0])
b_shape.insert(0, b.shape[0])
a_reshaped = a.reshape(a_shape)
b_reshaped = b.reshape(b_shape)
out_reshaped = dot(a_reshaped, b_reshaped)
out = out_reshaped.reshape(outshape, outndim)
# Make sure the broadcastable pattern of the result is correct,
# since some shape information can be lost in the reshapes.
return patternbroadcast(out, outbcast)
# if 'axes' is a list, transpose a and b such that the summed axes of a
# are last and the summed axes of b are first.
else:
axes = [as_list(axes_) for axes_ in axes]
if len(axes[0]) != len(axes[1]):
raise ValueError("Axes elements must have the same length.")
for i, (operand_name, operand) in enumerate((("a", a), ("b", b))):
if len(axes[i]) > operand.ndim:
raise ValueError(
f"axes[{i}] should be array_like with length less than "
f"the dimensions of {operand_name} ({operand_name}.ndim={operand.ndim}, len(axes[0])={len(axes[i])})."
)
if len(axes[i]) > 0 and np.max(axes[i]) >= operand.ndim:
raise ValueError(
f"axes[{i}] contains dimensions greater than or equal "
f"to {operand_name}.ndim ({operand_name}.ndim={operand.ndim}, max(axes[0])={np.max(np.array(axes[i]))})."
)
if batched and 0 in axes[i]:
raise ValueError(
"axes to sum over must not contain the batch axis "
f"(axes[{i}]={axes[i]})"
)
batch_axes = [0] if batched else []
other_axes = [
[x for x in range(operand.ndim) if x not in axes[i] and x not in batch_axes]
for i, operand in enumerate((a, b))
]
a_shuffled = a.dimshuffle(batch_axes + other_axes[0] + axes[0])
b_shuffled = b.dimshuffle(batch_axes + axes[1] + other_axes[1])
# now that a and b are in the right order, recur with integer axes
return _tensordot_as_dot(
a_shuffled, b_shuffled, len(axes[0]), dot=dot, batched=batched
)
def tensordot(a, b, axes=2):
"""
Compute a generalized dot product over provided axes.
Given two tensors a and b, tensordot computes a generalized dot product over
the provided axes. Aesara's implementation reduces all expressions to
matrix or vector dot products and is based on code from Tijmen Tieleman's
gnumpy (http://www.cs.toronto.edu/~tijmen/gnumpy.html).
Parameters
----------
a: symbolic tensor
The first tensor variable.
b: symbolic tensor
The second tensor variable
axes: int or array-like of length 2
If an integer, the number of axes to sum over.
If an array, it must have two array elements containing the axes
to sum over in each tensor.
Note that the default value of 2 is not guaranteed to work
for all values of a and b, and an error will be raised if
that is the case. The reason for keeping the default is to
maintain the same signature as numpy's tensordot function
(and np.tensordot raises analogous errors for non-compatible
inputs).
If an integer i, it is converted to an array containing
the last i dimensions of the first tensor and the first
i dimensions of the second tensor:
axes = [list(range(a.ndim - i, b.ndim)), list(range(i))]
If an array, its two elements must contain compatible axes
of the two tensors. For example, [[1, 2], [2, 0]] means sum
over the 2nd and 3rd axes of a and the 3rd and 1st axes of b.
(Remember axes are zero-indexed!) The 2nd axis of a and the
3rd axis of b must have the same shape; the same is true for
the 3rd axis of a and the 1st axis of b.
Returns
-------
symbolic tensor
A tensor with shape equal to the concatenation of a's shape
(less any dimensions that were summed over) and b's shape
(less any dimensions that were summed over).
Examples
--------
It may be helpful to consider an example to see what tensordot does.
Aesara's implementation is identical to NumPy's. Here a has shape (2, 3, 4)
and b has shape (5, 6, 4, 3). The axes to sum over are [[1, 2], [3, 2]] --
note that a.shape[1] == b.shape[3] and a.shape[2] == b.shape[2]; these axes
are compatible. The resulting tensor will have shape (2, 5, 6) -- the
dimensions that are not being summed:
>>> a = np.random.random((2,3,4))
>>> b = np.random.random((5,6,4,3))
#tensordot
>>> c = np.tensordot(a, b, [[1,2],[3,2]])
#loop replicating tensordot
>>> a0, a1, a2 = a.shape
>>> b0, b1, _, _ = b.shape
>>> cloop = np.zeros((a0,b0,b1))
#loop over non-summed indices -- these exist
#in the tensor product.
>>> for i in range(a0):
... for j in range(b0):
... for k in range(b1):
... #loop over summed indices -- these don't exist
... #in the tensor product.
... for l in range(a1):
... for m in range(a2):
... cloop[i,j,k] += a[i,l,m] * b[j,k,m,l]
>>> np.allclose(c, cloop)
True
This specific implementation avoids a loop by transposing a and b such that
the summed axes of a are last and the summed axes of b are first. The
resulting arrays are reshaped to 2 dimensions (or left as vectors, if
appropriate) and a matrix or vector dot product is taken. The result is
reshaped back to the required output dimensions.
In an extreme case, no axes may be specified. The resulting tensor
will have shape equal to the concatenation of the shapes of a and b:
>>> c = np.tensordot(a, b, 0)
>>> print(a.shape)
(2,3,4)
>>> print(b.shape)
(5,6,4,3)
>>> print(c.shape)
(2,3,4,5,6,4,3)
See the documentation of numpy.tensordot for more examples.
"""
return _tensordot_as_dot(a, b, axes, dot=dot, batched=False)
def outer(x, y):
"""Return vector-vector outer product.
If an input isn't a vector, we flatten it first.
"""
if x.ndim != 1:
x = x.flatten()
if y.ndim != 1:
y = y.flatten()
return dot(x.dimshuffle(0, "x"), y.dimshuffle("x", 0))
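# Shape illustration for `outer` (an added note, not from the original source):
# for x of shape (3,) and y of shape (4,), x.dimshuffle(0, "x") has shape
# (3, 1), y.dimshuffle("x", 0) has shape (1, 4), and their dot product is the
# (3, 4) outer-product matrix.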
class All(CAReduce):
"""Applies `logical and` to all the values of a tensor along the
specified axis(es).
"""
__props__ = ("axis",)
nfunc_spec = ("all", 1, 1)
def __init__(self, axis=None):
super().__init__(aes.and_, axis)
def _output_dtype(self, idtype):
return "bool"
def __str__(self):
if self.axis is None:
return "All"
else:
return "All{%s}" % ", ".join(map(str, self.axis))
def make_node(self, input):
input = as_tensor_variable(input)
if input.dtype != "bool":
input = neq(input, 0)
ret = super().make_node(input)
return ret
def grad(self, inp, grads):
(x,) = inp
return [x.zeros_like(config.floatX)]
class Any(CAReduce):
"""Applies `bitwise or` to all the values of a tensor along the
specified axis(es).
"""
__props__ = ("axis",)
nfunc_spec = ("any", 1, 1)
def __init__(self, axis=None):
super().__init__(aes.or_, axis)
def _output_dtype(self, idtype):
return "bool"
def __str__(self):
if self.axis is None:
return "Any"
else:
return "Any{%s}" % ", ".join(map(str, self.axis))
def make_node(self, input):
input = as_tensor_variable(input)
if input.dtype != "bool":
input = neq(input, 0)
ret = super().make_node(input)
return ret
def grad(self, inp, grads):
(x,) = inp
return [x.zeros_like(config.floatX)]
class Sum(CAReduceDtype):
"""
Sums all the values of a tensor along the specified axis(es).
Equivalent to `CAReduceDtype(scalar.add, axis=axis, dtype=dtype)`,
with the difference that this defines the gradient of sum wrt its
tensor input.
Parameters
----------
axis
Axis(es) along which the tensor should be summed
(use None to sum over all axes, and a list or tuple to sum along more
than one axis).
dtype
The dtype of the internal accumulator and returned
tensor. If None, then we use the default dtype which is the same as the
input tensor's dtype except when:
- the input dtype is a signed integer of precision < 64 bit, in
which case we use int64
- the input dtype is an unsigned integer of precision < 64 bit, in
which case we use uint64
This value does not depend on the value of "acc_dtype".
acc_dtype
The dtype of the internal accumulator.
If None (default), we use the dtype in the list below,
or the input dtype if its precision is higher:
- for int dtypes, we use at least int64;
- for uint dtypes, we use at least uint64;
- for float dtypes, we use at least float64;
- for complex dtypes, we use at least complex128.
"""
__props__ = ("axis", "dtype", "acc_dtype")
nfunc_spec = ("sum", 1, 1)
def __init__(self, axis=None, dtype=None, acc_dtype=None):
super().__init__(aes.add, axis=axis, dtype=dtype, acc_dtype=acc_dtype)
def __str__(self):
name = self.__class__.__name__
axis = ""
if self.axis is not None:
axis = ", ".join(str(x) for x in self.axis)
axis = f"axis=[{axis}], "
return f"{name}{{{axis}acc_dtype={self.acc_dtype}}}"
def L_op(self, inp, out, grads):
(x,) = inp
if out[0].dtype not in continuous_dtypes:
return [x.zeros_like(dtype=config.floatX)]
(gz,) = grads
gz = as_tensor_variable(gz)
axis = self.axis
if axis is None:
axis = list(range(x.type.ndim))
if axis == ():
return (gz,)
new_dims = []
i = 0
for j, _ in enumerate(x.type.broadcastable):
if j in axis:
new_dims.append("x")
else:
new_dims.append(i)
i += 1
ds_op = DimShuffle(gz.type.broadcastable, new_dims)
gx = Elemwise(aes.second)(x, ds_op(gz))
return [gx]
def R_op(self, inputs, eval_points):
# There is just one element in inputs and eval_points, the axis are
# part of self
if None in eval_points:
return [None]
return self(*eval_points, **dict(return_list=True))
def sum(input, axis=None, dtype=None, keepdims=False, acc_dtype=None):
"""
Computes the sum along the given axis(es) of a tensor `input`.
When axis is None (the default value), the sum is performed
over the flattened tensor.
For full documentation see `Sum`.
In particular please pay attention to the important warning when using
a custom acc_dtype.
Parameters
----------
keepdims: bool
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the result
will broadcast correctly against the original tensor.
"""
out = Sum(axis=axis, dtype=dtype, acc_dtype=acc_dtype)(input)
if keepdims:
out = makeKeepDims(input, out, axis)
return out
pprint.assign(Sum(), printing.FunctionPrinter("sum"))
class Prod(CAReduceDtype):
"""
Multiplies all the values of a tensor along the specified axis(es).
Equivalent to `CAReduce(scalar.mul, axis = axis)`, with the
difference that this defines the gradient of prod wrt its tensor
input.
"""
__props__ = ("axis", "dtype", "acc_dtype")
nfunc_spec = ("prod", 1, 1)
def __init__(self, axis=None, dtype=None, acc_dtype=None, no_zeros_in_input=False):
super().__init__(aes.mul, axis=axis, dtype=dtype, acc_dtype=acc_dtype)
self.no_zeros_in_input = no_zeros_in_input
def __setstate__(self, dct):
super().__setstate__(dct)
# Add default value to be able to reload old pickled objects.
if "no_zeros_in_input" not in dct:
self.no_zeros_in_input = False
def L_op(self, inp, out, grads):
"""
The grad of this Op would be very easy, if it were not for the case
where zeros are present in a given "group" (ie. elements reduced
together to form the product).
If no zeros are found in the elements of the product, then the
partial derivative of the product relative to one of the elements
(one of the inputs) is simply the product of the other elements.
That's easy to see from the chain rule.
Now the trick (with no zeros) is to take the overall product, then
for every original element, the partial derivative is given by
this product divided by the element itself (which equals the product
of the other terms). This is easy to do by broadcasting the original
product.
(Note that we also need to broadcast-multiply by the
"incoming gradient", ie. the gradient of the cost relative to the
output/product).
With zeros, things get more complicated. For a given group, we have 3
cases:
* No zeros in the group. Use previous trick.
* If only one zero is present, then the gradient for that element is
non-zero, but is zero for all others.
* If more than one zero is present, then all the derivatives are zero.
For the last two cases (with 1 or more zeros), we can't use the
division trick, as this gives divisions by 0.
Implementing that case-by-case logic is not as trivial, so a bunch of
hacks are piled down here to do it. Notably, for the "only one zero"
case, there's a special Op that computes the product of the elements
in the group, minus the zero (see `ProdWithoutZeros`). The trick is then
to use the division trick for groups with no zero, to use the
`ProdWithoutZeros` op where there's only one zero, and to output a
derivative of zero for any element part of a group with more than
one zero.
I do this by first counting the number of zeros in each group (see the
`aet.eq` bits), then taking this or that behavior (see `aet.switch`)
based on the result of this count.
"""
(prod_in,) = inp
(gz,) = grads
if out[0].dtype in discrete_dtypes or self.acc_dtype in discrete_dtypes:
# There is an int conversion in the way
return [prod_in.zeros_like(dtype=config.floatX)]
# Prepare the broadcasting that is used everywhere to broadcast
# over the original groups (ie. broadcast over the elements of a given
# product)
gz = as_tensor_variable(gz)
axis = self.axis
if axis is None:
axis = list(range(prod_in.type.ndim))
if axis == ():
return (gz,)
new_dims = []
i = 0
for j, _ in enumerate(prod_in.type.broadcastable):
if j in axis:
new_dims.append("x")
else:
new_dims.append(i)
i += 1
# result of the product, broadcastable over groups
prod_out = self(prod_in).dimshuffle(new_dims)
# incoming gradient, broadcastable over groups
gz = gz.dimshuffle(new_dims)
# division trick if we don't have zeros. This will contain
# NaNs to be eliminated in the `aet.switch` if we do have zeros.
grad_case_without_zeros = gz * prod_out / prod_in
if self.no_zeros_in_input:
# this handles inputs with zeros, but only certain input shapes
return [grad_case_without_zeros]
else:
where_zeros = eq(prod_in, 0.0)
sum_where_zeros = sum(where_zeros, axis=self.axis)
groups_with_single_zero = eq(sum_where_zeros, 1).dimshuffle(new_dims)
# tensor with 0 everywhere except for those places where
# a 0 part of a group with a single zero was to be found
where_single_zero = groups_with_single_zero * where_zeros
# further optimization to avoid computing ProdWithoutZeros
# if the incoming gradient is 0
where_gz_not_zero = neq(gz, 0.0)
# only take ProdWithoutZeros for the groups with single zeros
# with non-null incoming gradient
where_to_take_prod_without_zeros = (
groups_with_single_zero * where_gz_not_zero
)
# preprocess the original input so that we set 0 everywhere
# except for groups that contain a single zero, to avoid computing
# multiplications on other groups
prod_without_zeros_in = where_to_take_prod_without_zeros * prod_in
# TODO: put lazy switch here, if it'd work
# this is pretty efficient already (no multiplication if 0), but
# it'd be even better if we had a lazy if per element
prod_without_zeros = ProdWithoutZeros(axis=self.axis)(prod_without_zeros_in)
prod_without_zeros = prod_without_zeros.dimshuffle(new_dims)
groups_without_zeros = eq(sum_where_zeros, 0).dimshuffle(new_dims)
final_grad = switch(
groups_without_zeros,
grad_case_without_zeros,
switch(where_single_zero, prod_without_zeros, 0.0) * gz,
)
return [final_grad]
def c_code_cache_version(self):
return (1,)
def prod(
input,
axis=None,
dtype=None,
keepdims=False,
acc_dtype=None,
no_zeros_in_input=False,
):
"""
Computes the product along the given axis(es) of a tensor `input`.
When axis is None (the default value), the product is performed
over the flattened tensor.
For full documentation see ``tensor.elemwise.Prod``.
Parameters
----------
keepdims: bool
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the result
will broadcast correctly against the original tensor.
"""
out = Prod(
axis, dtype=dtype, acc_dtype=acc_dtype, no_zeros_in_input=no_zeros_in_input
)(input)
if keepdims:
out = makeKeepDims(input, out, axis)
return out
class MulWithoutZeros(BinaryScalarOp):
# "identity" here is zero, as in Reduce we don't want to start
# with reducing (1, something_else): this leads to the erroneous
# case where a vector of zeros is reduced by binary reductions
# of (1, 0), which always ends up as 1 (ie. the result for
# the c version, for the product of [0,0,0], is 1.0)
identity = 0.0
commutative = True
associative = True
def impl(self, x, y):
if x == 0:
return y
if y == 0:
return x
return x * y
def c_code(self, node, name, inp, out, sub):
x, y = inp
(z,) = out
return (
"%(z)s = ((%(x)s == 0) ? (%(y)s) : "
+ "((%(y)s == 0) ? (%(x)s) : ((%(y)s)*(%(x)s))) );"
) % locals()
def c_code_cache_version(self):
return (1,)
mul_without_zeros = MulWithoutZeros(aes.upcast_out, name="mul_without_zeros")
class ProdWithoutZeros(CAReduceDtype):
__props__ = ("axis", "dtype", "acc_dtype")
def __init__(self, axis=None, dtype=None, acc_dtype=None):
super().__init__(mul_without_zeros, axis=axis, dtype=dtype, acc_dtype=acc_dtype)
def grad(self, inp, grads):
from aesara.gradient import grad_not_implemented
(a,) = inp
a_grad = grad_not_implemented(
self,
0,
a,
"2nd derivatives of `product(a)` is not currently supported."
"If `a` is guaranteed to contains no zeros, use "
"`product(a, no_zeros_in_input=True)`.",
)
return [a_grad]
def any(x, axis=None, keepdims=False):
out = Any(axis)(x)
if keepdims:
out = makeKeepDims(x, out, axis)
return out
def all(x, axis=None, keepdims=False):
out = All(axis)(x)
if keepdims:
out = makeKeepDims(x, out, axis)
return out
def ptp(a, axis=None):
"""
Range of values (maximum - minimum) along an axis.
The name of the function comes from the acronym for peak to peak.
Parameters
----------
a
Input tensor.
axis
Axis along which to find the peaks. By default, flatten the array.
Returns
-------
array
A new array holding the result.
"""
a = as_tensor_variable(a)
out = max(a, axis) - min(a, axis)
return out
def power(x, y):
return x ** y
def logaddexp(*xs):
"""Logarithm of the sum of exponentiations of the inputs.
See ``numpy.logaddexp``.
Parameters
----------
xs : symbolic tensors
Input
Returns
-------
tensor
"""
return log(add(*[exp(x) for x in xs]))
def logsumexp(x, axis=None, keepdims=False):
"""Compute the log of the sum of exponentials of input elements.
See ``scipy.special.logsumexp``.
Parameters
----------
x : symbolic tensor
Input
axis : None or int or tuple of ints, optional
Axis or axes over which the sum is taken. By default axis is None,
and all elements are summed.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result will
broadcast correctly against the original array.
Returns
-------
tensor
"""
return log(sum(exp(x), axis=axis, keepdims=keepdims))
__all__ = [
"max_and_argmax",
"max",
"argmax",
"min",
"argmin",
"smallest",
"largest",
"lt",
"gt",
"le",
"ge",
"eq",
"neq",
"isnan",
"isinf",
"allclose",
"isclose",
"and_",
"bitwise_and",
"or_",
"bitwise_or",
"xor",
"bitwise_xor",
"invert",
"bitwise_not",
"abs",
"abs_",
"exp",
"exp2",
"expm1",
"neg",
"reciprocal",
"inv",
"log",
"log2",
"log10",
"log1p",
"sgn",
"ceil",
"floor",
"trunc",
"iround",
"round",
"round_half_to_even",
"round_half_away_from_zero",
"sqr",
"square",
"cov",
"sqrt",
"deg2rad",
"rad2deg",
"cos",
"arccos",
"sin",
"arcsin",
"tan",
"arctan",
"arctan2",
"cosh",
"arccosh",
"sinh",
"arcsinh",
"tanh",
"arctanh",
"erf",
"erfc",
"erfcx",
"erfinv",
"erfcinv",
"gamma",
"gammaln",
"psi",
"tri_gamma",
"chi2sf",
"gammainc",
"gammaincc",
"gammau",
"gammal",
"j0",
"j1",
"jv",
"i0",
"i1",
"iv",
"sigmoid",
"expit",
"softplus",
"log1pexp",
"log1mexp",
"betainc",
"real",
"imag",
"angle",
"complex",
"conj",
"complex_from_polar",
"sum",
"prod",
"mean",
"var",
"std",
"std",
"maximum",
"minimum",
"divmod",
"add",
"sub",
"mul",
"true_div",
"int_div",
"floor_div",
"ceil_intdiv",
"mod",
"pow",
"clip",
"dot",
"dense_dot",
"tensordot",
"outer",
"any",
"all",
"ptp",
"power",
"logaddexp",
"logsumexp",
]
| """Logistic sigmoid function (1 / (1 + exp(x)), also known as expit or inverse logit""" |
lib.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! A shim implementation of the libm crate, binding directly to the in-tree libc's versions of
//! these functions.
#[macro_use]
extern crate static_assertions;
// Make sure we aren't building for one of the "esoteric systems" on which c_int is not identical | assert_type_eq_all!(std::os::raw::c_int, i32);
extern "C" {
#[link_name = "cbrt"]
fn cbrt_raw(x: f64) -> f64;
#[link_name = "frexpf"]
fn frexpf_raw(x: f32, exp: *mut i32) -> f32;
#[link_name = "ldexp"]
fn ldexp_raw(x: f64, n: i32) -> f64;
#[link_name = "ldexpf"]
fn ldexpf_raw(x: f32, n: i32) -> f32;
#[link_name = "modf"]
fn modf_raw(x: f64, integer_part: *mut f64) -> f64;
}
/// Cube root
#[inline]
pub fn cbrt(x: f64) -> f64 {
unsafe { cbrt_raw(x) }
}
/// Decomposes given floating point value x into a normalized fraction and an integral power of two.
#[inline]
pub fn frexpf(x: f32) -> (f32, i32) {
let mut exp: i32 = 0;
let v = unsafe { frexpf_raw(x, &mut exp) };
(v, exp)
}
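// Example (illustrative, not from the original source): frexpf(8.0) returns
// (0.5, 4), since 8.0 == 0.5 * 2^4 and the mantissa is normalized into [0.5, 1).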
/// Multiplies an f64 arg by the number 2 raised to the exp power.
#[inline]
pub fn ldexp(x: f64, n: i32) -> f64 {
unsafe { ldexp_raw(x, n) }
}
/// Multiplies an f32 arg by the number 2 raised to the exp power.
#[inline]
pub fn ldexpf(x: f32, n: i32) -> f32 {
unsafe { ldexpf_raw(x, n) }
}
/// Returns the fractional and integral parts of an f64. The return ordering `(fractional_part,
/// integral_part)` is based on the libm crate from crates.io.
#[inline]
pub fn modf(x: f64) -> (f64, f64) {
let mut integral_part = 0.0;
let fractional_part = unsafe { modf_raw(x, &mut integral_part) };
(fractional_part, integral_part)
} | // to i32 (https://doc.rust-lang.org/std/os/raw/type.c_int.html). |
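// Example of the return ordering (illustrative, not from the original source):
// modf(3.75) returns (0.75, 3.0), i.e. the fractional part first, then the
// integral part.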
main.rs | use clap::{load_yaml, App};
use num_format::{Locale, ToFormattedString};
use std::io::BufRead;
use reqwest::Client;
use std::fs::OpenOptions;
fn main() {
let yaml = load_yaml!("cli.yaml");
let m = App::from(yaml).get_matches();
if let Some(matches) = m.subcommand_matches("gen") {
if let Some(i) = matches.value_of("AMOUNT") |
} else if let Some(_matches) = m.subcommand_matches("check") {
#[allow(unused_imports)]
use std::io::{stdin, stdout, Write};
let url = String::new();
// Fetch range of lines to check
// TODO: Implement RANGE check
// if let Some(_i) = matches.value_of("RANGE") {
// let beginrange = 1;
// let endrange = 10;
// println!("Checking lines {}-{}", beginrange, endrange);
// } else {
// println!("No range specified, checking whole file");
// }
match check_codes(url) {
Ok(i) => {
println!("Code checking succeeded: {:#?}", i);
},
Err(e) => {
println!("Error occured during code checking: '{}'", e);
}
}
} else {
println!("No subcommands, try -h/--help");
}
}
// Check codes for validity against discord
fn check_codes(_url: String) -> Result<(), Box<dyn std::error::Error>> {
use std::io::{prelude::*, BufReader};
let file = std::fs::File::open("./codes.txt").expect("The file 'codes.txt' does not exist!");
let aaa = std::fs::File::create("valids.txt").expect("Could not create 'valids.txt'");
let buf = BufReader::new(file);
// Proxy stuff
// Not active right now, since proxies are not working
let lines: Vec<String> = buf
.lines()
.map(|l| l.expect("[ERROR] Could not parse line"))
.collect();
let client: reqwest::blocking::Client;
client = reqwest::blocking::Client::builder()
.build()?;
for (idx, item) in lines.iter().enumerate() {
let res = client.get(format!("https://discordapp.com/api/v6/entitlements/gift-codes/{}?with_application=false&with_subscription_plan=true", item).as_str())
.send();
let res_json: serde_json::Value;
match res {
Ok(i) => {
match i.text() {
Ok(ii) => {
res_json = serde_json::from_str(ii.as_str())?;
if res_json["code"] != 10038 && res_json["global"] != false {
println!("VALID CODE: {}", item);
let mut juhu = OpenOptions::new().append(true).open("valids.txt").expect("Could not open 'valids.txt'");
juhu.write_all(item.as_bytes()).expect("Writing 'valids.txt' failed");
} else {
if res_json["message"] == "You are being rate limited." {
let retry_length = res_json["retry_after"].as_i64().unwrap_or_default();
println!("{}. Rate limit, sleeping for {}ms", idx + 1, retry_length.to_formatted_string(&Locale::de));
let sleep_dur = res_json["retry_after"].as_i64().unwrap_or(50);
std::thread::sleep(std::time::Duration::from_millis(sleep_dur as u64));
} else {
println!("{}. invalid code", idx + 1);
}
}
},
Err(e) => {
println!("{}", e);
}
}
},
Err(e) => {
println!("[ERROR] {}", e);
}
}
}
Ok(())
}
fn generate_codes(amount: u64) -> String {
println!(
"Generating {} codes",
amount.to_formatted_string(&Locale::de)
);
let mut file = std::fs::File::create("codes.txt")
.expect("Creating 'codes.txt' failed, maybe the file is already there?");
let start = std::time::Instant::now();
let mut big_string = String::new();
use rand::prelude::*;
use std::io::Write;
for i in 0..amount {
let randchar: String = rand::thread_rng()
.sample_iter(&rand::distributions::Alphanumeric)
.take(16)
.map(char::from)
.collect();
// Append random code together with url & newline to BIGSTRING
//big_string.push_str(prefix.as_str());
big_string.push_str(randchar.as_str());
big_string.push_str("\n");
if i % 100_000 == 0 {
file.write_all(big_string.as_bytes())
.expect("Could'nt write to file");
big_string = String::from("");
}
}
file.write_all(big_string.as_bytes())
.expect("Couldn't write to file");
let generating_time = start.elapsed();
println!(
"Generated {} codes | Elapsed time: generating & saving: {:#?}",
amount.to_formatted_string(&Locale::de),
generating_time,
);
return String::from("");
}
| {
let amount = i.parse::<u64>();
match amount {
Ok(i) => {
generate_codes(i);
}
Err(_e) => {
println!("The input amount is not a number!");
}
}
} |
disallow_circular_no_inline_fragments.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use crate::no_inline::NO_INLINE_DIRECTIVE_NAME;
use common::{Diagnostic, DiagnosticsResult, NamedItem};
use graphql_ir::{FragmentDefinition, FragmentSpread, OperationDefinition, Program, Validator};
use intern::string_key::{StringKey, StringKeyMap};
use thiserror::Error;
pub fn disallow_circular_no_inline_fragments(program: &Program) -> DiagnosticsResult<()> {
let mut validator = DisallowCircularNoInlineFragments::new(program);
validator.validate_program(program)
}
enum FragmentStatus {
Visiting,
Visited,
}
struct DisallowCircularNoInlineFragments<'program> {
program: &'program Program,
fragments: StringKeyMap<FragmentStatus>,
}
impl<'program> DisallowCircularNoInlineFragments<'program> {
fn new(program: &'program Program) -> Self {
Self {
program,
fragments: Default::default(),
}
}
}
impl Validator for DisallowCircularNoInlineFragments<'_> {
const NAME: &'static str = "disallow_circular_no_inline_fragments";
const VALIDATE_ARGUMENTS: bool = false;
const VALIDATE_DIRECTIVES: bool = false;
fn validate_operation(&mut self, _: &OperationDefinition) -> DiagnosticsResult<()> {
Ok(())
}
fn validate_fragment(&mut self, fragment: &FragmentDefinition) -> DiagnosticsResult<()> {
match self.fragments.get(&fragment.name.item) {
None => {
self.fragments
.insert(fragment.name.item, FragmentStatus::Visiting);
let result = self.validate_selections(&fragment.selections);
self.fragments
.insert(fragment.name.item, FragmentStatus::Visited);
result
}
Some(FragmentStatus::Visited) => Ok(()),
Some(FragmentStatus::Visiting) => {
if fragment
.directives
.named(*NO_INLINE_DIRECTIVE_NAME)
.is_some()
| else {
Ok(())
}
}
}
}
fn validate_fragment_spread(&mut self, spread: &FragmentSpread) -> DiagnosticsResult<()> {
let fragment = self.program.fragment(spread.fragment.item).unwrap();
self.validate_fragment(fragment)
}
}
#[derive(Debug, Error)]
enum ValidationMessage {
#[error("Found a circular reference from fragment '{fragment_name}'.")]
CircularFragmentReference { fragment_name: StringKey },
}
| {
Err(vec![Diagnostic::error(
ValidationMessage::CircularFragmentReference {
fragment_name: fragment.name.item,
},
fragment.name.location,
)])
} |
region.ts | export interface Region {
regId: number;
regName: string;
bankLink: number;
bankShortName: string;
}
export function compareRegions(c1: Region, c2: Region) {
const compare = c1.regName > c2.regName;
if (compare) {
return 1;
} else if ( c1.regName < c2.regName) {
return -1;
} else { return 0; }
}
export class | implements Region {
regId: number;
regName: string;
bankLink: number;
bankShortName: string;
constructor() {
this.regName = '';
this.bankLink = 0;
this.bankShortName = '';
}
}
| DefaultRegion |
config.js | const { Pool } = require('pg');
const AWS = require("aws-sdk");
const fs = require('fs');
const SQL = require('@nearform/sql');
const config = JSON.parse(fs.readFileSync(`${__dirname}/config.${process.env.ENV}.json`));
AWS.config = {
...AWS.config,
...config.aws.config
};
const sqs = new AWS.SQS();
function sendNewGlamMessage(glam) {
return sqs.sendMessage({
QueueUrl: config.aws.newGlamQueueUrl,
MessageBody: JSON.stringify(glam),
}).promise()
}
config.glamUser.realm = 'User area';
const glamUser = config.glamUser;
glamUser.users.push(config.admin);
config.admin.realm = 'Admin area';
const cassandraPgPool = new Pool(config.postgres);
const glams = {};
async function loadGlams() {
const query = `SELECT * FROM glams`;
const result = await cassandraPgPool.query(query)
result.rows.forEach(element => {
const glam = {
name: element.name,
fullname: element.fullname,
category: element.category,
image: element.image,
website: element.website,
connection: new Pool({
...config.postgres,
database: element.database
})
};
if (element.lastrun) {
glam.lastrun = element.lastrun;
} else {
glam.lastrun = null;
}
if (element.status) {
glam.status = element.status;
} else {
glam.status = null;
}
if (element['http-auth']) {
glam['http-auth'] = element['http-auth'];
glam['http-auth'].realm = element.name + " stats";
}
// Glams are never deleted
glams[glam.name] = glam;
})
return glams;
}
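// Usage sketch (an added illustration, not part of the original file; assumes
// callers wait for the returned promise before serving requests):
//   loadGlams().then(glams => console.log(Object.keys(glams)));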
async function | (glam) {
const { name, fullname, category, image, database, website } = glam;
const query = SQL`INSERT INTO glams (name, fullname, category, image, database, status, website)
VALUES (${name}, ${fullname}, ${category}, ${image}, ${database}, 'pending', ${website || null})`;
await cassandraPgPool.query(query)
await sendNewGlamMessage({ name, fullname, category, image, database });
console.log(`Created new GLAM "${name}"`);
}
function updateGlam(glam) {
const { name, fullname, image, website } = glam;
const query = SQL`
UPDATE glams
SET fullname = ${fullname},
image = ${image},
website = ${website},
updated_at = NOW()
WHERE name = ${name}
`;
return cassandraPgPool.query(query);
}
module.exports = {
...config,
glamUser,
glams,
loadGlams,
insertGlam,
updateGlam,
cassandraPgPool
} | insertGlam |
hooks.go | package fixtures
var HookPush = `
{
"ref": "refs/heads/master",
"before": "4b2626259b5a97b6b4eab5e6cca66adb986b672b",
"after": "ef98532add3b2feb7a137426bba1248724367df5",
"compare_url": "http://gogs.golang.org/gordon/hello-world/compare/4b2626259b5a97b6b4eab5e6cca66adb986b672b...ef98532add3b2feb7a137426bba1248724367df5",
"commits": [
{
"id": "ef98532add3b2feb7a137426bba1248724367df5",
"message": "bump\n",
"url": "http://gogs.golang.org/gordon/hello-world/commit/ef98532add3b2feb7a137426bba1248724367df5",
"author": {
"name": "Gordon the Gopher",
"email": "[email protected]",
"username": "gordon"
}
}
],
"repository": { | "name": "hello-world",
"url": "http://gogs.golang.org/gordon/hello-world",
"description": "",
"website": "",
"watchers": 1,
"owner": {
"name": "gordon",
"email": "[email protected]",
"username": "gordon"
},
"private": true
},
"pusher": {
"name": "gordon",
"email": "[email protected]",
"username": "gordon"
},
"sender": {
"login": "gordon",
"id": 1,
"avatar_url": "http://gogs.golang.org///1.gravatar.com/avatar/8c58a0be77ee441bb8f8595b7f1b4e87"
}
}
` | "id": 1, |
test_email_backend.py | """Custom email backend for testing the project."""
import re
from django.core.mail.backends.smtp import EmailBackend as SmtpEmailBackend
from django.core.mail.message import sanitize_address
from . import default_settings as settings
class EmailBackend(SmtpEmailBackend):
"""
Email backend that sends all emails to a defined address, no matter what
the recipient really is.
In order to use it, set this in your local_settings.py::
EMAIL_BACKEND = 'django_libs.test_email_backend.EmailBackend'
TEST_EMAIL_BACKEND_RECIPIENTS = (
('Name', '[email protected]'),
)
"""
def _send(self, email_message):
"""A helper method that does the actual sending."""
if not email_message.recipients() or \
not settings.TEST_EMAIL_BACKEND_RECIPIENTS:
return False
from_email = sanitize_address(
email_message.from_email, email_message.encoding)
recipients = [sanitize_address(addr, email_message.encoding)
for name, addr in settings.TEST_EMAIL_BACKEND_RECIPIENTS]
try:
self.connection.sendmail(
from_email, recipients, email_message.message().as_string())
except:
if not self.fail_silently:
raise
return False
return True
class WhitelistEmailBackend(SmtpEmailBackend):
"""
Email backend that sends only those emails that match the whitelist
setting.
In order to use it, set this in your local_settings.py::
EMAIL_BACKEND = 'django_libs.test_email_backend.EmailBackend'
EMAIL_BACKEND_WHITELIST = [
r'.*@example\.com',
]
This setting would allow all emails to @example.com to be sent and all
others are discarded. The setting expects regex, so better test it before
adding it here to prevent errors.
If the setting does not exist, no emails are sent at all.
"""
def _send(self, email_message):
|
def clean_recipients(self, email_message):
"""Removes all the unallowed recipients."""
new_recipients = []
recipients = [sanitize_address(addr, email_message.encoding)
for addr in email_message.recipients()]
for recipient in recipients:
if self.matches_whitelist(recipient):
new_recipients.append(recipient)
elif settings.EMAIL_BACKEND_REROUTE_BLACKLIST:
for name, addr in settings.TEST_EMAIL_BACKEND_RECIPIENTS:
new_recipients.append(addr)
# remove duplicates
new_recipients = list(set(new_recipients))
return new_recipients
def matches_whitelist(self, recipient):
"""Checks if the email address matches one of the whitelist entries."""
matches = False
for entry in settings.EMAIL_BACKEND_WHITELIST:
if re.match(entry, recipient):
matches = True
return matches
| """A helper method that does the actual sending."""
from_email = sanitize_address(
email_message.from_email, email_message.encoding)
recipients = self.clean_recipients(email_message)
if not recipients:
return False
try:
self.connection.sendmail(
from_email, recipients, email_message.message().as_string())
except:
if not self.fail_silently:
raise
return False
return True |
weaviate_wellknown_liveness_parameters.go | // _ _
// __ _____ __ ___ ___ __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
// \ V V / __/ (_| |\ V /| | (_| | || __/
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
// Copyright © 2016 - 2022 SeMI Technologies B.V. All rights reserved.
//
// CONTACT: [email protected]
//
// Code generated by go-swagger; DO NOT EDIT.
package operations
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime/middleware"
)
// NewWeaviateWellknownLivenessParams creates a new WeaviateWellknownLivenessParams object
// no default values defined in spec.
func NewWeaviateWellknownLivenessParams() WeaviateWellknownLivenessParams { |
// WeaviateWellknownLivenessParams contains all the bound params for the weaviate wellknown liveness operation
// typically these are obtained from a http.Request
//
// swagger:parameters weaviate.wellknown.liveness
type WeaviateWellknownLivenessParams struct {
// HTTP Request Object
HTTPRequest *http.Request `json:"-"`
}
// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface
// for simple values it will use straight method calls.
//
// To ensure default values, the struct must have been initialized with NewWeaviateWellknownLivenessParams() beforehand.
func (o *WeaviateWellknownLivenessParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error {
var res []error
o.HTTPRequest = r
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
|
return WeaviateWellknownLivenessParams{}
}
|
attribute.go | /*
* Copyright 2017 XLAB d.o.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package cl
import (
"errors"
"fmt"
"math/big"
"strconv"
"github.com/spf13/viper"
)
type Attrs struct {
// attributes that are Known to the credential receiver and issuer
Known []*big.Int
// attributes which are Known only to the credential receiver
Hidden []*big.Int
// attributes for which the issuer knows only commitment
Committed []*big.Int
}
func NewAttrs(known, committed, hidden []*big.Int) *Attrs {
return &Attrs{
Known: known,
Hidden: hidden,
Committed: committed,
}
}
func (a *Attrs) join() []*big.Int {
all := make([]*big.Int, 0)
all = append(all, a.Known...)
all = append(all, a.Hidden...)
all = append(all, a.Committed...)
return all
}
// AttrCount holds the number of Known, Committed and
// Hidden parameters.
type AttrCount struct {
Known int
Committed int
Hidden int
}
func NewAttrCount(known, committed, hidden int) *AttrCount {
return &AttrCount{
Known: known,
Committed: committed,
Hidden: hidden,
}
}
func (c *AttrCount) String() string {
return fmt.Sprintf("Known: %d\ncommitted: %d\nhidden: %d\n",
c.Known, c.Committed, c.Hidden)
}
// CredAttr represents an attribute for the CL scheme.
type CredAttr interface {
getIndex() int
getValue() interface{}
getCond() AttrCond
UpdateValue(interface{}) error
internalValue() *big.Int
updateInternalValue(*big.Int) error
setInternalValue() error
isKnown() bool
hasVal() bool
Name() string
String() string
Validatable
}
// Validatable validates against a credential attribute.
type Validatable interface{
ValidateAgainst(interface{}) (bool, error)
}
type AttrCond int
const (
lessThan AttrCond = iota
lessThanOrEqual
greaterThan
greaterThanOrEqual
equal
none
)
var attrCondStr = []string{"lt", "lte", "gt", "gte", "equal", "none"}
func (c AttrCond) String() string {
return attrCondStr[c]
}
func parseAttrCond(cond string) (AttrCond, error) {
for i, c := range attrCondStr {
if cond == c {
return AttrCond(i), nil
}
}
return -1, fmt.Errorf("invalid condition '%s'", cond)
}
// Attr is part of a credential (RawCredential). In the case of digital identity credential,
// attributes could be for example name, Gender, Date of Birth. In the case of a credential allowing
// access to some internet service (like electronic newspaper), attributes could be
// Type (for example only news related to politics) of the service and Date of Expiration.
type Attr struct {
name string
Known bool
ValSet bool
Val *big.Int
cond AttrCond
Index int
}
func | (name string, known bool) *Attr {
return &Attr{
name: name,
Known: known,
ValSet: false,
}
}
func (a *Attr) getIndex() int {
return a.Index
}
func (a *Attr) getCond() AttrCond {
return a.cond
}
func (a *Attr) isKnown() bool {
return a.Known
}
func (a *Attr) internalValue() *big.Int {
return a.Val
}
func (a *Attr) hasVal() bool {
return a.ValSet
}
func (a *Attr) Name() string {
return a.name
}
func (a *Attr) String() string {
tag := "Known"
if !a.isKnown() {
tag = "revealed"
}
return fmt.Sprintf("%s (%s)", a.name, tag)
}
type Int64Attr struct {
Val int64
*Attr
}
func NewEmptyInt64Attr(name string, known bool) *Int64Attr {
return &Int64Attr{
Attr: newAttr(name, known),
}
}
func NewInt64Attr(name string, val int64, known bool) (*Int64Attr,
error) {
a := &Int64Attr{
Val: val,
Attr: newAttr(name, known),
}
if err := a.setInternalValue(); err != nil {
return nil, err
}
return a, nil
}
func (a *Int64Attr) setInternalValue() error {
a.Attr.Val = big.NewInt(int64(a.Val)) // FIXME
a.ValSet = true
return nil
}
func (a *Int64Attr) updateInternalValue(val *big.Int) error {
v, err := strconv.Atoi(val.String())
if err != nil {
return err
}
a.Val = int64(v)
return nil
}
func (a *Int64Attr) getValue() interface{} {
return a.Val
}
func (a *Int64Attr) UpdateValue(n interface{}) error {
	switch v := n.(type) {
	case int:
		a.Val = int64(v)
	case int64:
		a.Val = v
	default:
		return fmt.Errorf("value provided for '%s' is neither int nor int64", a.Name())
	}
return a.setInternalValue()
}
func (a *Int64Attr) ValidateAgainst(v interface{}) (bool, error) {
actual, ok := v.(int64)
if !ok {
return false, fmt.Errorf("value provided for '%s' is not int64",
a.Name())
}
var res bool
switch a.cond {
case greaterThan:
res = actual > a.Val
case greaterThanOrEqual:
res = actual >= a.Val
case lessThan:
res = actual < a.Val
case lessThanOrEqual:
res = actual <= a.Val
case equal:
res = actual == a.Val
default:
return false, errors.New("invalid condition")
}
return res, nil
}
func (a *Int64Attr) String() string {
return fmt.Sprintf("%s, type = %T", a.Attr.String(), a.Val)
}
type StrAttr struct {
Val string
*Attr
}
func NewEmptyStrAttr(name string, known bool) *StrAttr {
return &StrAttr{
Attr: newAttr(name, known),
}
}
func NewStrAttr(name, val string, known bool) (*StrAttr,
error) {
a := &StrAttr{
Val: val,
Attr: newAttr(name, known),
}
if err := a.setInternalValue(); err != nil {
return nil, err
}
return a, nil
}
func (a *StrAttr) setInternalValue() error {
a.Attr.Val = new(big.Int).SetBytes([]byte(a.Val)) // FIXME
a.ValSet = true
return nil
}
func (a *StrAttr) updateInternalValue(val *big.Int) error {
v := string(val.Bytes())
a.Val = v
return nil
}
func (a *StrAttr) getValue() interface{} {
return a.Val
}
func (a *StrAttr) UpdateValue(s interface{}) error {
a.Val = s.(string)
return a.setInternalValue()
}
func (a *StrAttr) ValidateAgainst(v interface{}) (bool, error) {
actual, ok := v.(string)
if !ok {
return false, fmt.Errorf("value provided for '%s' is not string",
a.Name())
}
if a.cond != equal {
return false, errors.New("invalid condition")
}
return actual == a.Val, nil
}
func (a *StrAttr) String() string {
return fmt.Sprintf("%s, type = %T", a.Attr.String(), a.Val)
}
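// A minimal usage sketch, not part of the original file: it only reuses identifiers defined
// above (NewInt64Attr, NewStrAttr, ValidateAgainst, greaterThanOrEqual), the attribute names and
// values are invented, and the unexported cond field is set directly only because the sketch is
// assumed to live in this package.
func exampleAttrUsage() error {
	// Known attribute with reference value 18 (its value is revealed to the issuer).
	age, err := NewInt64Attr("age", 18, true)
	if err != nil {
		return err
	}
	age.cond = greaterThanOrEqual // in-package access to the unexported condition field

	// Committed attribute: the issuer learns only a commitment to the value.
	gender, err := NewStrAttr("gender", "M", false)
	if err != nil {
		return err
	}
	_ = gender

	// Validate a presented value against the attribute's condition: 25 >= 18, so ok is true.
	ok, err := age.ValidateAgainst(int64(25))
	if err != nil {
		return err
	}
	_ = ok
	return nil
}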
// FIXME make nicer
// Hook to organization?
func parseAttrs(v *viper.Viper) ([]CredAttr, *AttrCount, error) {
if !v.IsSet("attributes") {
return nil, nil, fmt.Errorf("missing attributes declaration")
}
specs := v.GetStringMap("attributes")
attrs := make([]CredAttr, len(specs))
var nKnown, nCommitted int
for name, val := range specs { // TODO enforce proper ordering with Index
data, ok := val.(map[string]interface{})
if !ok {
return nil, nil, fmt.Errorf("invalid configuration")
}
index, ok := data["index"]
if !ok {
return nil, nil, fmt.Errorf("missing attribute index")
}
i, ok := index.(int)
if !ok {
return nil, nil, fmt.Errorf("Index must be an integer")
}
if i >= len(attrs) {
return nil, nil,
fmt.Errorf("Index too large for the provided number of attributes")
}
if attrs[i] != nil {
return nil, nil,
fmt.Errorf("duplicate index")
}
t, ok := data["type"]
if !ok {
return nil, nil, fmt.Errorf("missing type specifier")
}
known := true
k, ok := data["known"]
if ok {
res, err := strconv.ParseBool(k.(string))
if err != nil {
return nil, nil, fmt.Errorf("Known must be true or false")
}
known = res
}
if known {
nKnown++
} else {
nCommitted++
}
var condition AttrCond
cond, ok := data["cond"]
if !ok {
condition = none
} else {
c, err := parseAttrCond(cond.(string))
if err != nil {
return nil, nil, err
}
condition = c
}
switch t {
case "string":
a, err := NewStrAttr(name, "", known) // FIXME
if err != nil {
return nil, nil, err
}
a.cond = condition
attrs[i] = a
a.Index = i
case "int64":
a, err := NewInt64Attr(name, 0, known) // FIXME
if err != nil {
return nil, nil, err
}
a.cond = condition
attrs[i] = a
a.Index = i
default:
return nil, nil, fmt.Errorf("unsupported attribute type: %s", t)
}
}
// TODO Hidden params
return attrs, NewAttrCount(nKnown, nCommitted, 0), nil
}
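// A rough configuration sketch, not part of the original file: parseAttrs above reads an
// "attributes" map whose entries carry index and type keys plus optional known and cond keys.
// The attribute names and values below are invented, and the sketch additionally assumes the
// standard strings package is imported. Note that "known" must be a quoted string because the
// parser runs strconv.ParseBool on it.
func exampleParseAttrs() error {
	v := viper.New()
	v.SetConfigType("yaml")
	cfg := `
attributes:
  name:
    index: 0
    type: string
    cond: equal
  age:
    index: 1
    type: int64
    known: "true"
    cond: gte
`
	if err := v.ReadConfig(strings.NewReader(cfg)); err != nil {
		return err
	}
	attrs, count, err := parseAttrs(v)
	if err != nil {
		return err
	}
	fmt.Printf("parsed %d attributes: %s", len(attrs), count)
	return nil
}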
| newAttr |
flux_response.py | # coding: utf-8
"""
Influx API Service
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class FluxResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'flux': 'str'
}
attribute_map = {
'flux': 'flux'
}
def __init__(self, flux=None): # noqa: E501
"""FluxResponse - a model defined in OpenAPI""" # noqa: E501
self._flux = None
self.discriminator = None
if flux is not None:
self.flux = flux
@property
def flux(self):
"""Gets the flux of this FluxResponse. # noqa: E501
:return: The flux of this FluxResponse. # noqa: E501
:rtype: str
"""
return self._flux
@flux.setter
def flux(self, flux):
"""Sets the flux of this FluxResponse.
:param flux: The flux of this FluxResponse. # noqa: E501
:type: str
"""
self._flux = flux
def to_dict(self):
|
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FluxResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| """Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result |
1080p_187.ts | version https://git-lfs.github.com/spec/v1
oid sha256:41a4985b10f4406224325fe235a6b2804edd54ce91e2b60e8267d7f5b63a976a | size 1468280 | |
json_test.go | ///////////////////////////////////////////////////////////////////////
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
///////////////////////////////////////////////////////////////////////
package parser
import (
"bytes"
"encoding/json"
"testing"
"time"
"github.com/google/go-cmp/cmp"
uuid "github.com/satori/go.uuid"
"github.com/stretchr/testify/assert"
"github.com/vmware/dispatch/pkg/events"
)
var testEvent1 = events.CloudEvent{
Namespace: "dispatchframework.io",
EventType: "test.event",
EventTypeVersion: "0.1",
CloudEventsVersion: events.CloudEventsVersion,
SourceType: "test.source",
SourceID: "test.source.id",
EventID: uuid.NewV4().String(),
EventTime: time.Now(),
SchemaURL: "http://some.url.com/file",
ContentType: "application/json",
Extensions: nil,
Data: `{"example":"value"}`,
}
func eventJSON(event *events.CloudEvent) []byte {
val, _ := json.Marshal(event)
return val
}
func TestParsingEmptyList(t *testing.T) {
buf := bytes.NewBufferString("[]")
p := &JSONEventParser{}
evs, err := p.Parse(buf)
assert.NoError(t, err)
assert.Len(t, evs, 0)
}
func TestParsingMalformed(t *testing.T) |
func TestParsingCorrect(t *testing.T) {
buf := bytes.NewBuffer(eventJSON(&testEvent1))
p := &JSONEventParser{}
evs, err := p.Parse(buf)
assert.NoError(t, err)
assert.Len(t, evs, 1)
assert.True(t, cmp.Equal(testEvent1, evs[0]))
}
| {
buf := bytes.NewBufferString("{gdsgsdgs}")
p := &JSONEventParser{}
evs, err := p.Parse(buf)
assert.Error(t, err)
assert.Len(t, evs, 0)
} |
pages.module.ts | import { CommonModule } from '@angular/common';
import { NgModule } from '@angular/core';
import { PagesRoutingModule } from './pages-routing.module';
import { AboutPageComponent } from './about-page.component';
@NgModule({
imports: [
CommonModule,
PagesRoutingModule
],
declarations: [
AboutPageComponent
]
})
export class | {}
| PagesModule |
writeback.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Type resolution: the phase that finds all the types in the AST with
// unresolved type variables and replaces "ty_var" types with their
// substitutions.
use self::ResolveReason::*;
use astconv::AstConv;
use check::FnCtxt;
use middle::pat_util;
use middle::ty::{self, Ty, MethodCall, MethodCallee};
use middle::ty_fold::{TypeFolder,TypeFoldable};
use middle::infer;
use write_substs_to_tcx;
use write_ty_to_tcx;
use util::ppaux::Repr;
use std::cell::Cell;
use syntax::ast;
use syntax::codemap::{DUMMY_SP, Span};
use syntax::print::pprust::pat_to_string;
use syntax::visit;
use syntax::visit::Visitor;
///////////////////////////////////////////////////////////////////////////
// Entry point functions
pub fn resolve_type_vars_in_expr(fcx: &FnCtxt, e: &ast::Expr) {
assert_eq!(fcx.writeback_errors.get(), false);
let mut wbcx = WritebackCx::new(fcx);
wbcx.visit_expr(e);
wbcx.visit_upvar_borrow_map();
wbcx.visit_closures();
wbcx.visit_object_cast_map();
}
pub fn resolve_type_vars_in_fn(fcx: &FnCtxt,
decl: &ast::FnDecl,
blk: &ast::Block) {
assert_eq!(fcx.writeback_errors.get(), false);
let mut wbcx = WritebackCx::new(fcx);
wbcx.visit_block(blk);
for arg in decl.inputs.iter() {
wbcx.visit_node_id(ResolvingPattern(arg.pat.span), arg.id);
wbcx.visit_pat(&*arg.pat);
// Privacy needs the type for the whole pattern, not just each binding
if !pat_util::pat_is_binding(&fcx.tcx().def_map, &*arg.pat) {
wbcx.visit_node_id(ResolvingPattern(arg.pat.span),
arg.pat.id);
}
}
wbcx.visit_upvar_borrow_map();
wbcx.visit_closures();
wbcx.visit_object_cast_map();
}
///////////////////////////////////////////////////////////////////////////
// The Writeback context. This visitor walks the AST, checking the
// fn-specific tables to find references to types or regions. It
// resolves those regions to remove inference variables and writes the
// final result back into the master tables in the tcx. Here and
// there, it applies a few ad-hoc checks that were not convenient to
// do elsewhere.
struct WritebackCx<'cx, 'tcx: 'cx> {
fcx: &'cx FnCtxt<'cx, 'tcx>,
}
impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
fn new(fcx: &'cx FnCtxt<'cx, 'tcx>) -> WritebackCx<'cx, 'tcx> {
WritebackCx { fcx: fcx }
}
fn tcx(&self) -> &'cx ty::ctxt<'tcx> {
self.fcx.tcx()
}
}
///////////////////////////////////////////////////////////////////////////
// Impl of Visitor for Resolver
//
// This is the master code which walks the AST. It delegates most of
// the heavy lifting to the generic visit and resolve functions
// below. In general, a function is made into a `visitor` if it must
// traffic in node-ids or update tables in the type context etc.
impl<'cx, 'tcx, 'v> Visitor<'v> for WritebackCx<'cx, 'tcx> {
fn visit_item(&mut self, _: &ast::Item) {
// Ignore items
}
fn visit_stmt(&mut self, s: &ast::Stmt) {
if self.fcx.writeback_errors.get() {
return;
}
self.visit_node_id(ResolvingExpr(s.span), ty::stmt_node_id(s));
visit::walk_stmt(self, s);
}
fn visit_expr(&mut self, e: &ast::Expr) {
if self.fcx.writeback_errors.get() {
return;
}
self.visit_node_id(ResolvingExpr(e.span), e.id);
self.visit_method_map_entry(ResolvingExpr(e.span),
MethodCall::expr(e.id));
match e.node {
ast::ExprClosure(_, _, ref decl, _) => {
for input in decl.inputs.iter() {
let _ = self.visit_node_id(ResolvingExpr(e.span),
input.id);
}
}
_ => {}
}
visit::walk_expr(self, e);
}
fn visit_block(&mut self, b: &ast::Block) {
if self.fcx.writeback_errors.get() {
return;
}
self.visit_node_id(ResolvingExpr(b.span), b.id);
visit::walk_block(self, b);
}
fn visit_pat(&mut self, p: &ast::Pat) {
if self.fcx.writeback_errors.get() {
return;
}
self.visit_node_id(ResolvingPattern(p.span), p.id);
debug!("Type for pattern binding {} (id {}) resolved to {}",
pat_to_string(p),
p.id,
ty::node_id_to_type(self.tcx(), p.id).repr(self.tcx()));
visit::walk_pat(self, p);
}
fn visit_local(&mut self, l: &ast::Local) |
fn visit_ty(&mut self, t: &ast::Ty) {
match t.node {
ast::TyFixedLengthVec(ref ty, ref count_expr) => {
self.visit_ty(&**ty);
write_ty_to_tcx(self.tcx(), count_expr.id, self.tcx().types.uint);
}
_ => visit::walk_ty(self, t)
}
}
}
impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
fn visit_upvar_borrow_map(&self) {
if self.fcx.writeback_errors.get() {
return;
}
for (upvar_id, upvar_capture) in self.fcx.inh.upvar_capture_map.borrow().iter() {
let new_upvar_capture = match *upvar_capture {
ty::UpvarCapture::ByValue => ty::UpvarCapture::ByValue,
ty::UpvarCapture::ByRef(ref upvar_borrow) => {
let r = upvar_borrow.region;
let r = self.resolve(&r, ResolvingUpvar(*upvar_id));
ty::UpvarCapture::ByRef(
ty::UpvarBorrow { kind: upvar_borrow.kind, region: r })
}
};
debug!("Upvar capture for {} resolved to {}",
upvar_id.repr(self.tcx()),
new_upvar_capture.repr(self.tcx()));
self.fcx.tcx().upvar_capture_map.borrow_mut().insert(*upvar_id, new_upvar_capture);
}
}
fn visit_closures(&self) {
if self.fcx.writeback_errors.get() {
return
}
for (def_id, closure) in self.fcx.inh.closures.borrow().iter() {
let closure_ty = self.resolve(&closure.closure_type,
ResolvingClosure(*def_id));
let closure = ty::Closure {
closure_type: closure_ty,
kind: closure.kind,
};
self.fcx.tcx().closures.borrow_mut().insert(*def_id, closure);
}
}
fn visit_object_cast_map(&self) {
if self.fcx.writeback_errors.get() {
return
}
for (&node_id, trait_ref) in self.fcx
.inh
.object_cast_map
.borrow()
.iter()
{
let span = ty::expr_span(self.tcx(), node_id);
let reason = ResolvingExpr(span);
let closure_ty = self.resolve(trait_ref, reason);
self.tcx()
.object_cast_map
.borrow_mut()
.insert(node_id, closure_ty);
}
}
fn visit_node_id(&self, reason: ResolveReason, id: ast::NodeId) {
// Resolve any borrowings for the node with id `id`
self.visit_adjustments(reason, id);
// Resolve the type of the node with id `id`
let n_ty = self.fcx.node_ty(id);
let n_ty = self.resolve(&n_ty, reason);
write_ty_to_tcx(self.tcx(), id, n_ty);
debug!("Node {} has type {}", id, n_ty.repr(self.tcx()));
// Resolve any substitutions
self.fcx.opt_node_ty_substs(id, |item_substs| {
write_substs_to_tcx(self.tcx(), id,
self.resolve(item_substs, reason));
});
}
fn visit_adjustments(&self, reason: ResolveReason, id: ast::NodeId) {
match self.fcx.inh.adjustments.borrow_mut().remove(&id) {
None => {
debug!("No adjustments for node {}", id);
}
Some(adjustment) => {
let adj_object = ty::adjust_is_object(&adjustment);
let resolved_adjustment = match adjustment {
ty::AdjustReifyFnPointer(def_id) => {
ty::AdjustReifyFnPointer(def_id)
}
ty::AdjustDerefRef(adj) => {
for autoderef in 0..adj.autoderefs {
let method_call = MethodCall::autoderef(id, autoderef);
self.visit_method_map_entry(reason, method_call);
}
if adj_object {
let method_call = MethodCall::autoobject(id);
self.visit_method_map_entry(reason, method_call);
}
ty::AdjustDerefRef(ty::AutoDerefRef {
autoderefs: adj.autoderefs,
autoref: self.resolve(&adj.autoref, reason),
})
}
};
debug!("Adjustments for node {}: {:?}", id, resolved_adjustment);
self.tcx().adjustments.borrow_mut().insert(
id, resolved_adjustment);
}
}
}
fn visit_method_map_entry(&self,
reason: ResolveReason,
method_call: MethodCall) {
// Resolve any method map entry
match self.fcx.inh.method_map.borrow_mut().remove(&method_call) {
Some(method) => {
debug!("writeback::resolve_method_map_entry(call={:?}, entry={})",
method_call,
method.repr(self.tcx()));
let new_method = MethodCallee {
origin: self.resolve(&method.origin, reason),
ty: self.resolve(&method.ty, reason),
substs: self.resolve(&method.substs, reason),
};
self.tcx().method_map.borrow_mut().insert(
method_call,
new_method);
}
None => {}
}
}
fn resolve<T:TypeFoldable<'tcx>>(&self, t: &T, reason: ResolveReason) -> T {
t.fold_with(&mut Resolver::new(self.fcx, reason))
}
}
///////////////////////////////////////////////////////////////////////////
// Resolution reason.
#[derive(Copy)]
enum ResolveReason {
ResolvingExpr(Span),
ResolvingLocal(Span),
ResolvingPattern(Span),
ResolvingUpvar(ty::UpvarId),
ResolvingClosure(ast::DefId),
}
impl ResolveReason {
fn span(&self, tcx: &ty::ctxt) -> Span {
match *self {
ResolvingExpr(s) => s,
ResolvingLocal(s) => s,
ResolvingPattern(s) => s,
ResolvingUpvar(upvar_id) => {
ty::expr_span(tcx, upvar_id.closure_expr_id)
}
ResolvingClosure(did) => {
if did.krate == ast::LOCAL_CRATE {
ty::expr_span(tcx, did.node)
} else {
DUMMY_SP
}
}
}
}
}
///////////////////////////////////////////////////////////////////////////
// The Resolver. This is the type folding engine that detects
// unresolved types and so forth.
struct Resolver<'cx, 'tcx: 'cx> {
tcx: &'cx ty::ctxt<'tcx>,
infcx: &'cx infer::InferCtxt<'cx, 'tcx>,
writeback_errors: &'cx Cell<bool>,
reason: ResolveReason,
}
impl<'cx, 'tcx> Resolver<'cx, 'tcx> {
fn new(fcx: &'cx FnCtxt<'cx, 'tcx>,
reason: ResolveReason)
-> Resolver<'cx, 'tcx>
{
Resolver::from_infcx(fcx.infcx(), &fcx.writeback_errors, reason)
}
fn from_infcx(infcx: &'cx infer::InferCtxt<'cx, 'tcx>,
writeback_errors: &'cx Cell<bool>,
reason: ResolveReason)
-> Resolver<'cx, 'tcx>
{
Resolver { infcx: infcx,
tcx: infcx.tcx,
writeback_errors: writeback_errors,
reason: reason }
}
fn report_error(&self, e: infer::fixup_err) {
self.writeback_errors.set(true);
if !self.tcx.sess.has_errors() {
match self.reason {
ResolvingExpr(span) => {
span_err!(self.tcx.sess, span, E0101,
"cannot determine a type for this expression: {}",
infer::fixup_err_to_string(e));
}
ResolvingLocal(span) => {
span_err!(self.tcx.sess, span, E0102,
"cannot determine a type for this local variable: {}",
infer::fixup_err_to_string(e));
}
ResolvingPattern(span) => {
span_err!(self.tcx.sess, span, E0103,
"cannot determine a type for this pattern binding: {}",
infer::fixup_err_to_string(e));
}
ResolvingUpvar(upvar_id) => {
let span = self.reason.span(self.tcx);
span_err!(self.tcx.sess, span, E0104,
"cannot resolve lifetime for captured variable `{}`: {}",
ty::local_var_name_str(self.tcx, upvar_id.var_id).get().to_string(),
infer::fixup_err_to_string(e));
}
ResolvingClosure(_) => {
let span = self.reason.span(self.tcx);
span_err!(self.tcx.sess, span, E0196,
"cannot determine a type for this closure")
}
}
}
}
}
impl<'cx, 'tcx> TypeFolder<'tcx> for Resolver<'cx, 'tcx> {
fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
self.tcx
}
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
match self.infcx.fully_resolve(&t) {
Ok(t) => t,
Err(e) => {
debug!("Resolver::fold_ty: input type `{}` not fully resolvable",
t.repr(self.tcx));
self.report_error(e);
self.tcx().types.err
}
}
}
fn fold_region(&mut self, r: ty::Region) -> ty::Region {
match self.infcx.fully_resolve(&r) {
Ok(r) => r,
Err(e) => {
self.report_error(e);
ty::ReStatic
}
}
}
}
///////////////////////////////////////////////////////////////////////////
// During type check, we store promises with the result of trait
// lookup rather than the actual results (because the results are not
// necessarily available immediately). These routines unwind the
// promises. It is expected that we will have already reported any
// errors that may be encountered, so if the promises store an error,
// a dummy result is returned.
| {
if self.fcx.writeback_errors.get() {
return;
}
let var_ty = self.fcx.local_ty(l.span, l.id);
let var_ty = self.resolve(&var_ty, ResolvingLocal(l.span));
write_ty_to_tcx(self.tcx(), l.id, var_ty);
visit::walk_local(self, l);
} |
extension-manager.helpers.spec.ts | import { TestExtension } from '@remirror/test-fixtures';
import { transformExtensionMap } from '../extension-manager.helpers';
import { DocExtension, TextExtension } from '../nodes';
describe('transformExtensionMap', () => { | const test = new TestExtension();
const text = new TextExtension();
const extensions = [
{ extension: doc, priority: 2 },
{ extension: test, priority: 2 },
{ extension: text, priority: 2 },
];
expect(transformExtensionMap(extensions)).toEqual([doc, test, text]);
});
it('sorts the extensions by priority', () => {
const doc = new DocExtension();
const test = new TestExtension();
const text = new TextExtension();
const extensions = [
{ extension: doc, priority: 1 },
{ extension: test, priority: 2 },
{ extension: text, priority: -1 },
];
expect(transformExtensionMap(extensions)).toEqual([text, doc, test]);
});
it('can sort with default priority', () => {
const doc = new DocExtension();
const test = new TestExtension();
const text = new TextExtension();
const extensions = [{ extension: doc, priority: 1 }, test, { extension: text, priority: -1 }];
expect(transformExtensionMap(extensions)).toEqual([text, doc, test]);
});
}); | it('maps the extensions', () => {
const doc = new DocExtension(); |
snapshot_test.go | package snapshot
import (
"os"
"testing"
"github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
menv "github.com/openebs/maya/pkg/env/v1alpha1"
v1_storage "k8s.io/api/storage/v1"
)
func TestGetCreateCASTemplate(t *testing.T) {
sc := &v1_storage.StorageClass{}
sc.Annotations = make(map[string]string)
tests := map[string]struct {
scCreateCASAnnotation string
scCASTypeAnnotation string
envJivaCAST string
envCStorCAST string
expectedCAST string
}{
"CAST annotation is present": {
"cast-create-from-annotation", | "",
"",
"cast-create-from-annotation",
},
"CAST annotation is absent/empty and cas type is cstor": {
"",
"cstor",
"",
"cast-cstor-create-from-env",
"cast-cstor-create-from-env",
},
"CAST annotation is absent/empty and cas type is jiva": {
"",
"jiva",
"cast-jiva-create-from-env",
"",
"cast-jiva-create-from-env",
},
"CAST annotation is absent/empty and cas type unknown": {
"",
"unknown",
"cast-jiva-create-from-env",
"cast-cstor-create-from-env",
"",
},
}
defer func() {
os.Unsetenv(string(menv.CASTemplateToCreateCStorSnapshotENVK))
os.Unsetenv(string(menv.CASTemplateToCreateJivaSnapshotENVK))
}()
for name, test := range tests {
t.Run(name, func(t *testing.T) {
sc.Annotations[string(v1alpha1.CASTemplateKeyForSnapshotCreate)] = test.scCreateCASAnnotation
sc.Annotations[string(v1alpha1.CASTypeKey)] = test.scCASTypeAnnotation
os.Setenv(string(menv.CASTemplateToCreateCStorSnapshotENVK), test.envCStorCAST)
os.Setenv(string(menv.CASTemplateToCreateJivaSnapshotENVK), test.envJivaCAST)
castName := getCreateCASTemplate(sc)
if castName != test.expectedCAST {
t.Fatalf("unexpected cast name, wanted %q got %q", test.expectedCAST, castName)
}
})
}
}
func TestGetReadCASTemplate(t *testing.T) {
sc := &v1_storage.StorageClass{}
sc.Annotations = make(map[string]string)
tests := map[string]struct {
scReadCASAnnotation string
scCASTypeAnnotation string
envJivaCAST string
envCStorCAST string
expectedCAST string
}{
"CAST annotation is present": {
"cast-read-from-annotation",
"",
"",
"",
"cast-read-from-annotation",
},
"CAST annotation is absent/empty and cas type is cstor": {
"",
"cstor",
"",
"cast-cstor-read-from-env",
"cast-cstor-read-from-env",
},
"CAST annotation is absent/empty and cas type is jiva": {
"",
"jiva",
"cast-jiva-read-from-env",
"",
"cast-jiva-read-from-env",
},
"CAST annotation is absent/empty and cas type unknown": {
"",
"unknown",
"cast-jiva-read-from-env",
"cast-cstor-read-from-env",
"",
},
}
defer func() {
os.Unsetenv(string(menv.CASTemplateToReadCStorSnapshotENVK))
os.Unsetenv(string(menv.CASTemplateToReadJivaSnapshotENVK))
}()
for name, test := range tests {
t.Run(name, func(t *testing.T) {
sc.Annotations[string(v1alpha1.CASTemplateKeyForSnapshotRead)] = test.scReadCASAnnotation
sc.Annotations[string(v1alpha1.CASTypeKey)] = test.scCASTypeAnnotation
os.Setenv(string(menv.CASTemplateToReadCStorSnapshotENVK), test.envCStorCAST)
os.Setenv(string(menv.CASTemplateToReadJivaSnapshotENVK), test.envJivaCAST)
castName := getReadCASTemplate(sc)
if castName != test.expectedCAST {
t.Fatalf("unexpected cast name, wanted %q got %q", test.expectedCAST, castName)
}
})
}
}
func TestGetDeleteCASTemplate(t *testing.T) {
sc := &v1_storage.StorageClass{}
sc.Annotations = make(map[string]string)
tests := map[string]struct {
scDeleteCASAnnotation string
scCASTypeAnnotation string
envJivaCAST string
envCStorCAST string
expectedCAST string
}{
"CAST annotation is present": {
"cast-read-from-annotation",
"",
"",
"",
"cast-read-from-annotation",
},
"CAST annotation is absent/empty and cas type is cstor": {
"",
"cstor",
"",
"cast-cstor-read-from-env",
"cast-cstor-read-from-env",
},
"CAST annotation is absent/empty and cas type is jiva": {
"",
"jiva",
"cast-jiva-read-from-env",
"",
"cast-jiva-read-from-env",
},
"CAST annotation is absent/empty and cas type unknown": {
"",
"unknown",
"cast-jiva-read-from-env",
"cast-cstor-read-from-env",
"",
},
}
defer func() {
os.Unsetenv(string(menv.CASTemplateToDeleteCStorSnapshotENVK))
os.Unsetenv(string(menv.CASTemplateToDeleteJivaSnapshotENVK))
}()
for name, test := range tests {
t.Run(name, func(t *testing.T) {
sc.Annotations[string(v1alpha1.CASTemplateKeyForSnapshotDelete)] = test.scDeleteCASAnnotation
sc.Annotations[string(v1alpha1.CASTypeKey)] = test.scCASTypeAnnotation
os.Setenv(string(menv.CASTemplateToDeleteCStorSnapshotENVK), test.envCStorCAST)
os.Setenv(string(menv.CASTemplateToDeleteJivaSnapshotENVK), test.envJivaCAST)
castName := getDeleteCASTemplate(sc)
if castName != test.expectedCAST {
t.Fatalf("unexpected cast name, wanted %q got %q", test.expectedCAST, castName)
}
})
}
} | "", |
indexed_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
import os
import shutil
import struct
import numpy as np
import torch
from . import FairseqDataset
def __best_fitting_dtype(vocab_size=None):
if vocab_size is not None and vocab_size < 65500:
return np.uint16
else:
return np.int32
def get_available_dataset_impl():
return ['raw', 'lazy', 'cached', 'mmap']
def infer_dataset_impl(path):
if IndexedRawTextDataset.exists(path):
return 'raw'
elif IndexedDataset.exists(path):
with open(index_file_path(path), 'rb') as f:
magic = f.read(8)
if magic == IndexedDataset._HDR_MAGIC:
return 'cached'
elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
return 'mmap'
else:
return None
else:
return None
def make_builder(out_file, impl, vocab_size=None):
if impl == 'mmap':
return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size))
else:
return IndexedDatasetBuilder(out_file)
def make_dataset(path, impl, fix_lua_indexing=False, dictionary=None):
if impl == 'raw' and IndexedRawTextDataset.exists(path):
assert dictionary is not None
return IndexedRawTextDataset(path, dictionary)
elif impl == 'lazy' and IndexedDataset.exists(path):
return IndexedDataset(path, fix_lua_indexing=fix_lua_indexing)
elif impl == 'cached' and IndexedDataset.exists(path):
return IndexedCachedDataset(path, fix_lua_indexing=fix_lua_indexing)
elif impl == 'mmap' and MMapIndexedDataset.exists(path):
return MMapIndexedDataset(path)
return None
def dataset_exists(path, impl):
if impl == 'raw':
return IndexedRawTextDataset.exists(path)
elif impl == 'mmap':
return MMapIndexedDataset.exists(path)
else:
return IndexedDataset.exists(path)
def read_longs(f, n):
a = np.empty(n, dtype=np.int64)
f.readinto(a)
return a
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64))
dtypes = {
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
5: np.int64,
6: np.float,
7: np.double,
8: np.uint16
}
def code(dtype):
for k in dtypes.keys():
if dtypes[k] == dtype:
return k
raise ValueError(dtype)
def index_file_path(prefix_path):
return prefix_path + '.idx'
def data_file_path(prefix_path):
return prefix_path + '.bin'
class IndexedDataset(FairseqDataset):
"""Loader for TorchNet IndexedDataset"""
_HDR_MAGIC = b'TNTIDX\x00\x00'
def __init__(self, path, fix_lua_indexing=False):
super().__init__()
self.path = path
self.fix_lua_indexing = fix_lua_indexing
self.data_file = None
self.read_index(path)
def read_index(self, path):
with open(index_file_path(path), 'rb') as f:
magic = f.read(8)
assert magic == self._HDR_MAGIC, (
'Index file doesn\'t match expected format. '
'Make sure that --dataset-impl is configured properly.'
)
version = f.read(8)
assert struct.unpack('<Q', version) == (1,)
code, self.element_size = struct.unpack('<QQ', f.read(16))
self.dtype = dtypes[code]
self._len, self.s = struct.unpack('<QQ', f.read(16))
self.dim_offsets = read_longs(f, self._len + 1)
self.data_offsets = read_longs(f, self._len + 1)
self.sizes = read_longs(f, self.s)
def read_data(self, path):
self.data_file = open(data_file_path(path), 'rb', buffering=0)
def check_index(self, i):
if i < 0 or i >= self._len:
raise IndexError('index out of range')
def __del__(self):
if self.data_file:
self.data_file.close()
@lru_cache(maxsize=8)
def __getitem__(self, i):
if not self.data_file:
self.read_data(self.path)
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
item = torch.from_numpy(a).long()
if self.fix_lua_indexing:
item -= 1 # subtract 1 for 0-based indexing
return item
def __len__(self):
return self._len
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
|
@staticmethod
def exists(path):
return (
os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
)
@property
def supports_prefetch(self):
return False # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
def __init__(self, path, fix_lua_indexing=False):
super().__init__(path, fix_lua_indexing=fix_lua_indexing)
self.cache = None
self.cache_index = {}
@property
def supports_prefetch(self):
return True
def prefetch(self, indices):
if all(i in self.cache_index for i in indices):
return
if not self.data_file:
self.read_data(self.path)
indices = sorted(set(indices))
total_size = 0
for i in indices:
total_size += self.data_offsets[i + 1] - self.data_offsets[i]
self.cache = np.empty(total_size, dtype=self.dtype)
ptx = 0
self.cache_index.clear()
for i in indices:
self.cache_index[i] = ptx
size = self.data_offsets[i + 1] - self.data_offsets[i]
a = self.cache[ptx: ptx + size]
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
ptx += size
if self.data_file:
# close and delete data file after prefetch so we can pickle
self.data_file.close()
self.data_file = None
@lru_cache(maxsize=8)
def __getitem__(self, i):
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
ptx = self.cache_index[i]
np.copyto(a, np.reshape(self.cache[ptx: ptx + a.size], tensor_size))
item = torch.from_numpy(a).long()
if self.fix_lua_indexing:
item -= 1 # subtract 1 for 0-based indexing
return item
class IndexedRawTextDataset(FairseqDataset):
"""Takes a text file as input and binarizes it in memory at instantiation.
Original lines are also kept in memory"""
def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
self.tokens_list = []
self.lines = []
self.sizes = []
self.append_eos = append_eos
self.reverse_order = reverse_order
self.read_data(path, dictionary)
self.size = len(self.tokens_list)
def read_data(self, path, dictionary):
with open(path, 'r', encoding='utf-8') as f:
for line in f:
self.lines.append(line.strip('\n'))
tokens = dictionary.encode_line(
line, add_if_not_exist=False,
append_eos=self.append_eos, reverse_order=self.reverse_order,
).long()
self.tokens_list.append(tokens)
self.sizes.append(len(tokens))
self.sizes = np.array(self.sizes)
def check_index(self, i):
if i < 0 or i >= self.size:
raise IndexError('index out of range')
@lru_cache(maxsize=8)
def __getitem__(self, i):
self.check_index(i)
return self.tokens_list[i]
def get_original_text(self, i):
self.check_index(i)
return self.lines[i]
def __del__(self):
pass
def __len__(self):
return self.size
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
@staticmethod
def exists(path):
return os.path.exists(path)
class IndexedDatasetBuilder(object):
element_sizes = {
np.uint8: 1,
np.int8: 1,
np.int16: 2,
np.int32: 4,
np.int64: 8,
np.float: 4,
np.double: 8
}
def __init__(self, out_file, dtype=np.int32):
self.out_file = open(out_file, 'wb')
self.dtype = dtype
self.data_offsets = [0]
self.dim_offsets = [0]
self.sizes = []
self.element_size = self.element_sizes[self.dtype]
def add_item(self, tensor):
# +1 for Lua compatibility
bytes = self.out_file.write(np.array(tensor.numpy() + 1, dtype=self.dtype))
self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
for s in tensor.size():
self.sizes.append(s)
self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
def merge_file_(self, another_file):
index = IndexedDataset(another_file)
assert index.dtype == self.dtype
begin = self.data_offsets[-1]
for offset in index.data_offsets[1:]:
self.data_offsets.append(begin + offset)
self.sizes.extend(index.sizes)
begin = self.dim_offsets[-1]
for dim_offset in index.dim_offsets[1:]:
self.dim_offsets.append(begin + dim_offset)
with open(data_file_path(another_file), 'rb') as f:
while True:
data = f.read(1024)
if data:
self.out_file.write(data)
else:
break
def finalize(self, index_file):
self.out_file.close()
index = open(index_file, 'wb')
index.write(b'TNTIDX\x00\x00')
index.write(struct.pack('<Q', 1))
index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
write_longs(index, self.dim_offsets)
write_longs(index, self.data_offsets)
write_longs(index, self.sizes)
index.close()
def _warmup_mmap_file(path):
with open(path, 'rb') as stream:
while stream.read(100 * 1024 * 1024):
pass
class MMapIndexedDataset(torch.utils.data.Dataset):
class Index(object):
_HDR_MAGIC = b'MMIDIDX\x00\x00'
@classmethod
def writer(cls, path, dtype):
class _Writer(object):
def __enter__(self):
self._file = open(path, 'wb')
self._file.write(cls._HDR_MAGIC)
self._file.write(struct.pack('<Q', 1))
self._file.write(struct.pack('<B', code(dtype)))
return self
@staticmethod
def _get_pointers(sizes):
dtype_size = dtype().itemsize
address = 0
pointers = []
for size in sizes:
pointers.append(address)
address += size * dtype_size
return pointers
def write(self, sizes):
pointers = self._get_pointers(sizes)
self._file.write(struct.pack('<Q', len(sizes)))
sizes = np.array(sizes, dtype=np.int32)
self._file.write(sizes.tobytes(order='C'))
del sizes
pointers = np.array(pointers, dtype=np.int64)
self._file.write(pointers.tobytes(order='C'))
del pointers
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
return _Writer()
def __init__(self, path):
with open(path, 'rb') as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, (
'Index file doesn\'t match expected format. '
'Make sure that --dataset-impl is configured properly.'
)
version = struct.unpack('<Q', stream.read(8))
assert (1,) == version
dtype_code, = struct.unpack('<B', stream.read(1))
self._dtype = dtypes[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack('<Q', stream.read(8))[0]
offset = stream.tell()
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
self._bin_buffer = memoryview(self._bin_buffer_mmap)
self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)
self._pointers = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._len,
offset=offset + self._sizes.nbytes)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
@property
def dtype(self):
return self._dtype
@property
def sizes(self):
return self._sizes
@lru_cache(maxsize=8)
def __getitem__(self, i):
return self._pointers[i], self._sizes[i]
def __len__(self):
return self._len
def __init__(self, path):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._do_init(path)
def __getstate__(self):
return self._path
def __setstate__(self, state):
self._do_init(state)
def _do_init(self, path):
self._path = path
self._index = self.Index(index_file_path(self._path))
_warmup_mmap_file(data_file_path(self._path))
self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C')
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
return len(self._index)
@lru_cache(maxsize=8)
def __getitem__(self, i):
ptr, size = self._index[i]
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
if self._index.dtype != np.int64:
np_array = np_array.astype(np.int64)
return torch.from_numpy(np_array)
@property
def sizes(self):
return self._index.sizes
@property
def supports_prefetch(self):
return False
@staticmethod
def exists(path):
return (
os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
)
class MMapIndexedDatasetBuilder(object):
def __init__(self, out_file, dtype=np.int64):
self._data_file = open(out_file, 'wb')
self._dtype = dtype
self._sizes = []
def add_item(self, tensor):
np_array = np.array(tensor.numpy(), dtype=self._dtype)
self._data_file.write(np_array.tobytes(order='C'))
self._sizes.append(np_array.size)
def merge_file_(self, another_file):
# Concatenate index
index = MMapIndexedDataset.Index(index_file_path(another_file))
assert index.dtype == self._dtype
for size in index.sizes:
self._sizes.append(size)
# Concatenate data
with open(data_file_path(another_file), 'rb') as f:
shutil.copyfileobj(f, self._data_file)
def finalize(self, index_file):
self._data_file.close()
with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
index.write(self._sizes)
| return self.sizes[index] |
crash_1554249366815_12283_flaky_4.js | function main() {
function | (v2,v3,v4,v5) {
const v7 = [SharedArrayBuffer];
const v8 = v7.indexOf();
v8.__proto__ = Map;
for (let v12 = 0; v12 < 100; v12++) {
let v14 = undefined;
}
}
const v15 = v1();
const v16 = v1();
}
%NeverOptimizeFunction(main);
main();
| v1 |
cmd_status.go | package main
import (
"database/sql"
"fmt"
"log"
"path/filepath"
"time"
"github.com/tbruyelle/goose/lib/goose"
)
var statusCmd = &Command{
Name: "status",
Usage: "",
Summary: "dump the migration status for the current DB",
Help: `status extended help here...`,
Run: statusRun,
}
type StatusData struct {
Source string
Status string
}
func statusRun(cmd *Command, args ...string) {
conf, err := dbConfFromFlags()
if err != nil {
log.Fatal(err)
}
// collect all migrations
min := int64(0)
max := int64((1 << 63) - 1)
migrations, e := goose.CollectMigrations(conf.MigrationsDir, min, max)
if e != nil { | if e != nil {
log.Fatal("couldn't open DB:", e)
}
defer db.Close()
// must ensure that the version table exists if we're running on a pristine DB
if _, e := goose.EnsureDBVersion(conf, db); e != nil {
log.Fatal(e)
}
fmt.Printf("goose*: status for environment '%v'\n", conf.Env)
fmt.Println(" Applied At Migration")
fmt.Println(" =======================================")
for _, m := range migrations {
printMigrationStatus(db, m.Version, filepath.Base(m.Source))
}
}
func printMigrationStatus(db *sql.DB, version int64, script string) {
var row goose.MigrationRecord
q := fmt.Sprintf("SELECT tstamp, is_applied FROM goose_db_version WHERE version_id=%d ORDER BY tstamp DESC LIMIT 1", version)
e := db.QueryRow(q).Scan(&row.TStamp, &row.IsApplied)
if e != nil && e != sql.ErrNoRows {
log.Fatal(e)
}
var appliedAt string
if row.IsApplied {
appliedAt = row.TStamp.Format(time.ANSIC)
} else {
appliedAt = "Pending"
}
fmt.Printf(" %-24s -- %v\n", appliedAt, script)
} | log.Fatal(e)
}
db, e := goose.OpenDBFromDBConf(conf) |
builder_test.go | // Copyright 2018 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package routing
import (
"context"
"strings"
"testing"
"github.com/gogo/protobuf/types"
"istio.io/istio/mixer/pkg/adapter"
"istio.io/istio/mixer/pkg/il/compiled"
"istio.io/istio/mixer/pkg/runtime2/config"
"istio.io/istio/mixer/pkg/runtime2/handler"
"istio.io/istio/mixer/pkg/runtime2/testing/data"
"istio.io/istio/mixer/pkg/runtime2/testing/util"
"istio.io/istio/mixer/pkg/template"
)
// tests is a declarative test suite for the routing table. It covers both table building as well as table
// usage scenarios.
var tests = []struct {
// Name of the test
Name string
// Config values to use when using the builder to build the table. If ServiceConfig is empty, the default
// one from the testing/data package is used instead.
ServiceConfig string
Configs []string
Adapters map[string]*adapter.Info
Templates map[string]*template.Info
// ExpectedTable is the expected routing table output w/ debug info. Used by builder tests to verify the
// table structure.
ExpectedTable string
}{
{
Name: "basic",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerACheck1,
data.InstanceCheck1,
data.RuleCheck1,
},
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
[#0] TEMPLATE_VARIETY_CHECK {V}
[#0] istio-system {NS}
[#0] hcheck1.acheck.istio-system {H}
[#0]
Condition: <NONE>
[#0] icheck1.tcheck.istio-system {I}
`,
},
{
Name: "multiple-instances-check",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerACheck1,
data.InstanceCheck1,
data.InstanceCheck2,
data.RuleCheck1WithInstance1And2,
},
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
[#0] TEMPLATE_VARIETY_CHECK {V}
[#0] istio-system {NS}
[#0] hcheck1.acheck.istio-system {H}
[#0]
Condition: <NONE>
[#0] icheck1.tcheck.istio-system {I}
[#1] icheck2.tcheck.istio-system {I}
`,
},
{
Name: "multiple-instances-report",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerAReport1,
data.InstanceReport1,
data.InstanceReport2,
data.RuleReport1And2,
},
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
[#0] TEMPLATE_VARIETY_REPORT {V}
[#0] istio-system {NS}
[#0] hreport1.areport.istio-system {H}
[#0]
Condition: <NONE>
[#0] ireport1.treport.istio-system {I}
[#1] ireport2.treport.istio-system {I}
`,
},
{
Name: "check-instance-with-conditional",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerACheck1,
data.InstanceCheck1,
data.RuleCheck1WithMatchClause,
},
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
[#0] TEMPLATE_VARIETY_CHECK {V}
[#0] istio-system {NS}
[#0] hcheck1.acheck.istio-system {H}
[#0]
Condition: match(target.name, "foo*")
[#0] icheck1.tcheck.istio-system {I}
`,
},
{
Name: "instance-with-conditional-true",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerACheck1,
data.InstanceCheck1,
data.RuleCheck1TrueCondition,
},
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
[#0] TEMPLATE_VARIETY_CHECK {V}
[#0] istio-system {NS}
[#0] hcheck1.acheck.istio-system {H}
[#0]
Condition: <NONE>
[#0] icheck1.tcheck.istio-system {I}
`,
},
{
Name: "multi-instance-with-conditional",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerACheck1,
data.InstanceCheck1,
data.InstanceCheck2,
data.RuleCheck1WithInstance1And2WithMatchClause,
},
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
[#0] TEMPLATE_VARIETY_CHECK {V}
[#0] istio-system {NS}
[#0] hcheck1.acheck.istio-system {H}
[#0]
Condition: match(target.name, "foo*")
[#0] icheck1.tcheck.istio-system {I}
[#1] icheck2.tcheck.istio-system {I}
`,
},
{
Name: "multi-instance-multi-conditional",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerACheck1,
data.InstanceCheck1,
data.InstanceCheck2,
data.InstanceCheck3,
data.RuleCheck1WithInstance1And2WithMatchClause,
data.RuleCheck2WithInstance2And3WithMatchClause,
},
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
[#0] TEMPLATE_VARIETY_CHECK {V}
[#0] istio-system {NS}
[#0] hcheck1.acheck.istio-system {H}
[#0]
Condition: match(target.name, "foo*")
[#0] icheck1.tcheck.istio-system {I}
[#1] icheck2.tcheck.istio-system {I}
[#1]
Condition: target.name.startsWith("foo")
[#0] icheck2.tcheck.istio-system {I}
[#1] icheck3.tcheck.istio-system {I}
`,
},
{
Name: "multi-rule-to-same-target",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerACheck1,
data.InstanceCheck1,
data.InstanceCheck2,
data.InstanceCheck3,
data.RuleCheck1WithInstance1And2,
data.RuleCheck2WithInstance2And3,
},
// TODO(Issue #2690): We should dedupe instances that are being dispatched to a particular handler.
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
[#0] TEMPLATE_VARIETY_CHECK {V}
[#0] istio-system {NS}
[#0] hcheck1.acheck.istio-system {H}
[#0]
Condition: <NONE>
[#0] icheck1.tcheck.istio-system {I}
[#1] icheck2.tcheck.istio-system {I}
[#2] icheck2.tcheck.istio-system {I}
[#3] icheck3.tcheck.istio-system {I}
`,
},
{
Name: "multi-rule-to-same-target-with-one-conditional",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerACheck1,
data.InstanceCheck1,
data.InstanceCheck2,
data.InstanceCheck3,
data.RuleCheck1,
data.RuleCheck2WithInstance2And3WithMatchClause,
},
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
[#0] TEMPLATE_VARIETY_CHECK {V}
[#0] istio-system {NS}
[#0] hcheck1.acheck.istio-system {H}
[#0]
Condition: <NONE>
[#0] icheck1.tcheck.istio-system {I}
[#1]
Condition: target.name.startsWith("foo")
[#0] icheck2.tcheck.istio-system {I}
[#1] icheck3.tcheck.istio-system {I}
`,
},
{
Name: "multi-rule-to-multiple-target",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerACheck1,
data.HandlerACheck2,
data.InstanceCheck1,
data.InstanceCheck2,
data.RuleCheck1,
data.RuleCheck2WithHandler2AndInstance2,
},
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
[#0] TEMPLATE_VARIETY_CHECK {V}
[#0] istio-system {NS}
[#0] hcheck1.acheck.istio-system {H}
[#0]
Condition: <NONE>
[#0] icheck1.tcheck.istio-system {I}
[#1] hcheck2.acheck.istio-system {H}
[#0]
Condition: <NONE>
[#0] icheck2.tcheck.istio-system {I}
`,
},
{
Name: "bad-condition",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerACheck1,
data.InstanceCheck1,
data.RuleCheck1WithBadCondition,
},
// No routes are set, as the only rule is bad.
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
`,
},
{
Name: "bad-handler-name",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerACheck1,
data.InstanceCheck1,
data.RuleCheck1WithBadHandler,
},
// No routes are set, as the only rule is bad.
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
`,
},
{
Name: "bad-handler-builder",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerACheck1,
data.InstanceCheck1,
data.RuleCheck1,
},
Adapters: data.BuildAdapters(nil, data.FakeAdapterSettings{Name: "acheck", ErrorAtBuild: true}),
// No routes are set, as the only rule is bad.
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
`,
},
{
Name: "handler-does-not-support-template",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerACheck1,
data.InstanceCheck1,
data.RuleCheck1,
},
Templates: data.BuildTemplates(nil, data.FakeTemplateSettings{Name: "tcheck", HandlerDoesNotSupportTemplate: true}),
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
`,
},
{
Name: "different-namespace",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerACheck3NS2,
data.InstanceCheck4NS2,
data.RuleCheck3NS2,
},
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
[#0] TEMPLATE_VARIETY_CHECK {V}
[#0] ns2 {NS}
[#0] hcheck3.acheck.ns2 {H}
[#0]
Condition: <NONE>
[#0] icheck4.tcheck.ns2 {I}
`,
},
{
Name: "non-default-namespace-rules-subsume-default-namespace-rules",
ServiceConfig: data.ServiceConfig,
Configs: []string{
// default
data.HandlerACheck1,
data.InstanceCheck1,
data.RuleCheck1,
// ns2
data.HandlerACheck3NS2,
data.InstanceCheck4NS2,
data.RuleCheck3NS2,
},
// ns2 ruleset is a superset of the default set.
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
[#0] TEMPLATE_VARIETY_CHECK {V}
[#0] istio-system {NS}
[#0] hcheck1.acheck.istio-system {H}
[#0]
Condition: <NONE>
[#0] icheck1.tcheck.istio-system {I}
[#1] ns2 {NS}
[#0] hcheck1.acheck.istio-system {H}
[#0]
Condition: <NONE>
[#0] icheck1.tcheck.istio-system {I}
[#1] hcheck3.acheck.ns2 {H}
[#0]
Condition: <NONE>
[#0] icheck4.tcheck.ns2 {I}
`,
},
{
Name: "builder-mapper-error-causes-dependent-rules-to-be-omitted",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerACheck1,
data.InstanceCheck1,
data.RuleCheck1,
},
Templates: data.BuildTemplates(nil, data.FakeTemplateSettings{Name: "tcheck", ErrorAtCreateInstanceBuilder: true}),
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
`},
{
Name: "match-condition-wrong-return-type-in-match-claus-causes-rule-to-be-omitted.",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerACheck1,
data.InstanceCheck1,
data.RuleCheck1WithNonBooleanCondition,
},
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
`},
{
Name: "apa",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerAPA1,
data.InstanceAPA1,
data.RuleApa1,
},
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
[#0] TEMPLATE_VARIETY_ATTRIBUTE_GENERATOR {V}
[#0] istio-system {NS}
[#0] hapa1.apa.istio-system {H}
[#0]
Condition: <NONE>
[#0] iapa1.tapa.istio-system {I}
`},
{
Name: "apa-error-at-output-expressions",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerAPA1,
data.InstanceAPA1,
data.RuleApa1,
},
Templates: data.BuildTemplates(nil, data.FakeTemplateSettings{Name: "tapa", ErrorAtCreateOutputExpressions: true}),
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
`,
},
{
Name: "different-templates-in-same-rule",
ServiceConfig: data.ServiceConfig,
Configs: []string{
data.HandlerACheck1,
data.InstanceCheck1,
data.InstanceHalt1,
data.Rule4CheckAndHalt,
},
ExpectedTable: `
[Routing ExpectedTable]
ID: 1
[#0] TEMPLATE_VARIETY_CHECK {V}
[#0] istio-system {NS}
[#0] hcheck1.acheck.istio-system {H}
[#0]
Condition: <NONE>
[#0] icheck1.tcheck.istio-system {I}
[#1] hcheck1.acheck.istio-system {H}
[#0]
Condition: <NONE>
[#0] ihalt1.thalt.istio-system {I}`,
},
}
func TestBuilder(t *testing.T) {
for _, tst := range tests {
t.Run(tst.Name, func(tt *testing.T) {
serviceConfig := tst.ServiceConfig
if len(serviceConfig) == 0 {
serviceConfig = data.ServiceConfig
}
templates := tst.Templates
if templates == nil {
templates = data.BuildTemplates(nil)
}
adapters := tst.Adapters
if adapters == nil {
adapters = data.BuildAdapters(nil)
}
t, s := buildTableWithTemplatesAndAdapters(templates, adapters, serviceConfig, tst.Configs, true)
actual := t.String()
if normalize(actual) != normalize(tst.ExpectedTable) {
tt.Logf("Config:\n%v\n\n", tst.Configs)
tt.Logf("Snapshot:\n%s\n\n", s)
tt.Logf("Debug: true")
tt.Fatalf("got:\n%v\nwant:\n%v\n", actual, tst.ExpectedTable)
}
reachedEnd := false
defer func() {
r := recover()
if !reachedEnd |
}()
// rerun with debug = false to ensure there is no crash.
t, _ = buildTable(serviceConfig, tst.Configs, false)
_ = t.String()
reachedEnd = true
})
}
}
var (
normalizer = strings.NewReplacer("\t", "", "\n", "", " ", "")
)
// Normalize a string for textual comparison.
func normalize(str string) string {
return normalizer.Replace(str)
}
// Convenience method for building a routing Table for tests.
func buildTable(serviceConfig string, globalConfigs []string, debugInfo bool) (*Table, *config.Snapshot) {
return buildTableWithTemplatesAndAdapters(data.BuildTemplates(nil), data.BuildAdapters(nil), serviceConfig, globalConfigs, debugInfo)
}
// Convenience method for building a routing Table for tests.
func buildTableWithTemplatesAndAdapters(templates map[string]*template.Info, adapters map[string]*adapter.Info,
serviceConfig string, globalConfigs []string, debugInfo bool) (*Table, *config.Snapshot) {
if len(serviceConfig) == 0 {
serviceConfig = data.ServiceConfig
}
globalConfig := data.JoinConfigs(globalConfigs...)
s := util.GetSnapshot(templates, adapters, serviceConfig, globalConfig)
ht := handler.NewTable(handler.Empty(), s, nil)
expb := compiled.NewBuilder(s.Attributes)
return BuildTable(ht, s, expb, "istio-system", debugInfo), s
}
func TestNonPointerAdapter(t *testing.T) {
templates := data.BuildTemplates(nil)
adapters := map[string]*adapter.Info{
"acheck": {
Name: "acheck",
SupportedTemplates: []string{"tcheck", "thalt"},
DefaultConfig: &types.Struct{},
NewBuilder: func() adapter.HandlerBuilder {
return nonPointerBuilder{}
},
},
}
globalConfigs := []string{
data.HandlerACheck1,
data.InstanceCheck1,
data.InstanceCheck2,
data.RuleCheck1WithInstance1And2,
}
expected := `
[Routing ExpectedTable]
ID: 1
[#0] TEMPLATE_VARIETY_CHECK {V}
[#0] istio-system {NS}
[#0] hcheck1.acheck.istio-system {H}
[#0]
Condition: <NONE>
[#0] icheck1.tcheck.istio-system {I}
[#1] icheck2.tcheck.istio-system {I}
`
table, _ := buildTableWithTemplatesAndAdapters(templates, adapters, data.ServiceConfig, globalConfigs, true)
actual := table.String()
if normalize(actual) != normalize(expected) {
t.Fatalf("got:\n%v\nwant:\n%v\n", actual, expected)
}
}
type nonPointerBuilder struct{}
var _ adapter.HandlerBuilder = nonPointerBuilder{}
func (n nonPointerBuilder) SetAdapterConfig(adapter.Config) {
}
func (n nonPointerBuilder) Validate() *adapter.ConfigErrors {
return nil
}
func (n nonPointerBuilder) Build(context.Context, adapter.Env) (adapter.Handler, error) {
return nonPointerHandler{fn: func() {}}, nil
}
type nonPointerHandler struct {
// Make handler non-comparable.
fn func()
}
var _ adapter.Handler = nonPointerHandler{}
func (h nonPointerHandler) Close() error {
h.fn()
return nil
}
| {
tt.Fatalf("buildTable(debugInfo=false) failed with a panic: '%v'", r)
} |
testresponse.py | __author__ = 'pather'
import urllib.request
import re
africa_url_response = urllib.request.urlopen('http://worldpopulationreview.com/continents/africa-population/')
africa_url_html = africa_url_response.read()
africa_url_text = africa_url_html.decode('UTF-8')
africa_current_population = re.search('<span>([^<]*)', africa_url_text).group(1)
current_population = africa_current_population
# africa_future_population = current_population
print(africa_current_population)
# this wasn't needed in lab 3
def chosen_continent_pop_finder(name_of_continent):
| if name_of_continent == 'Asia' or name_of_continent == 'asia':
return name_of_continent
elif name_of_continent == 'Africa' or name_of_continent == 'africa':
return name_of_continent
elif name_of_continent == 'Europe' or name_of_continent == 'europe':
return name_of_continent
elif name_of_continent == 'South America' or name_of_continent == 'south america':
return name_of_continent
elif name_of_continent == 'North America' or name_of_continent == 'north america':
return name_of_continent
elif name_of_continent == 'Oceania' or name_of_continent == 'oceania':
return name_of_continent
else:
print("Whoops! Let's try this again")
main() |
|
sync_routes.go | //
// Copyright (c) 2019-2021 Red Hat, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package devworkspacerouting
import (
"context"
"fmt"
"github.com/devfile/devworkspace-operator/pkg/constants"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
routeV1 "github.com/openshift/api/route/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/client"
controllerv1alpha1 "github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
)
var routeDiffOpts = cmp.Options{
cmpopts.IgnoreFields(routeV1.Route{}, "TypeMeta", "ObjectMeta", "Status"),
cmpopts.IgnoreFields(routeV1.RouteSpec{}, "WildcardPolicy", "Host"),
cmpopts.IgnoreFields(routeV1.RouteTargetReference{}, "Weight"),
}
func (r *DevWorkspaceRoutingReconciler) syncRoutes(routing *controllerv1alpha1.DevWorkspaceRouting, specRoutes []routeV1.Route) (ok bool, clusterRoutes []routeV1.Route, err error) {
routesInSync := true
clusterRoutes, err = r.getClusterRoutes(routing)
if err != nil {
return false, nil, err
}
toDelete := getRoutesToDelete(clusterRoutes, specRoutes)
for _, route := range toDelete {
err := r.Delete(context.TODO(), &route)
if err != nil {
return false, nil, err
}
routesInSync = false
}
for _, specRoute := range specRoutes {
if contains, idx := listContainsRouteByName(specRoute, clusterRoutes); contains {
clusterRoute := clusterRoutes[idx]
if !cmp.Equal(specRoute, clusterRoute, routeDiffOpts) {
r.Log.Info(fmt.Sprintf("Updating route: %s", clusterRoute.Name))
if r.DebugLogging {
r.Log.Info(fmt.Sprintf("Diff: %s", cmp.Diff(specRoute, clusterRoute, routeDiffOpts)))
}
// Update route's spec
clusterRoute.Spec = specRoute.Spec
err := r.Update(context.TODO(), &clusterRoute)
if err != nil && !errors.IsConflict(err) {
return false, nil, err
}
routesInSync = false
}
} else {
err := r.Create(context.TODO(), &specRoute)
if err != nil {
return false, nil, err
}
routesInSync = false
}
}
return routesInSync, clusterRoutes, nil
}
func (r *DevWorkspaceRoutingReconciler) getClusterRoutes(routing *controllerv1alpha1.DevWorkspaceRouting) ([]routeV1.Route, error) {
found := &routeV1.RouteList{}
labelSelector, err := labels.Parse(fmt.Sprintf("%s=%s", constants.DevWorkspaceIDLabel, routing.Spec.DevWorkspaceId))
if err != nil { | }
listOptions := &client.ListOptions{
Namespace: routing.Namespace,
LabelSelector: labelSelector,
}
err = r.List(context.TODO(), found, listOptions)
if err != nil {
return nil, err
}
var routes []routeV1.Route
for _, route := range found.Items {
for _, ownerref := range route.OwnerReferences {
// We need to filter routes that are created automatically for ingresses on OpenShift
if ownerref.Kind == "Ingress" {
continue
}
routes = append(routes, route)
}
}
return routes, nil
}
func getRoutesToDelete(clusterRoutes, specRoutes []routeV1.Route) []routeV1.Route {
var toDelete []routeV1.Route
for _, clusterRoute := range clusterRoutes {
if contains, _ := listContainsRouteByName(clusterRoute, specRoutes); !contains {
toDelete = append(toDelete, clusterRoute)
}
}
return toDelete
}
func listContainsRouteByName(query routeV1.Route, list []routeV1.Route) (exists bool, idx int) {
for idx, listRoute := range list {
if query.Name == listRoute.Name {
return true, idx
}
}
return false, -1
} | return nil, err |
api_data_sci.py | #!/usr/local/bin/python
import sys
import pymongo
import argparse
from bson import ObjectId
from gevent.pywsgi import WSGIServer
from geventwebsocket.handler import WebSocketHandler
import bottle
from bottle import Bottle, redirect, request, response, static_file
from bson.json_util import dumps
import author_gene_clustering_module
bottle.BaseRequest.MEMFILE_MAX = 1024 * 1024
import app
api = Bottle()
log = app.get_logger('api_alt')
# simple health-check API that returns a success message
@api.get('/ds/getmessage')
def | ():
return {
'message' : 'success'
}
# API for returning the author-gene bipartite network for a comma-separated list of genes
@api.get('/ds/getbpnet/:genes')
def ds_get_bp_net(genes):
genes_list = genes.split(',')
graph_json = author_gene_clustering_module.analyze_AG_bipartite_network(genes_list)
if (request.query.callback):
response.content_type = "application/javascript"
return "%s(%s);" % (request.query.callback, graph_json)
return graph_json
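# Example request (the gene symbols below are illustrative, not part of this service):
#   GET /ds/getbpnet/BRCA1,TP53
# Append a ?callback=<name> query parameter to receive the graph JSON wrapped as JSONP.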
# run the web server
def main():
status = 0
parser = argparse.ArgumentParser()
parser.add_argument('port', nargs='?', type=int, help='HTTP port', default=80)
args = parser.parse_args()
print 'starting web server on port %s' % args.port
print 'press control-c to quit'
try:
server = WSGIServer(('0.0.0.0', args.port), api, handler_class=WebSocketHandler)
log.info('entering main loop')
server.serve_forever()
except KeyboardInterrupt:
log.info('exiting main loop')
except Exception as e:
msg = 'could not start web server: %s' % e
log.error(msg)
print msg
status = 1
log.info('exiting with status %d', status)
return status
if __name__ == '__main__':
sys.exit(main()) | ds_getmessage |
system_test.rs | //! A system test:
//! (i) Sets up test configuration,
//! (ii) interacts with the Internet Computer under test
//! (iii) makes assertions about the test results.
//!
//! The SystemTest encodes the test configuration (including the initial network
//! topology, i.e. number of subnetworks and nodes). When the test starts, an
//! Internet Computer instance is created with the test configuration using
//! processes on the local machine. The test author can describe interactions
//! with the IC programmatically using the API provided by `IcInstance`, an
//! instance of which is returned by the `start()` method on `InternetComputer`.
//!
//! The following exemplifies the structure of a system test:
//!
//! ```
//! use ic_scenario_tests::{system_test::InternetComputer};
//! use ic_scenario_tests::api::system::builder::Subnet;
//!
//! #[tokio::test]
//! async fn test_name() {
//! // This should be removed in the future when system tests are identified prior to running
//! // the tests.
//! if InternetComputer::is_system_test_environment().is_err() {
//! return;
//! }
//! let ic = InternetComputer::new()
//! .with_subnet(Subnet::new().add_nodes(4))
//! .with_registered(2)
//! .start();
//! /* test logic */
//! }
//! ```
use crate::api::system::handle::{InitialReplica, SystemTestError, SystemTestResult};
use crate::api::system::{builder::Subnet, handle::IcHandle};
use crate::ltl::*;
use ic_config::logger::Config as LoggerConfig;
use ic_logger::{new_replica_logger, LoggerImpl};
use ic_registry_transport::pb::v1::RegistryMutation;
use ic_types::malicious_behaviour::MaliciousBehaviour;
use ic_types::NodeId;
use ic_utils::command::is_file_on_path;
use log_analyzer::*;
use std::collections::BTreeMap;
use std::sync::Arc;
pub const NODEMANAGER_EXECUTABLE: &str = "nodemanager";
pub const REPLICA_EXECUTABLE: &str = "replica";
#[derive(Clone, Debug)]
pub struct InternetComputer {
pub initial_replica: Option<InitialReplica>,
pub subnets: Vec<Subnet>,
/// `true` iff the initial configuration of the IC contains an NNS subnet.
/// The configuration for the NNS subnet is placed at index 0 of the
/// `subnets` vector.
pub nns_subnet_present: bool,
pub registered_nodes: usize,
pub malicious_behaviours: BTreeMap<NodeId, MaliciousBehaviour>,
pub actix_flag: bool,
pub initial_mutations: Vec<RegistryMutation>,
}
impl InternetComputer {
pub fn new() -> Self {
Self::default()
}
/// A subnet with `nodes` nodes.
pub fn with_subnet(mut self, subnet: Subnet) -> Self {
self.subnets.push(subnet);
self
}
pub fn with_nns_subnet(mut self, subnet: Subnet) -> Self {
if self.nns_subnet_present {
panic!("Called with_nns_subnet() more than once.");
}
self.subnets.insert(0, subnet);
self.nns_subnet_present = true;
self
}
/// Assume an initial condition where `nodes` nodes are registered, but not
/// assigned to a network.
pub fn with_registered_nodes(mut self, nodes: usize) -> Self {
self.registered_nodes = nodes;
self
}
pub fn with_initial_replica(mut self, initial_replica: InitialReplica) -> Self {
self.initial_replica = Some(initial_replica);
self
}
pub fn with_actix_flag(mut self) -> Self {
self.actix_flag = true;
self
}
pub fn with_initial_mutation(mut self, mutation: RegistryMutation) -> Self {
self.initial_mutations.push(mutation);
self
}
/// Collects CLI arguments from the environment and runs the test.
pub async fn start(self) -> Arc<IcHandle> |
/// Collects CLI arguments from the environment and runs the test. Moreover,
/// runs the provided `Analyzer`, if any, with the log from all started
/// processes.
pub async fn start_with_analyzer(
self,
_with_analyzer: Analyzer<'static, LogEntryFrom>,
) -> Arc<IcHandle> {
Self::is_system_test_environment().unwrap();
let logger_config = LoggerConfig::default();
let base_logger = LoggerImpl::new(&logger_config, "scenario_test".into());
let logger = new_replica_logger(base_logger.root.clone(), &logger_config);
let actix_flag = self.actix_flag;
IcHandle::from_internet_computer(
self,
logger,
base_logger,
// , with_analyzer
actix_flag,
)
.await
.expect("Could not instantiate IC")
}
pub fn is_system_test_environment() -> SystemTestResult<()> {
Self::is_file_on_path(NODEMANAGER_EXECUTABLE)
.and_then(|_| Self::is_file_on_path(REPLICA_EXECUTABLE))
}
fn is_file_on_path(f: &str) -> SystemTestResult<()> {
if !is_file_on_path(f) {
return Err(SystemTestError::InitializationError(format!(
"Executable '{}' not found on the path.",
f
)));
}
Ok(())
}
}
impl Default for InternetComputer {
fn default() -> Self {
Self {
initial_replica: None,
subnets: vec![],
nns_subnet_present: false,
registered_nodes: 0,
malicious_behaviours: Default::default(),
actix_flag: false,
initial_mutations: vec![],
}
}
}
#[cfg(test)]
mod tests {
use crate::api::system::builder::Subnet;
use crate::system_test::InternetComputer;
use canister_test::Wasm;
use ic_test_utilities::universal_canister::{wasm, UNIVERSAL_CANISTER_WASM};
#[ignore]
#[tokio::test]
async fn can_query_using_universal_canister() {
let ic = InternetComputer::new()
.with_subnet(Subnet::new().add_nodes(4))
.start()
.await
.ready()
.await
.expect("Not ready yet");
let node0 = ic.subnet_by_idx(0).node_by_idx(0);
let api0 = node0.api();
let c0 = Wasm::from_bytes(UNIVERSAL_CANISTER_WASM)
.install_(&api0, vec![])
.await
.unwrap();
let arbitrary_bytes = b"l49sdk";
let response = c0
.query_(
"query",
on_wire::bytes,
wasm().reply_data(arbitrary_bytes).build(),
)
.await
.unwrap();
assert_eq!(response, arbitrary_bytes.to_vec());
}
}
| {
self.start_with_analyzer(analyzer()).await
} |
api_individual_sm_context.go | /*
* Nsmf_PDUSession
*
* SMF PDU Session Service
*
* API version: 1.0.0
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package Nsmf_PDUSession
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"github.com/5g-core/openapi"
"github.com/5g-core/openapi/models"
"github.com/antihax/optional"
)
// Linger please
var (
_ context.Context
)
type IndividualSMContextApiService service
/*
IndividualSMContextApiService Release SM Context
* @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param smContextRef SM context reference
* @param optional nil or *ReleaseSmContextParamOpts - Optional Parameters:
* @param "SmContextReleaseData" (optional.Interface of SmContextReleaseData) - representation of the data to be sent to the SMF when releasing the SM context
*/
type ReleaseSmContextParamOpts struct {
SmContextReleaseData optional.Interface
}
func (a *IndividualSMContextApiService) ReleaseSmContext(ctx context.Context, smContextRef string, releaseSmContextRequest models.ReleaseSmContextRequest) (*http.Response, error) {
var (
localVarHttpMethod = strings.ToUpper("Post")
localVarPostBody interface{}
localVarFormFileName string
localVarFileName string
localVarFileBytes []byte
)
// create path and map variables
localVarPath := a.client.cfg.BasePath() + "/sm-contexts/{smContextRef}/release"
localVarPath = strings.Replace(localVarPath, "{"+"smContextRef"+"}", fmt.Sprintf("%v", smContextRef), -1)
localVarHeaderParams := make(map[string]string)
localVarQueryParams := url.Values{}
localVarFormParams := url.Values{}
if releaseSmContextRequest.BinaryDataN2SmInformation != nil {
localVarHeaderParams["Content-Type"] = "multipart/related"
localVarPostBody = &releaseSmContextRequest
} else {
localVarHeaderParams["Content-Type"] = "application/json"
localVarPostBody = releaseSmContextRequest.JsonData
}
// to determine the Accept header
localVarHttpHeaderAccepts := []string{"application/problem+json"}
// set Accept header
localVarHttpHeaderAccept := openapi.SelectHeaderAccept(localVarHttpHeaderAccepts)
if localVarHttpHeaderAccept != "" {
localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
}
r, err := openapi.PrepareRequest(ctx, a.client.cfg, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
if err != nil {
return nil, err
}
localVarHttpResponse, err := openapi.CallAPI(a.client.cfg, r)
if err != nil || localVarHttpResponse == nil {
return localVarHttpResponse, err
}
localVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)
localVarHttpResponse.Body.Close()
if err != nil {
return localVarHttpResponse, err
}
apiError := openapi.GenericOpenAPIError{
RawBody: localVarBody,
ErrorStatus: localVarHttpResponse.Status,
}
switch localVarHttpResponse.StatusCode {
case 204:
return localVarHttpResponse, nil
case 400:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarHttpResponse, apiError
case 403:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarHttpResponse, apiError
case 404:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarHttpResponse, apiError
case 411:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarHttpResponse, apiError
case 413:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarHttpResponse, apiError
case 415:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarHttpResponse, apiError
case 429:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarHttpResponse, apiError
case 500:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarHttpResponse, apiError
case 503:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarHttpResponse, apiError
default:
return localVarHttpResponse, openapi.ReportError("%d is not a valid status code in ReleaseSmContext", localVarHttpResponse.StatusCode)
}
}
/*
IndividualSMContextApiService Retrieve SM Context
* @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param smContextRef SM context reference
* @param optional nil or *RetrieveSmContextParamOpts - Optional Parameters:
* @param "SmContextRetrieveData" (optional.Interface of SmContextRetrieveData) - parameters used to retrieve the SM context
@return SmContextRetrievedData
*/
type RetrieveSmContextParamOpts struct {
SmContextRetrieveData optional.Interface
}
func (a *IndividualSMContextApiService) RetrieveSmContext(ctx context.Context, smContextRef string, localVarOptionals *RetrieveSmContextParamOpts) (models.SmContextRetrievedData, *http.Response, error) {
var (
localVarHttpMethod = strings.ToUpper("Post")
localVarPostBody interface{}
localVarFormFileName string
localVarFileName string
localVarFileBytes []byte
localVarReturnValue models.SmContextRetrievedData
)
// create path and map variables
localVarPath := a.client.cfg.BasePath() + "/sm-contexts/{smContextRef}/retrieve"
localVarPath = strings.Replace(localVarPath, "{"+"smContextRef"+"}", fmt.Sprintf("%v", smContextRef), -1)
localVarHeaderParams := make(map[string]string)
localVarQueryParams := url.Values{}
localVarFormParams := url.Values{}
localVarHttpContentTypes := []string{"application/json"}
localVarHeaderParams["Content-Type"] = localVarHttpContentTypes[0] // use the first content type specified in 'consumes'
// to determine the Accept header
localVarHttpHeaderAccepts := []string{"application/json", "application/problem+json"}
// set Accept header
localVarHttpHeaderAccept := openapi.SelectHeaderAccept(localVarHttpHeaderAccepts)
if localVarHttpHeaderAccept != "" {
localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
}
// body params
if localVarOptionals != nil && localVarOptionals.SmContextRetrieveData.IsSet() |
r, err := openapi.PrepareRequest(ctx, a.client.cfg, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
if err != nil {
return localVarReturnValue, nil, err
}
localVarHttpResponse, err := openapi.CallAPI(a.client.cfg, r)
if err != nil || localVarHttpResponse == nil {
return localVarReturnValue, localVarHttpResponse, err
}
localVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)
localVarHttpResponse.Body.Close()
if err != nil {
return localVarReturnValue, localVarHttpResponse, err
}
apiError := openapi.GenericOpenAPIError{
RawBody: localVarBody,
ErrorStatus: localVarHttpResponse.Status,
}
switch localVarHttpResponse.StatusCode {
case 200:
err = openapi.Deserialize(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
}
return localVarReturnValue, localVarHttpResponse, nil
case 400:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarReturnValue, localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarReturnValue, localVarHttpResponse, apiError
case 403:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarReturnValue, localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarReturnValue, localVarHttpResponse, apiError
case 404:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarReturnValue, localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarReturnValue, localVarHttpResponse, apiError
case 411:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarReturnValue, localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarReturnValue, localVarHttpResponse, apiError
case 413:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarReturnValue, localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarReturnValue, localVarHttpResponse, apiError
case 415:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarReturnValue, localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarReturnValue, localVarHttpResponse, apiError
case 429:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarReturnValue, localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarReturnValue, localVarHttpResponse, apiError
case 500:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarReturnValue, localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarReturnValue, localVarHttpResponse, apiError
case 503:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarReturnValue, localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarReturnValue, localVarHttpResponse, apiError
default:
return localVarReturnValue, localVarHttpResponse, openapi.ReportError("%d is not a valid status code in RetrieveSmContext", localVarHttpResponse.StatusCode)
}
}
/*
IndividualSMContextApiService Update SM Context
* @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param smContextRef SM context reference
* @param smContextUpdateData representation of the updates to apply to the SM context
@return SmContextUpdatedData
*/
func (a *IndividualSMContextApiService) UpdateSmContext(ctx context.Context, smContextRef string, updateSmContextRequest models.UpdateSmContextRequest) (models.UpdateSmContextResponse, *http.Response, error) {
var (
localVarHttpMethod = strings.ToUpper("Post")
localVarPostBody interface{}
localVarFormFileName string
localVarFileName string
localVarFileBytes []byte
localVarReturnValue models.UpdateSmContextResponse
)
// create path and map variables
localVarPath := a.client.cfg.BasePath() + "/sm-contexts/{smContextRef}/modify"
localVarPath = strings.Replace(localVarPath, "{"+"smContextRef"+"}", fmt.Sprintf("%v", smContextRef), -1)
localVarHeaderParams := make(map[string]string)
localVarQueryParams := url.Values{}
localVarFormParams := url.Values{}
// to determine the request Content-Type header
if updateSmContextRequest.BinaryDataN1SmMessage != nil || updateSmContextRequest.BinaryDataN2SmInformation != nil {
localVarHeaderParams["Content-Type"] = "multipart/related"
localVarPostBody = &updateSmContextRequest
} else {
localVarHeaderParams["Content-Type"] = "application/json"
localVarPostBody = updateSmContextRequest.JsonData
}
// to determine the Accept header
localVarHttpHeaderAccepts := []string{"application/json", "multipart/related", "application/problem+json"}
// set Accept header
localVarHttpHeaderAccept := openapi.SelectHeaderAccept(localVarHttpHeaderAccepts)
if localVarHttpHeaderAccept != "" {
localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
}
r, err := openapi.PrepareRequest(ctx, a.client.cfg, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
if err != nil {
return localVarReturnValue, nil, err
}
localVarHttpResponse, err := openapi.CallAPI(a.client.cfg, r)
if err != nil || localVarHttpResponse == nil {
return localVarReturnValue, localVarHttpResponse, err
}
localVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)
localVarHttpResponse.Body.Close()
if err != nil {
return localVarReturnValue, localVarHttpResponse, err
}
apiError := openapi.GenericOpenAPIError{
RawBody: localVarBody,
ErrorStatus: localVarHttpResponse.Status,
}
switch localVarHttpResponse.StatusCode {
case 200:
err = openapi.Deserialize(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
}
return localVarReturnValue, localVarHttpResponse, nil
case 204:
return localVarReturnValue, localVarHttpResponse, nil
case 400:
var v models.UpdateSmContextErrorResponse
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarReturnValue, localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarReturnValue, localVarHttpResponse, apiError
case 403:
var v models.UpdateSmContextErrorResponse
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarReturnValue, localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarReturnValue, localVarHttpResponse, apiError
case 404:
var v models.UpdateSmContextErrorResponse
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarReturnValue, localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarReturnValue, localVarHttpResponse, apiError
case 411:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarReturnValue, localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarReturnValue, localVarHttpResponse, apiError
case 413:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarReturnValue, localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarReturnValue, localVarHttpResponse, apiError
case 415:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarReturnValue, localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarReturnValue, localVarHttpResponse, apiError
case 429:
var v models.ProblemDetails
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarReturnValue, localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarReturnValue, localVarHttpResponse, apiError
case 500:
var v models.UpdateSmContextErrorResponse
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarReturnValue, localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarReturnValue, localVarHttpResponse, apiError
case 503:
var v models.UpdateSmContextErrorResponse
err = openapi.Deserialize(&v, localVarBody, localVarHttpResponse.Header.Get("Content-Type"))
if err != nil {
apiError.ErrorStatus = err.Error()
return localVarReturnValue, localVarHttpResponse, apiError
}
apiError.ErrorModel = v
return localVarReturnValue, localVarHttpResponse, apiError
default:
return localVarReturnValue, localVarHttpResponse, openapi.ReportError("%d is not a valid status code in UpdateSmContext", localVarHttpResponse.StatusCode)
}
}
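// Hypothetical usage sketch (assumes the generated APIClient wires up this service; the client
// field name below is illustrative and not defined in this file):
//
//	updated, httpResp, err := client.IndividualSMContextApi.UpdateSmContext(ctx, smContextRef,
//		models.UpdateSmContextRequest{JsonData: smContextUpdateData})
//	if err != nil {
//		// non-2xx responses surface as an openapi.GenericOpenAPIError carrying ProblemDetails
//	}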
| {
localVarOptionalSmContextRetrieveData, localVarOptionalSmContextRetrieveDataok := localVarOptionals.SmContextRetrieveData.Value().(models.SmContextRetrieveData)
if !localVarOptionalSmContextRetrieveDataok {
return localVarReturnValue, nil, openapi.ReportError("smContextRetrieveData should be SmContextRetrieveData")
}
localVarPostBody = &localVarOptionalSmContextRetrieveData
} |
handlers.ts | import {
BackendDestinationEnum,
getEndpoint,
} from '../../lib/utils/functions/getEndpoint'
import { mockSchemaBinding } from '../mockSchema'
import { rest } from 'msw'
import {
mockFileEntity,
mockFileEntityBundle,
mockProjectEntity,
mockProjectEntityBundle,
mockPaginatedEntityHeaders,
MOCK_FILE_ENTITY_ID,
MOCK_FILE_NAME,
MOCK_INVALID_PROJECT_NAME,
MOCK_PROJECT_ID,
MOCK_PROJECT_NAME,
mockFileEntityJson,
} from '../entity/mockEntity'
import {
mockUserBundle,
mockUserProfileData,
MOCK_USER_ID,
} from '../user/mock_user_profile'
import {
ENTITY,
ENTITY_BUNDLE_V2,
ENTITY_SCHEMA_BINDING,
FAVORITES,
USER_ID_BUNDLE,
USER_PROFILE_ID,
USER_PROFILE,
ENTITY_JSON,
} from '../../lib/utils/APIConstants'
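// MSW request handlers used in tests to mock backend REPO endpoints: user profiles and bundles,
// entity creation, entity bundles, JSON schema bindings, favorites, and entity JSON.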
const handlers = [
rest.options('*', async (req, res, ctx) => {
return res(ctx.status(200))
}),
rest.get(
`${getEndpoint(BackendDestinationEnum.REPO_ENDPOINT)}${USER_PROFILE_ID(
':id',
)}`,
async (req, res, ctx) => {
let response: any = {
reason: `Mock Service worker could not find a user profile with ID ${req.params.id}`,
}
let status = 404
if (req.params.id === MOCK_USER_ID.toString()) {
response = mockUserProfileData
status = 200
}
return res(ctx.status(status), ctx.json(response))
},
),
rest.get(
`${getEndpoint(BackendDestinationEnum.REPO_ENDPOINT)}${USER_PROFILE}`,
async (req, res, ctx) => {
// default return a mock UserProfile.
let response: any = mockUserProfileData
let status = 200
return res(ctx.status(status), ctx.json(response))
},
),
rest.get(
`${getEndpoint(BackendDestinationEnum.REPO_ENDPOINT)}${USER_ID_BUNDLE(
':id',
)}`,
async (req, res, ctx) => {
let response: any = {
reason: `Mock Service worker could not find a user bundle with ID ${req.params.id}`,
}
let status = 404
if (req.params.id === MOCK_USER_ID.toString()) {
response = mockUserBundle
status = 200
}
return res(ctx.status(status), ctx.json(response))
},
),
// create entity
rest.post(
`${getEndpoint(BackendDestinationEnum.REPO_ENDPOINT)}${ENTITY}`,
async (req, res, ctx) => {
let response: any = {
reason: `Mock Service worker could not find a matching mock entity for this request : ${JSON.stringify(
req.body,
)}`,
}
let status = 404
if (req.body) {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const requestBody = req.body as any
if (requestBody.name === MOCK_FILE_NAME) {
response = mockFileEntity
status = 200
} else if (requestBody.name === MOCK_PROJECT_NAME) {
response = mockProjectEntity
status = 200
} else if (requestBody.name === MOCK_INVALID_PROJECT_NAME) {
response.reason = 'Invalid project name'
status = 403
}
} | return res(ctx.status(status), ctx.json(response))
},
),
rest.post(
`${getEndpoint(BackendDestinationEnum.REPO_ENDPOINT)}${ENTITY_BUNDLE_V2(
':entityId',
)}`,
async (req, res, ctx) => {
let response: any = {
reason: `Mock Service worker could not find a mock entity bundle with ID ${req.params.entityId}`,
}
let status = 404
if (req.params.entityId === MOCK_FILE_ENTITY_ID) {
response = mockFileEntityBundle
status = 200
} else if (req.params.entityId === MOCK_PROJECT_ID) {
response = mockProjectEntityBundle
status = 200
}
return res(ctx.status(status), ctx.json(response))
},
),
rest.get(
`${getEndpoint(
BackendDestinationEnum.REPO_ENDPOINT,
)}${ENTITY_SCHEMA_BINDING(':entityId')}`,
async (req, res, ctx) => {
return res(ctx.status(200), ctx.json(mockSchemaBinding))
},
),
rest.get(
`${getEndpoint(BackendDestinationEnum.REPO_ENDPOINT)}${FAVORITES}`,
async (req, res, ctx) => {
return res(ctx.status(200), ctx.json(mockPaginatedEntityHeaders))
},
),
rest.get(
`${getEndpoint(BackendDestinationEnum.REPO_ENDPOINT)}${ENTITY_JSON(
':entityId',
)}`,
async (req, res, ctx) => {
const response = mockFileEntityJson
return res(ctx.status(200), ctx.json(response))
},
),
]
export { handlers } | |
workflow_test.go | // Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daisy
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"testing"
"time"
"cloud.google.com/go/storage"
"google.golang.org/api/compute/v1"
"google.golang.org/api/option"
)
func TestAddDependency(t *testing.T) {
w := &Workflow{}
a, _ := w.NewStep("a")
b, _ := w.NewStep("b")
otherW := &Workflow{}
c, _ := otherW.NewStep("c")
tests := []struct {
desc string
in1, in2 *Step
shouldErr bool
}{
{"good case", a, b, false},
{"idempotent good case", a, b, false},
{"bad case 1", a, c, true},
{"bad case 2", c, b, true},
}
for _, tt := range tests {
if err := w.AddDependency(tt.in1, tt.in2); err == nil && tt.shouldErr {
t.Errorf("%s: should have erred but didn't", tt.desc)
} else if err != nil && !tt.shouldErr {
t.Errorf("%s: unexpected error: %v", tt.desc, err)
}
}
wantDeps := map[string][]string{"a": {"b"}}
if diffRes := diff(w.Dependencies, wantDeps, 0); diffRes != "" {
t.Errorf("incorrect dependencies: (-got,+want)\n%s", diffRes)
}
}
func TestDaisyBkt(t *testing.T) {
client, err := newTestGCSClient()
if err != nil {
t.Fatal(err)
}
project := "foo-project"
got, err := daisyBkt(context.Background(), client, project)
if err != nil {
t.Fatal(err)
}
want := project + "-daisy-bkt"
if got != project+"-daisy-bkt" {
t.Errorf("bucket does not match, got: %q, want: %q", got, want)
}
project = "bar-project"
got, err = daisyBkt(context.Background(), client, project)
if err != nil {
t.Fatal(err)
}
want = project + "-daisy-bkt"
if got != project+"-daisy-bkt" {
t.Errorf("bucket does not match, got: %q, want: %q", got, want)
}
}
func TestCleanup(t *testing.T) {
cleanedup1 := false
cleanedup2 := false
cleanup1 := func() dErr {
cleanedup1 = true
return nil
}
cleanup2 := func() dErr {
cleanedup2 = true
return nil
}
cleanupFail := func() dErr {
return errf("failed cleanup")
}
w := testWorkflow()
w.addCleanupHook(cleanup1)
w.addCleanupHook(cleanupFail)
w.addCleanupHook(cleanup2)
w.cleanup()
if !cleanedup1 {
t.Error("cleanup1 was not run")
}
if !cleanedup2 {
t.Error("cleanup2 was not run")
}
}
func TestGenName(t *testing.T) {
tests := []struct{ name, wfName, wfID, want string }{
{"name", "wfname", "123456789", "name-wfname-123456789"},
{"super-long-name-really-long", "super-long-workflow-name-like-really-really-long", "1", "super-long-name-really-long-super-long-workflow-name-lik-1"},
{"super-long-name-really-long", "super-long-workflow-name-like-really-really-long", "123456789", "super-long-name-really-long-super-long-workflow-name-lik-123456"},
}
w := &Workflow{}
for _, tt := range tests {
w.id = tt.wfID
w.Name = tt.wfName
result := w.genName(tt.name)
if result != tt.want {
t.Errorf("bad result, i: name=%s wfName=%s wfId=%s; got: %s; want: %s", tt.name, tt.wfName, tt.wfID, result, tt.want)
}
if len(result) > 64 {
t.Errorf("result > 64 characters, i: name=%s wfName=%s wfId=%s; got: %s", tt.name, tt.wfName, tt.wfID, result)
}
}
}
func TestGetSourceGCSAPIPath(t *testing.T) {
w := testWorkflow()
w.sourcesPath = "my/sources"
got := w.getSourceGCSAPIPath("foo")
want := "https://storage.cloud.google.com/my/sources/foo"
if got != want {
t.Errorf("unexpected result: got: %q, want %q", got, want)
}
}
func TestNewFromFileError(t *testing.T) {
td, err := ioutil.TempDir(os.TempDir(), "")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(td)
tf := filepath.Join(td, "test.wf.json")
tests := []struct{ data, error string }{
{
`{"test":["1", "2",]}`,
tf + ": JSON syntax error in line 1: invalid character ']' looking for beginning of value \n{\"test\":[\"1\", \"2\",]}\n ^",
},
{
`{"test":{"key1":"value1" "key2":"value2"}}`,
tf + ": JSON syntax error in line 1: invalid character '\"' after object key:value pair \n{\"test\":{\"key1\":\"value1\" \"key2\":\"value2\"}}\n ^",
},
{
`{"test": value}`,
tf + ": JSON syntax error in line 1: invalid character 'v' looking for beginning of value \n{\"test\": value}\n ^",
},
{
`{"test": "value"`,
tf + ": JSON syntax error in line 1: unexpected end of JSON input \n{\"test\": \"value\"\n ^",
},
{
"{\n\"test\":[\"1\", \"2\",],\n\"test2\":[\"1\", \"2\"]\n}",
tf + ": JSON syntax error in line 2: invalid character ']' looking for beginning of value \n\"test\":[\"1\", \"2\",],\n ^",
},
}
for i, tt := range tests {
if err := ioutil.WriteFile(tf, []byte(tt.data), 0600); err != nil {
t.Fatalf("error creating json file: %v", err)
}
if _, err := NewFromFile(tf); err == nil {
t.Errorf("expected error, got nil for test %d", i+1)
} else if err.Error() != tt.error {
t.Errorf("did not get expected error from NewFromFile():\ngot: %q\nwant: %q", err.Error(), tt.error)
}
}
}
func TestNewFromFile(t *testing.T) {
got, err := NewFromFile("./test_data/test.wf.json")
if err != nil {
t.Fatal(err)
}
wd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
want := New()
// These are difficult to validate and irrelevant, so we cheat.
want.id = got.ID()
want.Cancel = got.Cancel
want.cleanupHooks = got.cleanupHooks
want.disks = newDiskRegistry(want)
want.images = newImageRegistry(want)
want.instances = newInstanceRegistry(want)
want.networks = newNetworkRegistry(want)
want.workflowDir = filepath.Join(wd, "test_data")
want.Name = "some-name"
want.Project = "some-project"
want.Zone = "us-central1-a"
want.GCSPath = "gs://some-bucket/images"
want.OAuthPath = filepath.Join(wd, "test_data", "somefile")
want.Sources = map[string]string{}
want.autovars = map[string]string{}
want.Vars = map[string]Var{
"bootstrap_instance_name": {Value: "bootstrap-${NAME}", Required: true},
"machine_type": {Value: "n1-standard-1"},
"key1": {Value: "var1"},
"key2": {Value: "var2"},
}
want.Steps = map[string]*Step{
"create-disks": {
name: "create-disks",
CreateDisks: &CreateDisks{
{
Disk: compute.Disk{
Name: "bootstrap",
SourceImage: "projects/windows-cloud/global/images/family/windows-server-2016-core",
Type: "pd-ssd",
},
SizeGb: "50",
},
{
Disk: compute.Disk{
Name: "image",
SourceImage: "projects/windows-cloud/global/images/family/windows-server-2016-core",
Type: "pd-standard",
},
SizeGb: "50",
},
},
},
"${bootstrap_instance_name}": {
name: "${bootstrap_instance_name}",
CreateInstances: &CreateInstances{
{
Instance: compute.Instance{
Name: "${bootstrap_instance_name}",
Disks: []*compute.AttachedDisk{{Source: "bootstrap"}, {Source: "image"}},
MachineType: "${machine_type}",
},
StartupScript: "shutdown /h",
Metadata: map[string]string{"test_metadata": "this was a test"},
},
},
},
"${bootstrap_instance_name}-stopped": {
name: "${bootstrap_instance_name}-stopped",
Timeout: "1h",
WaitForInstancesSignal: &WaitForInstancesSignal{{Name: "${bootstrap_instance_name}", Stopped: true, Interval: "1s"}},
},
"postinstall": {
name: "postinstall",
CreateInstances: &CreateInstances{
{
Instance: compute.Instance{
Name: "postinstall",
Disks: []*compute.AttachedDisk{{Source: "image"}, {Source: "bootstrap"}},
MachineType: "${machine_type}",
},
StartupScript: "shutdown /h",
},
},
},
"postinstall-stopped": {
name: "postinstall-stopped",
WaitForInstancesSignal: &WaitForInstancesSignal{{Name: "postinstall", Stopped: true}},
},
"create-image": {
name: "create-image",
CreateImages: &CreateImages{{Image: compute.Image{Name: "image-from-disk", SourceDisk: "image"}}},
},
"include-workflow": {
name: "include-workflow",
IncludeWorkflow: &IncludeWorkflow{
Vars: map[string]string{
"key": "value",
},
Path: "./test_sub.wf.json",
},
},
"sub-workflow": {
name: "sub-workflow",
SubWorkflow: &SubWorkflow{
Vars: map[string]string{
"key": "value",
},
Path: "./test_sub.wf.json",
},
},
}
want.Dependencies = map[string][]string{
"create-disks": {},
"bootstrap": {"create-disks"},
"bootstrap-stopped": {"bootstrap"},
"postinstall": {"bootstrap-stopped"},
"postinstall-stopped": {"postinstall"},
"create-image": {"postinstall-stopped"},
"include-workflow": {"create-image"},
"sub-workflow": {"create-image"},
}
for _, s := range want.Steps {
s.w = want
}
if diffRes := diff(got, want, 0); diffRes != "" {
t.Errorf("parsed workflow does not match expectation: (-got +want)\n%s", diffRes)
}
}
func TestNewStep(t *testing.T) {
w := &Workflow{}
s, err := w.NewStep("s")
wantS := &Step{name: "s", w: w}
if s == nil || s.name != "s" || s.w != w {
t.Errorf("step does not meet expectation: got: %v, want: %v", s, wantS)
}
if err != nil {
t.Error("unexpected error when creating new step")
}
s, err = w.NewStep("s")
if s != nil {
t.Errorf("step should not have been created: %v", s)
}
if err == nil {
t.Error("should have erred, but didn't")
}
}
func TestPopulate(t *testing.T) {
ctx := context.Background()
client, err := newTestGCSClient()
if err != nil {
t.Fatal(err)
}
td, err := ioutil.TempDir(os.TempDir(), "")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(td)
tf := filepath.Join(td, "test.cred")
if err := ioutil.WriteFile(tf, []byte(`{ "type": "service_account" }`), 0600); err != nil {
t.Fatalf("error creating temp file: %v", err)
}
called := false
var stepPopErr dErr
stepPop := func(ctx context.Context, s *Step) dErr {
called = true
return stepPopErr
}
got := New()
got.Name = "${wf_name}"
got.Zone = "wf-zone"
got.Project = "bar-project"
got.OAuthPath = tf
got.Logger = &MockLogger{}
got.Vars = map[string]Var{
"bucket": {Value: "wf-bucket", Required: true},
"step_name": {Value: "step1"},
"timeout": {Value: "60m"},
"path": {Value: "./test_sub.wf.json"},
"wf_name": {Value: "wf-name"},
"test-var": {Value: "${ZONE}-this-should-populate-${NAME}"},
}
got.Steps = map[string]*Step{
"${NAME}-${step_name}": {
w: got,
Timeout: "${timeout}",
testType: &mockStep{
populateImpl: stepPop,
},
},
}
got.StorageClient = client
got.externalLogging = true
if err := got.populate(ctx); err != nil {
t.Fatalf("error populating workflow: %v", err)
}
want := New()
// These are difficult to validate and irrelevant, so we cheat.
want.id = got.id
want.Cancel = got.Cancel
want.cleanupHooks = got.cleanupHooks
want.StorageClient = got.StorageClient
want.cloudLoggingClient = got.cloudLoggingClient
want.Logger = got.Logger
want.disks = newDiskRegistry(want)
want.images = newImageRegistry(want)
want.instances = newInstanceRegistry(want)
want.networks = newNetworkRegistry(want)
want.Name = "wf-name"
want.GCSPath = "gs://bar-project-daisy-bkt"
want.Zone = "wf-zone"
want.Project = "bar-project"
want.OAuthPath = tf
want.externalLogging = true
want.Sources = map[string]string{}
want.DefaultTimeout = defaultTimeout
want.defaultTimeout = 10 * time.Minute
want.Vars = map[string]Var{
"bucket": {Value: "wf-bucket", Required: true},
"step_name": {Value: "step1"},
"timeout": {Value: "60m"},
"path": {Value: "./test_sub.wf.json"},
"wf_name": {Value: "wf-name"},
"test-var": {Value: "wf-zone-this-should-populate-wf-name"},
}
want.autovars = got.autovars
want.bucket = "bar-project-daisy-bkt"
want.scratchPath = got.scratchPath
want.sourcesPath = fmt.Sprintf("%s/sources", got.scratchPath)
want.logsPath = fmt.Sprintf("%s/logs", got.scratchPath)
want.outsPath = fmt.Sprintf("%s/outs", got.scratchPath)
want.username = got.username
want.Steps = map[string]*Step{
"wf-name-step1": {
name: "wf-name-step1",
Timeout: "60m",
timeout: time.Duration(60 * time.Minute),
testType: &mockStep{
populateImpl: stepPop,
},
},
}
want.Dependencies = map[string][]string{}
for _, s := range want.Steps {
s.w = want
}
if diffRes := diff(got, want, 0); diffRes != "" {
t.Errorf("parsed workflow does not match expectation: (-got +want)\n%s", diffRes)
}
if !called {
t.Error("did not call step's populate")
}
stepPopErr = errf("error")
wantErr := errf("error populating step \"wf-name-step1\": %v", stepPopErr)
if err := got.populate(ctx); err.Error() != wantErr.Error() {
t.Errorf("did not get proper step populate error: %v != %v", err, wantErr)
}
}
func TestRequiredVars(t *testing.T) {
w := testWorkflow()
tests := []struct {
desc string
vars map[string]Var
shouldErr bool
}{
{"normal case", map[string]Var{"foo": {Value: "foo", Required: true, Description: "foo"}}, false},
{"missing req case", map[string]Var{"foo": {Value: "", Required: true, Description: "foo"}}, true},
}
for _, tt := range tests {
w.Vars = tt.vars
err := w.populate(context.Background())
if tt.shouldErr && err == nil {
t.Errorf("%s: should have erred, but didn't", tt.desc)
} else if !tt.shouldErr && err != nil {
t.Errorf("%s: unexpected error: %v", tt.desc, err)
}
}
}
func testTraverseWorkflow(mockRun func(i int) func(context.Context, *Step) dErr) *Workflow {
// s0---->s1---->s3
// \ /
// --->s2---
// s4
w := testWorkflow()
w.Steps = map[string]*Step{
"s0": {name: "s0", testType: &mockStep{runImpl: mockRun(0)}, w: w},
"s1": {name: "s1", testType: &mockStep{runImpl: mockRun(1)}, w: w},
"s2": {name: "s2", testType: &mockStep{runImpl: mockRun(2)}, w: w},
"s3": {name: "s3", testType: &mockStep{runImpl: mockRun(3)}, w: w},
"s4": {name: "s4", testType: &mockStep{runImpl: mockRun(4)}, w: w},
}
w.Dependencies = map[string][]string{
"s1": {"s0"},
"s2": {"s0"},
"s3": {"s1", "s2"},
}
return w
}
func TestTraverseDAG(t *testing.T) {
ctx := context.Background()
var callOrder []int
errs := make([]dErr, 5)
var rw sync.Mutex
mockRun := func(i int) func(context.Context, *Step) dErr {
return func(_ context.Context, _ *Step) dErr {
rw.Lock()
defer rw.Unlock()
callOrder = append(callOrder, i)
return errs[i]
}
}
// Check call order: s1 and s2 must be after s0, s3 must be after s1 and s2.
checkCallOrder := func() error {
rw.Lock()
defer rw.Unlock()
stepOrderNum := []int{-1, -1, -1, -1, -1}
for i, stepNum := range callOrder {
stepOrderNum[stepNum] = i
}
// If s1 was called, check it was called after s0.
if stepOrderNum[1] != -1 && stepOrderNum[1] < stepOrderNum[0] {
return errors.New("s1 was called before s0")
}
// If s2 was called, check it was called after s0.
if stepOrderNum[2] != -1 && stepOrderNum[2] < stepOrderNum[0] {
return errors.New("s2 was called before s0")
}
// If s3 was called, check it was called after s1 and s2.
if stepOrderNum[3] != -1 {
if stepOrderNum[3] < stepOrderNum[1] {
return errors.New("s3 was called before s1")
}
if stepOrderNum[3] < stepOrderNum[2] {
return errors.New("s3 was called before s2")
}
}
return nil
}
// Normal, good run.
w := testTraverseWorkflow(mockRun)
if err := w.Run(ctx); err != nil {
t.Errorf("unexpected error: %s", err)
}
if err := checkCallOrder(); err != nil {
t.Errorf("call order error: %s", err)
}
callOrder = []int{}
errs = make([]dErr, 5)
// s2 failure.
w = testTraverseWorkflow(mockRun)
errs[2] = errf("failure")
want := w.Steps["s2"].wrapRunError(errs[2])
if err := w.Run(ctx); err.Error() != want.Error() {
t.Errorf("unexpected error: %s != %s", err, want)
}
if err := checkCallOrder(); err != nil {
t.Errorf("call order error: %s", err)
}
}
func TestPrint(t *testing.T) {
data := []byte(`{
"Name": "some-name",
"Project": "some-project",
"Zone": "some-zone",
"GCSPath": "gs://some-bucket/images",
"Vars": {
"instance_name": "i1",
"machine_type": {"Value": "n1-standard-1", "Required": true}
},
"Steps": {
"${instance_name}Delete": {
"DeleteResources": {
"Instances": ["${instance_name}"]
}
}
}
}`)
want := `{
"Name": "some-name",
"Project": "some-project",
"Zone": "some-zone",
"GCSPath": "gs://some-bucket/images",
"Vars": {
"instance_name": {
"Value": "i1"
},
"machine_type": {
"Value": "n1-standard-1",
"Required": true
}
},
"Steps": {
"i1Delete": {
"Timeout": "10m",
"DeleteResources": {
"Instances": [
"i1"
]
}
}
},
"DefaultTimeout": "10m"
}
`
td, err := ioutil.TempDir(os.TempDir(), "")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(td)
tf := filepath.Join(td, "test.wf.json")
ioutil.WriteFile(tf, data, 0600)
got, err := NewFromFile(tf)
if err != nil {
t.Fatal(err)
}
got.ComputeClient, _ = newTestGCEClient()
got.StorageClient, _ = newTestGCSClient()
old := os.Stdout
r, w, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
os.Stdout = w
got.Print(context.Background())
w.Close()
os.Stdout = old
var buf bytes.Buffer
if _, err := io.Copy(&buf, r); err != nil {
t.Fatal(err)
}
if diffRes := diff(buf.String(), want, 0); diffRes != "" {
t.Errorf("printed workflow does not match expectation: (-got +want)\n%s", diffRes)
}
}
func | (w *Workflow, want string) error {
if err := w.Validate(context.Background()); err == nil {
return errors.New("expected error, got nil")
} else if err.Error() != want {
return fmt.Errorf("did not get expected error from Validate():\ngot: %q\nwant: %q", err.Error(), want)
}
select {
case <-w.Cancel:
return nil
default:
return errors.New("expected cancel to be closed after error")
}
}
func TestValidateErrors(t *testing.T) {
// Error from validateRequiredFields().
w := testWorkflow()
w.Name = "1"
want := "error validating workflow: workflow field 'Name' must start with a letter and only contain letters, numbers, and hyphens"
if err := testValidateErrors(w, want); err != nil {
t.Error(err)
}
// Error from populate().
w = testWorkflow()
w.Steps = map[string]*Step{"s0": {Timeout: "10", testType: &mockStep{}}}
want = "error populating workflow: error populating step \"s0\": time: missing unit in duration 10"
if err := testValidateErrors(w, want); err != nil {
t.Error(err)
}
// Error from validate().
w = testWorkflow()
w.Steps = map[string]*Step{"s0": {testType: &mockStep{}}}
w.Project = "foo"
want = "error validating workflow: bad project lookup: \"foo\", error: APIError: bad project"
if err := testValidateErrors(w, want); err != nil {
t.Error(err)
}
}
func TestWrite(t *testing.T) {
var buf bytes.Buffer
testBucket := "bucket"
testObject := "object"
var gotObj string
var gotBkt string
nameRgx := regexp.MustCompile(`"name":"([^"].*)"`)
uploadRgx := regexp.MustCompile(`/b/([^/]+)/o?.*uploadType=multipart.*`)
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
u := r.URL.String()
m := r.Method
if match := uploadRgx.FindStringSubmatch(u); m == "POST" && match != nil {
body, _ := ioutil.ReadAll(r.Body)
buf.Write(body)
gotObj = nameRgx.FindStringSubmatch(string(body))[1]
gotBkt = match[1]
fmt.Fprintf(w, `{"kind":"storage#object","bucket":"%s","name":"%s"}`, gotBkt, gotObj)
}
}))
gcsClient, err := storage.NewClient(context.Background(), option.WithEndpoint(ts.URL), option.WithHTTPClient(http.DefaultClient))
if err != nil {
t.Fatal(err)
}
l := GCSLogger{
client: gcsClient,
bucket: testBucket,
object: testObject,
ctx: context.Background(),
}
tests := []struct {
test, want string
}{
{"test log 1\n", "test log 1\n"},
{"test log 2\n", "test log 1\ntest log 2\n"},
}
for _, tt := range tests {
l.Write([]byte(tt.test))
if gotObj != testObject {
t.Errorf("object does not match, want: %q, got: %q", testObject, gotObj)
}
if gotBkt != testBucket {
t.Errorf("bucket does not match, want: %q, got: %q", testBucket, gotBkt)
}
if !strings.Contains(buf.String(), tt.want) {
t.Errorf("expected text did not get sent to GCS, want: %q, got: %q", tt.want, buf.String())
}
if l.buf.String() != tt.want {
t.Errorf("buffer does mot match expectation, want: %q, got: %q", tt.want, l.buf.String())
}
}
}
func TestRunStepTimeout(t *testing.T) {
w := testWorkflow()
s, _ := w.NewStep("test")
s.timeout = 1 * time.Nanosecond
s.testType = &mockStep{runImpl: func(ctx context.Context, s *Step) dErr {
time.Sleep(1 * time.Second)
return nil
}}
want := `step "test" did not complete within the specified timeout of 1ns`
if err := w.runStep(context.Background(), s); err == nil || err.Error() != want {
t.Errorf("did not get expected error, got: %q, want: %q", err.Error(), want)
}
}
func TestPopulateClients(t *testing.T) {
w := testWorkflow()
initialComputeClient := w.ComputeClient
w.PopulateClients(context.Background())
if w.ComputeClient != initialComputeClient {
t.Errorf("Should not repopulate compute client.")
}
w.ComputeClient = nil
w.PopulateClients(context.Background())
if w.ComputeClient == nil {
t.Errorf("Did not populate compute client.")
}
initialStorageClient := w.StorageClient
w.PopulateClients(context.Background())
if w.StorageClient != initialStorageClient {
t.Errorf("Should not repopulate storage client.")
}
w.StorageClient = nil
w.PopulateClients(context.Background())
if w.StorageClient == nil {
t.Errorf("Did not populate storage client.")
}
initialCloudLoggingClient := w.cloudLoggingClient
w.PopulateClients(context.Background())
if w.cloudLoggingClient != initialCloudLoggingClient {
t.Errorf("Should not repopulate logging client.")
}
w.cloudLoggingClient = nil
w.externalLogging = false
w.PopulateClients(context.Background())
if w.cloudLoggingClient != nil {
t.Errorf("Should not populate Cloud Logging client.")
}
w.cloudLoggingClient = nil
w.externalLogging = true
w.PopulateClients(context.Background())
if w.cloudLoggingClient == nil {
t.Errorf("Did not populate Cloud Logging client.")
}
}
| testValidateErrors |
DragSource.js | "use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var checkDecoratorArguments_1 = require("./utils/checkDecoratorArguments");
var decorateHandler_1 = require("./decorateHandler");
var registerSource_1 = require("./registerSource");
var createSourceFactory_1 = require("./createSourceFactory");
var DragSourceMonitorImpl_1 = require("./DragSourceMonitorImpl");
var SourceConnector_1 = require("./SourceConnector");
var isValidType_1 = require("./utils/isValidType");
var discount_lodash_1 = require("./utils/discount_lodash");
var invariant = require('invariant');
/**
* Decorates a component as a dragsource
* @param type The dragsource type
* @param spec The drag source specification
* @param collect The props collector function
* @param options DnD options
*/
function | (type, spec, collect, options) {
if (options === void 0) { options = {}; }
checkDecoratorArguments_1.default('DragSource', 'type, spec, collect[, options]', type, spec, collect, options);
var getType = type;
if (typeof type !== 'function') {
invariant(isValidType_1.default(type), 'Expected "type" provided as the first argument to DragSource to be ' +
'a string, or a function that returns a string given the current props. ' +
'Instead, received %s. ' +
'Read more: http://react-dnd.github.io/react-dnd/docs/api/drag-source', type);
getType = function () { return type; };
}
invariant(discount_lodash_1.isPlainObject(spec), 'Expected "spec" provided as the second argument to DragSource to be ' +
'a plain object. Instead, received %s. ' +
'Read more: http://react-dnd.github.io/react-dnd/docs/api/drag-source', spec);
var createSource = createSourceFactory_1.default(spec);
invariant(typeof collect === 'function', 'Expected "collect" provided as the third argument to DragSource to be ' +
'a function that returns a plain object of props to inject. ' +
'Instead, received %s. ' +
'Read more: http://react-dnd.github.io/react-dnd/docs/api/drag-source', collect);
invariant(discount_lodash_1.isPlainObject(options), 'Expected "options" provided as the fourth argument to DragSource to be ' +
'a plain object when specified. ' +
'Instead, received %s. ' +
'Read more: http://react-dnd.github.io/react-dnd/docs/api/drag-source', options);
return function decorateSource(DecoratedComponent) {
return decorateHandler_1.default({
containerDisplayName: 'DragSource',
createHandler: createSource,
registerHandler: registerSource_1.default,
createConnector: function (backend) { return new SourceConnector_1.default(backend); },
createMonitor: function (manager) {
return new DragSourceMonitorImpl_1.default(manager);
},
DecoratedComponent: DecoratedComponent,
getType: getType,
collect: collect,
options: options,
});
};
}
exports.default = DragSource;
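// Hedged usage sketch (the component, type, and spec names below are illustrative, not part of
// this module):
//
//   var cardSource = { beginDrag: function (props) { return { id: props.id }; } };
//   var collectFn = function (connect, monitor) {
//       return { connectDragSource: connect.dragSource(), isDragging: monitor.isDragging() };
//   };
//   module.exports = DragSource('CARD', cardSource, collectFn)(Card);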
| DragSource |
edit_test.go | //go:build unit
// +build unit
package edit_test
import ( | "path/filepath"
"testing"
"github.com/jenkins-x-plugins/jx-gitops/pkg/cmd/requirement/edit"
"github.com/jenkins-x/jx-helpers/v3/pkg/files"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
jxcore "github.com/jenkins-x/jx-api/v4/pkg/apis/core/v4beta1"
)
func TestCmdRequirementsEdit(t *testing.T) {
t.Parallel()
type testData struct {
name string
args []string
callback func(t *testing.T, req *jxcore.RequirementsConfig)
fail bool
initialFile string
}
gitOpsEnabled := filepath.Join("test_data", "gitops-enabled.yml")
tests := []testData{
{
name: "bbs",
args: []string{"--git-kind=bitbucketserver"},
callback: func(t *testing.T, req *jxcore.RequirementsConfig) {
assert.Equal(t, "bitbucketserver", req.Cluster.GitKind, "req.Cluster.GitKind")
},
initialFile: gitOpsEnabled,
},
{
name: "bucket-logs",
args: []string{"--bucket-logs", "gs://foo"},
callback: func(t *testing.T, req *jxcore.RequirementsConfig) {
assert.Equal(t, "gs://foo", req.GetStorageURL("logs"), "req.Storage.Logs.URL")
},
initialFile: gitOpsEnabled,
},
{
name: "bad-git-kind",
args: []string{"--git-kind=gitlob"},
fail: true,
initialFile: gitOpsEnabled,
},
{
name: "bad-secret",
args: []string{"--secret=vaulx"},
fail: true,
initialFile: gitOpsEnabled,
},
}
tmpDir, err := ioutil.TempDir("", "jx-cmd-req-")
require.NoError(t, err, "failed to create temp dir")
require.DirExists(t, tmpDir, "could not create temp dir for running tests")
for i, tt := range tests {
if tt.name == "" {
tt.name = fmt.Sprintf("test%d", i)
}
t.Logf("running test %s", tt.name)
dir := filepath.Join(tmpDir, tt.name)
err = os.MkdirAll(dir, files.DefaultDirWritePermissions)
require.NoError(t, err, "failed to create dir %s", dir)
localReqFile := filepath.Join(dir, jxcore.RequirementsConfigFileName)
if tt.initialFile != "" {
err = files.CopyFile(tt.initialFile, localReqFile)
require.NoError(t, err, "failed to copy %s to %s", tt.initialFile, localReqFile)
require.FileExists(t, localReqFile, "file should have been copied")
}
cmd, _ := edit.NewCmdRequirementsEdit()
args := append(tt.args, "--dir", dir)
err := cmd.ParseFlags(args)
		require.NoError(t, err, "failed to parse arguments %#v for test %s", args, tt.name)
old := os.Args
os.Args = args
err = cmd.RunE(cmd, args)
if err != nil {
if tt.fail {
				t.Logf("got expected failure for test %s: %s", tt.name, err.Error())
continue
}
t.Errorf("test %s reported error: %s", tt.name, err)
continue
}
os.Args = old
		// now let's parse the requirements
file := localReqFile
require.FileExists(t, file, "should have generated the requirements file")
req, _, err := jxcore.LoadRequirementsConfig(dir, jxcore.DefaultFailOnValidationError)
require.NoError(t, err, "failed to load requirements from dir %s", dir)
if tt.callback != nil {
tt.callback(t, &req.Spec)
}
}
} | "fmt"
"io/ioutil"
"os" |
comments.module.ts | import { Module } from "@nestjs/common";
import { TypeOrmModule } from "@nestjs/typeorm";
import { CommentsEntity } from "src/entities/comments.entity";
import { PostsEntity } from "src/entities/posts.entity";
import { CommentsController } from "./comments.controller";
import { CommentsService } from "./comments.service";
| providers: [CommentsService],
})
export class CommentsModule { } |
@Module({
imports: [TypeOrmModule.forFeature([CommentsEntity])],
controllers: [CommentsController], |
denon_receiver_xml.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This Python program saves test XMLs from a Denon receiver to the current directory.
Usage: python denon_receiver_xml.py --host 192.168.0.250 --prefix AVR-X4100W
:copyright: (c) 2017 by Oliver Goetz.
:license: MIT, see LICENSE for more details.
"""
import argparse
from io import BytesIO
import requests
import xml.etree.ElementTree as ET
from collections import namedtuple
XML = namedtuple("XML", ["port", "type", "path", "tags", "filename"])
SAVED_XML = [XML("80", "post", "/goform/AppCommand.xml",
["GetFriendlyName"],
"AppCommand-setup"),
XML("80", "post", "/goform/AppCommand.xml",
["GetAllZonePowerStatus", "GetAllZoneSource",
"GetRenameSource", "GetDeletedSource",
"GetSurroundModeStatus", "GetToneControl",
"GetAllZoneVolume", "GetAllZoneMuteStatus"],
"AppCommand-update"),
XML("80", "get", "/goform/Deviceinfo.xml", [], "Deviceinfo.xml"),
XML("80", "get", "/goform/formMainZone_MainZoneXmlStatus.xml",
[], "formMainZone_MainZoneXmlStatus"),
XML("80", "get", "/goform/formMainZone_MainZoneXml.xml",
[], "formMainZone_MainZoneXml"),
XML("80", "get", "/goform/formNetAudio_StatusXml.xml",
[], "formNetAudio_StatusXml"),
XML("80", "get", "/goform/formTuner_TunerXml.xml",
[], "formTuner_TunerXml"),
XML("80", "get", "/goform/formTuner_HdXml.xml",
[], "formTuner_HdXml"),
XML("80", "get", "/goform/formZone2_Zone2XmlStatus.xml",
[], "formZone2_Zone2XmlStatus"),
XML("80", "get", "/goform/formZone3_Zone3XmlStatus.xml",
[], "formZone3_Zone3XmlStatus"),
XML("8080", "post", "/goform/AppCommand.xml",
["GetFriendlyName"],
"AppCommand-setup"),
XML("8080", "post", "/goform/AppCommand.xml",
["GetAllZonePowerStatus", "GetAllZoneSource",
"GetRenameSource", "GetDeletedSource",
"GetSurroundModeStatus", "GetToneControl",
"GetAllZoneVolume", "GetAllZoneMuteStatus"],
"AppCommand-update"),
XML("8080", "get", "/goform/Deviceinfo.xml", [],
"Deviceinfo.xml"),
XML("8080", "get", "/goform/formMainZone_MainZoneXmlStatus.xml",
[], "formMainZone_MainZoneXmlStatus"),
XML("8080", "get", "/goform/formMainZone_MainZoneXml.xml",
[], "formMainZone_MainZoneXml"),
XML("8080", "get", "/goform/formNetAudio_StatusXml.xml",
[], "formNetAudio_StatusXml"),
XML("8080", "get", "/goform/formTuner_TunerXml.xml",
[], "formTuner_TunerXml"),
XML("8080", "get", "/goform/formTuner_HdXml.xml",
[], "formTuner_HdXml"),
XML("8080", "get", "/goform/formZone2_Zone2XmlStatus.xml",
[], "formZone2_Zone2XmlStatus"),
XML("8080", "get", "/goform/formZone3_Zone3XmlStatus.xml",
[], "formZone3_Zone3XmlStatus")]
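# Build the POST body for AppCommand.xml: attributes are written in chunks of
# five <cmd> elements, each chunk as its own <tx> document appended to one byte buffer.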
def create_post_body(attribute_list):
# Buffer XML body as binary IO
body = BytesIO()
chunks = [attribute_list[i:i+5] for i in range(
0, len(attribute_list), 5)]
for i, chunk in enumerate(chunks):
# Prepare POST XML body for AppCommand.xml
post_root = ET.Element("tx")
for attribute in chunk:
# Append tags for each attribute
item = ET.Element("cmd")
item.set("id", "1")
item.text = attribute
post_root.append(item)
post_tree = ET.ElementTree(post_root)
post_tree.write(body, encoding="utf-8", xml_declaration=bool(i == 0))
body_bytes = body.getvalue()
body.close()
return body_bytes
def http_post(host, port, path, tags, filename):
filename = filename + "-" + str(port)
data = create_post_body(tags)
try:
r = requests.post(
"http://{host}:{port}/{path}".format(
host=host, port=port, path=path), data=data)
except requests.exceptions.ConnectionError:
print("ConnectionError retrieving data from host {} port {} \
path {}".format(host, port, path))
filename = filename + "-ConnectionError.xml"
with open("./{}".format(filename), "wb") as file:
file.write("".encode())
except requests.exceptions.Timeout:
print("Timeout retrieving data from host {} port {} path {}".format(
host, port, path))
filename = filename + "-Timeout.xml"
with open("./{}".format(filename), "wb") as file:
file.write("".encode())
else:
print("HTTP Status Code of {}: {}".format(path, r.status_code))
filename = filename + "-" + str(r.status_code) + ".xml"
with open("./{}".format(filename), "wb") as file:
file.write(r.content)
def | (host, port, path, filename):
filename = filename + "-" + str(port)
try:
r = requests.get(
"http://{host}:{port}/{path}".format(
host=host, port=port, path=path))
except requests.exceptions.ConnectionError:
print("ConnectionError retrieving data from host {} path {}".format(
host, path))
filename = filename + "-ConnectionError.xml"
with open("./{}".format(filename), "wb") as file:
file.write("".encode())
except requests.exceptions.Timeout:
print("Timeout retrieving data from host {} path {}".format(
host, path))
filename = filename + "-Timeout.xml"
with open("./{}".format(filename), "wb") as file:
file.write("".encode())
else:
print("HTTP Status Code of {}: {}".format(path, r.status_code))
filename = filename + "-" + str(r.status_code) + ".xml"
with open("./{}".format(filename), "wb") as file:
file.write(r.content)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--host', type=str,
default='192.168.0.250',
help='host of Denon AVR receiver')
parser.add_argument('--prefix', type=str,
default='AVR',
help='prefix of filenames to be saved')
args = parser.parse_args()
for entry in SAVED_XML:
if entry.type == "post":
http_post(args.host, entry.port, entry.path, entry.tags,
"{}-{}".format(args.prefix, entry.filename))
elif entry.type == "get":
http_get(args.host, entry.port, entry.path, "{}-{}".format(
args.prefix, entry.filename))
else:
print("wrong type, only \"get\" and \"post\" are allowed")
| http_get |
md_analysis.py | """
MD Analysis
===========
#. :class:`.MDAnalysis`
Class for converting a molecule to and back from an MDAnalysis object.
"""
import logging
from ...utilities import WrapperNotInstalledException
try:
import MDAnalysis as mda
except ModuleNotFoundError:
mda = None
logger = logging.getLogger(__name__)
class MDAnalysis:
"""
Converter for :class:`stk.Molecule` to and from MDAnalysis.
Examples
--------
An stk molecule can be converted into an MDAnalysis Universe.
.. code-block:: python
| position=np.array((10, 10, 10))
)
universe = stko.MDAnalysis().get_universe(stkmol)
print('R_g:', universe.atoms.radius_of_gyration())
print('B_sphere:', universe.atoms.bsphere())
print('Universe COM:', universe.atoms.center_of_mass())
print('stk centroid:', stkmol.get_centroid())
"""
def __init__(self):
if mda is None:
raise WrapperNotInstalledException(
'MDAnalysis is not installed; see README for '
'installation.'
)
def get_universe(self, mol):
"""
Get an MDAnalysis object.
Parameters
----------
mol : :class:`stk.Molecule`
Molecule to convert.
Returns
-------
:class:`MDAnalysis.Universe`
The MDAnalysis Universe of the molecule.
"""
rdkit_mol = mol.to_rdkit_mol()
return mda.Universe(rdkit_mol) | import stk
import stko
stkmol = stk.BuildingBlock('NCCNCCN').with_centroid( |
test_model_builder.py | import filecmp
from deliverable_model.builder.model.model_builder import ModelBuilder
def test_build(datadir, tmpdir):
model_builder = ModelBuilder()
model_builder.add_keras_h5_model(datadir / "fixture" / "keras_h5_model")
model_builder.save()
config = model_builder.serialize(tmpdir)
assert config == {
"converter_for_request": "converter_for_request",
"converter_for_response": "converter_for_response",
"custom_object_dependency": [],
"type": "keras_h5_model", | dircmp_obj = filecmp.dircmp(datadir / "expected", tmpdir)
assert not dircmp_obj.diff_files
assert model_builder.get_dependency() == ["tensorflow"] | "version": "1.0",
}
|
issue-5478.ts | import "reflect-metadata";
import {Connection} from "../../../src";
import {createTestingConnections, closeTestingConnections} from "../../utils/test-utils";
import {expect} from "chai";
import {UserEntity} from "./entity/UserEntity";
describe("github issues > #5478 Setting enumName doesn't change how migrations get generated", () => {
let connections: Connection[];
before(async () => connections = await createTestingConnections({
migrations: [],
enabledDrivers: ["postgres"],
schemaCreate: true,
dropSchema: true,
entities: [UserEntity],
}));
after(() => closeTestingConnections(connections));
it("should correctly rename enum", () => Promise.all(connections.map(async connection => {
const queryRunner = connection.createQueryRunner();
// add `enumName`
let table = await queryRunner.getTable("user");
const column = table!.findColumnByName("userType")!;
const newColumn = column.clone();
newColumn.enumName = "UserTypeEnum"
// change column
await queryRunner.changeColumn(table!, column, newColumn)
// check if `enumName` changed
table = await queryRunner.getTable("user");
let changedColumn = table!.findColumnByName("userType")!;
expect(changedColumn.enumName).to.equal("UserTypeEnum");
// revert changes
await queryRunner.executeMemoryDownSql()
// check if `enumName` reverted
table = await queryRunner.getTable("user"); | expect(changedColumn.enumName).to.undefined;
await queryRunner.release();
})));
}); | changedColumn = table!.findColumnByName("userType")!; |
utils.py | from pyteomics import mzml
import numpy as np
from collections import defaultdict, Counter
from os import path
import math
from scipy.optimize import curve_fit
import logging
logger = logging.getLogger(__name__)
from .cutils import get_fast_dict, get_and_calc_apex_intensity_and_scan
class MS1OnlyMzML(mzml.MzML):
_default_iter_path = '//spectrum[./*[local-name()="cvParam" and @name="ms level" and @value="1"]]'
_use_index = False
_iterative = False
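# Gaussian with a constant baseline; used by calibrate_mass() to fit the mass-error histogram.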
def noisygaus(x, a, x0, sigma, b):
return a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2)) + b
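# Fit a Gaussian to the histogram of mass errors and return the fitted shift,
# its sigma, and the variance of the fitted amplitude (pcov[0][0]).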
def calibrate_mass(bwidth, mass_left, mass_right, true_md):
bbins = np.arange(-mass_left, mass_right, bwidth)
H1, b1 = np.histogram(true_md, bins=bbins)
b1 = b1 + bwidth
b1 = b1[:-1]
popt, pcov = curve_fit(noisygaus, b1, H1, p0=[1, np.median(true_md), 1, 1])
mass_shift, mass_sigma = popt[1], abs(popt[2])
return mass_shift, mass_sigma, pcov[0][0]
def calc_peptide_features(hills_dict, peptide_features, negative_mode, faims_val, RT_dict, data_start_id):
for pep_feature in peptide_features:
pep_feature['mz'] = pep_feature['hill_mz_1']
pep_feature['isoerror'] = pep_feature['isotopes'][0]['mass_diff_ppm']
pep_feature['isoerror2'] = pep_feature['isotopes'][1]['mass_diff_ppm'] if len(pep_feature['isotopes']) > 1 else -100
pep_feature['nScans'] = hills_dict['hills_lengths'][pep_feature['monoisotope idx']]
pep_feature['massCalib'] = pep_feature['mz'] * pep_feature['charge'] - 1.0072765 * pep_feature['charge'] * (-1 if negative_mode else 1)
hills_dict, _, _ = get_and_calc_apex_intensity_and_scan(hills_dict, pep_feature['monoisotope idx'])
pep_feature['scanApex'] = hills_dict['hills_scan_apex'][pep_feature['monoisotope idx']]
pep_feature['rtApex'] = RT_dict[hills_dict['hills_scan_apex'][pep_feature['monoisotope idx']]+data_start_id]
pep_feature['intensityApex'] = hills_dict['hills_intensity_apex'][pep_feature['monoisotope idx']]
pep_feature['rtStart'] = RT_dict[hills_dict['hills_scan_lists'][pep_feature['monoisotope idx']][0]+data_start_id]
pep_feature['rtEnd'] = RT_dict[hills_dict['hills_scan_lists'][pep_feature['monoisotope idx']][-1]+data_start_id]
pep_feature['mono_hills_scan_lists'] = hills_dict['hills_scan_lists'][pep_feature['monoisotope idx']]
pep_feature['mono_hills_intensity_list'] = hills_dict['hills_intensity_array'][pep_feature['monoisotope idx']]
return peptide_features
def write_output(peptide_features, args, write_header=True):
input_mzml_path = args['file']
if args['o']:
output_file = args['o']
else:
output_file = path.splitext(input_mzml_path)[0]\
+ path.extsep + 'features.tsv'
columns_for_output = [
'massCalib',
'rtApex',
'intensityApex',
'charge',
'nIsotopes',
'nScans',
'mz', | 'rtStart',
'rtEnd',
'FAIMS',
'im',
'mono_hills_scan_lists',
'mono_hills_intensity_list',
'scanApex',
'isoerror2',
]
if write_header:
out_file = open(output_file, 'w')
out_file.write('\t'.join(columns_for_output) + '\n')
out_file.close()
out_file = open(output_file, 'a')
for pep_feature in peptide_features:
out_file.write('\t'.join([str(pep_feature[col]) for col in columns_for_output]) + '\n')
out_file.close()
def centroid_pasef_data(data_for_analyse_tmp, args, mz_step):
cnt_ms1_scans = len(data_for_analyse_tmp)
for spec_idx, z in enumerate(data_for_analyse_tmp):
logger.info('PASEF scans analysis: %d/%d', spec_idx+1, cnt_ms1_scans)
logger.info('number of m/z peaks in scan: %d', len(z['m/z array']))
if 'ignore_ion_mobility' not in z:
mz_ar_new = []
intensity_ar_new = []
ion_mobility_ar_new = []
mz_ar = z['m/z array']
intensity_ar = z['intensity array']
ion_mobility_ar = z['mean inverse reduced ion mobility array']
ion_mobility_accuracy = args['paseftol']
ion_mobility_step = max(ion_mobility_ar) * ion_mobility_accuracy
ion_mobility_ar_fast = (ion_mobility_ar/ion_mobility_step).astype(int)
mz_ar_fast = (mz_ar/mz_step).astype(int)
idx = np.argsort(mz_ar_fast)
mz_ar_fast = mz_ar_fast[idx]
ion_mobility_ar_fast = ion_mobility_ar_fast[idx]
mz_ar = mz_ar[idx]
intensity_ar = intensity_ar[idx]
ion_mobility_ar = ion_mobility_ar[idx]
max_peak_idx = len(mz_ar)
banned_idx = set()
peak_idx = 0
while peak_idx < max_peak_idx:
if peak_idx not in banned_idx:
mass_accuracy_cur = mz_ar[peak_idx] * 1e-6 * args['itol']
mz_val_int = mz_ar_fast[peak_idx]
ion_mob_val_int = ion_mobility_ar_fast[peak_idx]
tmp = [peak_idx, ]
peak_idx_2 = peak_idx + 1
while peak_idx_2 < max_peak_idx:
if peak_idx_2 not in banned_idx:
mz_val_int_2 = mz_ar_fast[peak_idx_2]
if mz_val_int_2 - mz_val_int > 1:
break
elif abs(mz_ar[peak_idx]-mz_ar[peak_idx_2]) <= mass_accuracy_cur:
ion_mob_val_int_2 = ion_mobility_ar_fast[peak_idx_2]
if abs(ion_mob_val_int - ion_mob_val_int_2) <= 1:
tmp.append(peak_idx_2)
peak_idx = peak_idx_2
peak_idx_2 += 1
all_intensity = [intensity_ar[p_id] for p_id in tmp]
i_val_new = sum(all_intensity)
if i_val_new >= args['pasefmini'] and len(all_intensity) >= args['pasefminlh']:
all_mz = [mz_ar[p_id] for p_id in tmp]
all_ion_mob = [ion_mobility_ar[p_id] for p_id in tmp]
mz_val_new = np.average(all_mz, weights=all_intensity)
ion_mob_new = np.average(all_ion_mob, weights=all_intensity)
intensity_ar_new.append(i_val_new)
mz_ar_new.append(mz_val_new)
ion_mobility_ar_new.append(ion_mob_new)
banned_idx.update(tmp)
peak_idx += 1
data_for_analyse_tmp[spec_idx]['m/z array'] = np.array(mz_ar_new)
data_for_analyse_tmp[spec_idx]['intensity array'] = np.array(intensity_ar_new)
data_for_analyse_tmp[spec_idx]['mean inverse reduced ion mobility array'] = np.array(ion_mobility_ar_new)
logger.info('number of m/z peaks in scan after centroiding: %d', len(data_for_analyse_tmp[spec_idx]['m/z array']))
    data_for_analyse_tmp = [z for z in data_for_analyse_tmp if len(z['m/z array']) > 0]
logger.info('Number of MS1 scans after combining ion mobility peaks: %d', len(data_for_analyse_tmp))
# fast_dict = defaultdict(set)
# for peak_idx, (mz_val_int, ion_mob_val_int) in enumerate(zip(mz_ar_fast, ion_mobility_ar_fast)):
# fast_dict[(mz_val_int-1, ion_mob_val_int)].add(peak_idx)
# fast_dict[(mz_val_int, ion_mob_val_int)].add(peak_idx)
# fast_dict[(mz_val_int+1, ion_mob_val_int)].add(peak_idx)
# fast_dict[(mz_val_int-1, ion_mob_val_int-1)].add(peak_idx)
# fast_dict[(mz_val_int, ion_mob_val_int-1)].add(peak_idx)
# fast_dict[(mz_val_int+1, ion_mob_val_int-1)].add(peak_idx)
# fast_dict[(mz_val_int-1, ion_mob_val_int+1)].add(peak_idx)
# fast_dict[(mz_val_int, ion_mob_val_int+1)].add(peak_idx)
# fast_dict[(mz_val_int+1, ion_mob_val_int+1)].add(peak_idx)
# print('HERE2')
# hill_length = []
# peak_idx_array = []
# for peak_idx, (mz_val_int, ion_mob_val_int) in enumerate(zip(mz_ar_fast, ion_mobility_ar_fast)):
# hill_length.append(len(fast_dict[(mz_val_int, ion_mob_val_int)]))
# peak_idx_array.append(peak_idx)
# peak_idx_array = np.array(peak_idx_array)
# print('HERE3')
# added_idx = set()
# idx_sort = np.argsort(hill_length)[::-1]
# for peak_idx in peak_idx_array[idx_sort]:
# if peak_idx not in added_idx:
# mz_val_int = mz_ar_fast[peak_idx]
# ion_mob_val_int = ion_mobility_ar_fast[peak_idx]
# all_idx = set([p_id for p_id in fast_dict[(mz_val_int, ion_mob_val_int)] if p_id not in added_idx])
# if len(all_idx):
# added_idx.update(all_idx)
# all_intensity = [intensity_ar[p_id] for p_id in all_idx]
# i_val_new = sum(all_intensity)
# if i_val_new >= args['pasefmini']:
# all_mz = [mz_ar[p_id] for p_id in all_idx]
# all_ion_mob = [ion_mobility_ar[p_id] for p_id in all_idx]
# mz_val_new = np.average(all_mz, weights=all_intensity)
# ion_mob_new = np.average(all_ion_mob, weights=all_intensity)
# intensity_ar_new.append(i_val_new)
# mz_ar_new.append(mz_val_new)
# ion_mobility_ar_new.append(ion_mob_new)
# data_for_analyse_tmp[spec_idx]['m/z array'] = np.array(mz_ar_new)
# data_for_analyse_tmp[spec_idx]['intensity array'] = np.array(intensity_ar_new)
# data_for_analyse_tmp[spec_idx]['mean inverse reduced ion mobility array'] = np.array(ion_mobility_ar_new)
# data_for_analyse_tmp = [z for z in data_for_analyse_tmp if len(z['m/z array'] > 0)]
# print('Number of MS1 scans after combining ion mobility peaks: ', len(data_for_analyse_tmp))
return data_for_analyse_tmp
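# Collapse profile-mode spectra to peak apexes: points closer than 0.05 m/z are
# grouped and only the local intensity maxima are kept.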
def process_profile(data_for_analyse_tmp):
data_for_analyse_tmp_out = []
for z in data_for_analyse_tmp:
best_mz = 0
best_int = 0
best_im = 0
prev_mz = False
prev_int = False
threshold = 0.05
ar1 = []
ar2 = []
ar3 = []
for mzv, intv, imv in zip(z['m/z array'], z['intensity array'], z['mean inverse reduced ion mobility array']):
if prev_mz is False:
best_mz = mzv
best_int = intv
best_im = imv
elif mzv - prev_mz > threshold:
ar1.append(best_mz)
ar2.append(best_int)
ar3.append(best_im)
best_mz = mzv
best_int = intv
best_im = imv
elif best_int > prev_int and intv > prev_int:
ar1.append(best_mz)
ar2.append(best_int)
ar3.append(best_im)
best_mz = mzv
best_int = intv
best_im = imv
elif intv > best_int:
best_mz = mzv
best_int = intv
best_im = imv
prev_mz = mzv
prev_int = intv
ar1.append(best_mz)
ar2.append(best_int)
ar3.append(best_im)
z['m/z array'] = np.array(ar1)
z['intensity array'] = np.array(ar2)
z['mean inverse reduced ion mobility array'] = np.array(ar3)
data_for_analyse_tmp_out.append(z)
return data_for_analyse_tmp_out
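# Remove low-intensity TOF noise: a per-50-m/z-window threshold is estimated once
# from a Gaussian fit to the log-intensity distribution and then applied to every scan.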
def process_tof(data_for_analyse_tmp):
# print(len(z['m/z array']))
universal_dict = {}
cnt = 0
for z in data_for_analyse_tmp:
fast_set = z['m/z array'] // 50
if cnt == 1:
for l in set(fast_set):
idxt = fast_set == l
true_i = np.log10(z['intensity array'])[idxt]
if len(true_i) > 150:
i_left = true_i.min()
i_right = true_i.max()
i_shift, i_sigma, covvalue = calibrate_mass(0.05, i_left, i_right, true_i)
# median_val =
print(i_shift, i_sigma, covvalue)
universal_dict[l] = 10**(i_shift + 3 * i_sigma)#10**(np.median(true_i[idxt]) * 2)
thresholds = [universal_dict.get(zz, 150) for zz in list(fast_set)]
idxt2 = z['intensity array'] <= thresholds
z['intensity array'][idxt2] = -1
idx = z['intensity array'] > 0
z['intensity array'] = z['intensity array'][idx]
z['m/z array'] = z['m/z array'][idx]
z['mean inverse reduced ion mobility array'] = z['mean inverse reduced ion mobility array'][idx]
cnt += 1
data_for_analyse_tmp = [z for z in data_for_analyse_tmp if len(z['m/z array'])]
return data_for_analyse_tmp
def process_mzml(args):
input_mzml_path = args['file']
min_intensity = args['mini']
min_mz = args['minmz']
max_mz = args['maxmz']
skipped = 0
data_for_analyse = []
cnt = 0
for z in MS1OnlyMzML(source=input_mzml_path):
if z['ms level'] == 1:
if 'mean inverse reduced ion mobility array' not in z:
z['ignore_ion_mobility'] = True
z['mean inverse reduced ion mobility array'] = np.zeros(len(z['m/z array']))
idx = z['intensity array'] >= min_intensity
z['intensity array'] = z['intensity array'][idx]
z['m/z array'] = z['m/z array'][idx]
z['mean inverse reduced ion mobility array'] = z['mean inverse reduced ion mobility array'][idx]
idx = z['m/z array'] >= min_mz
z['m/z array'] = z['m/z array'][idx]
z['intensity array'] = z['intensity array'][idx]
z['mean inverse reduced ion mobility array'] = z['mean inverse reduced ion mobility array'][idx]
idx = z['m/z array'] <= max_mz
z['m/z array'] = z['m/z array'][idx]
z['intensity array'] = z['intensity array'][idx]
z['mean inverse reduced ion mobility array'] = z['mean inverse reduced ion mobility array'][idx]
idx = np.argsort(z['m/z array'])
z['m/z array'] = z['m/z array'][idx]
z['intensity array'] = z['intensity array'][idx]
z['mean inverse reduced ion mobility array'] = z['mean inverse reduced ion mobility array'][idx]
cnt += 1
# if len(data_for_analyse) > 50:
# break
if len(z['m/z array']):
data_for_analyse.append(z)
else:
skipped += 1
logger.info('Number of MS1 scans: %d', len(data_for_analyse))
logger.info('Number of skipped MS1 scans: %d', skipped)
if len(data_for_analyse) == 0:
raise Exception('no MS1 scans in input file')
return data_for_analyse
def process_mzml_dia(args):
input_mzml_path = args['file']
# min_intensity = args['mini']
# min_mz = args['minmz']
# max_mz = args['maxmz']
min_intensity = 0
min_mz = 1
max_mz = 1e6
skipped = 0
data_for_analyse = []
cnt = 0
for z in mzml.read(input_mzml_path):
if z['ms level'] == 2:
if 'mean inverse reduced ion mobility array' not in z:
z['ignore_ion_mobility'] = True
z['mean inverse reduced ion mobility array'] = np.zeros(len(z['m/z array']))
idx = z['intensity array'] >= min_intensity
z['intensity array'] = z['intensity array'][idx]
z['m/z array'] = z['m/z array'][idx]
z['mean inverse reduced ion mobility array'] = z['mean inverse reduced ion mobility array'][idx]
idx = z['m/z array'] >= min_mz
z['m/z array'] = z['m/z array'][idx]
z['intensity array'] = z['intensity array'][idx]
z['mean inverse reduced ion mobility array'] = z['mean inverse reduced ion mobility array'][idx]
idx = z['m/z array'] <= max_mz
z['m/z array'] = z['m/z array'][idx]
z['intensity array'] = z['intensity array'][idx]
z['mean inverse reduced ion mobility array'] = z['mean inverse reduced ion mobility array'][idx]
idx = np.argsort(z['m/z array'])
z['m/z array'] = z['m/z array'][idx]
z['intensity array'] = z['intensity array'][idx]
z['mean inverse reduced ion mobility array'] = z['mean inverse reduced ion mobility array'][idx]
cnt += 1
# if len(data_for_analyse) > 5000:
# break
if len(z['m/z array']):
data_for_analyse.append(z)
else:
skipped += 1
logger.info('Number of MS2 scans: %d', len(data_for_analyse))
logger.info('Number of skipped MS2 scans: %d', skipped)
return data_for_analyse | |
uci.rs | // Copyright 2020-2021, Benjamin Ludewig, Florian Bonetti, Jeffrey Munstermann, Luca Nittscher, Hugo Damer, Michael Bach
// SPDX-License-Identifier: MIT OR Apache-2.0
#[cfg(not(feature = "uci"))]
pub use mock::*;
#[cfg(feature = "uci")]
pub use rust_uci::*;
#[cfg(not(feature = "uci"))]
#[allow(clippy::unused_self)]
mod mock {
use crate::error::{self, Result};
pub struct Uci {}
impl Uci {
pub fn new() -> Result<Self> {
Ok(Self {})
}
pub fn set_config_dir(&mut self, config_dir: &str) -> Result<()> {
debug!("set_config_dir {}", config_dir);
Ok(())
}
pub fn | (&mut self, save_dir: &str) -> Result<()> {
debug!("set_save_dir {}", save_dir);
Ok(())
}
pub fn revert(&mut self, package: &str) -> Result<()> {
debug!("revert {}", package);
Ok(())
}
pub fn delete(&mut self, key: &str) -> Result<()> {
debug!("delete {}", key);
Ok(())
}
pub fn get(&mut self, key: &str) -> Result<String> {
debug!("get {}", key);
error::NoneError {}.fail()
}
pub fn set(&mut self, key: &str, value: &str) -> Result<()> {
debug!("set {}={}", key, value);
Ok(())
}
pub fn commit(&mut self, package: &str) -> Result<()> {
debug!("commit {}", package);
Ok(())
}
}
}
#[cfg(test)]
mod tests {
use std::{fs, fs::File, io::Read};
use super::*;
use crate::error::Result;
fn init() -> Result<Uci> {
let _ = env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("warn"))
.is_test(true)
.try_init();
let mut uci = Uci::new()?;
uci.set_config_dir("tests/config")?;
uci.set_save_dir("/tmp/.uci_tests")?;
Ok(uci)
}
#[test]
fn test_reading_key() -> Result<()> {
let mut uci = init()?;
assert_eq!(uci.get("network.wan")?, "interface");
assert_eq!(uci.get("network.@interface[0]")?, "interface");
assert_eq!(uci.get("network.a")?, "alias");
assert_eq!(uci.get("network.@alias[-1]")?, "alias");
assert_eq!(uci.get("network.wan.proto")?, "dhcp");
assert_eq!(uci.get("network.@interface[-1].proto")?, "dhcp");
assert_eq!(uci.get("network.lan.proto")?, "static");
assert_eq!(uci.get("network.@interface[0].proto")?, "static");
assert_eq!(uci.get("broken.a").is_err(), true);
assert_eq!(uci.get("broken.a.b").is_err(), true);
assert_eq!(uci.get("inexistant.c").is_err(), true);
assert_eq!(uci.get("inexistant.c.d").is_err(), true);
Ok(())
}
#[test]
fn test_writing_key() -> Result<()> {
let mut uci = init()?;
File::create("tests/config/new_network")?;
uci.set("new_network.a", "alias")?;
uci.set("new_network.a.interface", "lan")?;
uci.set("new_network.b", "alias")?;
uci.set("new_network.b.interface", "lan")?;
uci.set("new_network.lan", "interface")?;
uci.set("new_network.lan.proto", "static")?;
uci.set("new_network.lan.ifname", "eth0")?;
uci.set("new_network.lan.test", "123")?;
uci.set("new_network.lan.enabled", "off")?;
uci.set("new_network.lan.ipaddr", "2.3.4.5")?;
uci.set("new_network.wan", "interface")?;
uci.set("new_network.wan.proto", "dhcp")?;
uci.set("new_network.wan.ifname", "eth1")?;
uci.set("new_network.wan.enabled", "on")?;
uci.set("new_network.wan.aliases", "c d")?;
uci.set("new_network.c", "alias")?;
uci.set("new_network.c.interface", "wan")?;
uci.set("new_network.d", "alias")?;
uci.set("new_network.d.interface", "wan")?;
uci.commit("new_network")?;
let mut file = File::open("tests/config/new_network")?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let mut file = File::open("tests/config/network")?;
let mut actual_contents = String::new();
file.read_to_string(&mut actual_contents)?;
fs::remove_file("tests/config/new_network")?;
assert_eq!(contents, actual_contents);
Ok(())
}
#[test]
fn test_delete() -> Result<()> {
let mut uci = init()?;
assert_eq!(uci.get("network.wan.proto")?, "dhcp");
assert_eq!(uci.get("network.wan.ifname")?, "eth1");
uci.delete("network.wan")?;
assert_eq!(uci.get("network.wan.proto").is_err(), true);
assert_eq!(uci.get("network.wan.ifname").is_err(), true);
uci.revert("network")?;
assert_eq!(uci.get("network.wan.proto")?, "dhcp");
assert_eq!(uci.get("network.wan.ifname")?, "eth1");
Ok(())
}
}
| set_save_dir |
day22.rs | #[macro_use]
extern crate clap;
use clap::App;
extern crate regex;
use std::error::Error;
use std::fs::File;
use std::io::prelude::*;
use regex::Regex;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
#[derive(Clone,Debug)]
struct GameState {
player : Character,
boss : Character,
players_turn : bool,
turns : u32,
minimum_mana_usage : Arc<AtomicUsize>
}
impl GameState {
fn new(player : Character, boss : Character) -> GameState {
GameState {
player : player,
boss : boss,
players_turn : true,
turns : 0,
minimum_mana_usage : Arc::new(AtomicUsize::new(std::usize::MAX))
}
}
fn apply_boss_damage(&mut self) {
if !self.players_turn {
let damage = std::cmp::max(1, self.boss.damage - self.player.armor);
self.player.hp -= damage as i32;
}
}
fn apply_spells(&mut self) {
self.player.armor = 0; // Init to no shield
for s in &mut self.player.active_spells {
self.player.mana += s.mana_gain;// Recharge
self.player.armor += s.armor;// Shield
s.turns -= 1;
}
self.player.active_spells.retain(|x| x.turns != 0);
for s in &mut self.boss.active_spells {
self.boss.hp -= s.damage as i32; // Poison
s.turns -= 1;
}
self.boss.active_spells.retain(|x| x.turns != 0);
}
fn spell_in_effect(&self, spell : &Spell) -> bool {
let mut on = false;
for s in &self.player.active_spells {
if s.name == spell.name {
on = true;
break;
}
}
if !on {
for s in &self.boss.active_spells {
if s.name == spell.name {
on = true;
break;
}
}
}
on
}
fn add_spell(&mut self, spell : Spell) {
self.player.mana -= spell.mana_cost;
self.player.mana_usage += spell.mana_cost;
if spell.turns > 0 {
if spell.name == "Shield" || spell.name == "Recharge" {
self.player.active_spells.push(spell);
} else {
self.boss.active_spells.push(spell);
}
} else {
self.player.hp += spell.heal as i32;
self.boss.hp -= spell.damage as i32;
}
}
}
#[derive(Clone,Debug)]
struct Character {
hp : i32,
damage : u32,
armor : u32,
mana : u32,
mana_usage : u32,
active_spells : Vec<Spell>
}
impl Character {
fn new(hp : i32, damage : u32, mana : u32) -> Character {
Character {
hp : hp,
damage : damage,
armor : 0,
mana : mana,
mana_usage : 0,
active_spells : Vec::new()
}
}
}
#[derive(Clone,Debug)]
struct Spell {
name : String,
mana_cost : u32,
damage : u32,
heal : u32,
armor : u32,
mana_gain : u32,
turns : u32, // If turns = 0, instant
}
impl Spell {
fn new(name : &str, cost : u32, damage : u32, heal : u32, armor : u32, gain : u32, turns : u32) -> Spell {
Spell {
name : String::from(name),
mana_cost : cost,
damage : damage,
heal : heal,
armor : armor,
mana_gain : gain,
turns : turns
}
}
}
fn main() {
let yaml = load_yaml!("cli.yml");
let matches = App::from_yaml(yaml).get_matches();
let hp = matches.value_of("HP").unwrap_or("50").parse::<i32>().unwrap();
let mana = matches.value_of("MANA").unwrap_or("500").parse::<u32>().unwrap();
let filename = matches.value_of("FILE").unwrap();
let mut file = match File::open(filename) {
Err(why) => panic!("Couldn't open {}: {}", filename, Error::description(&why)),
Ok(file) => file,
};
let mut s = String::new();
match file.read_to_string(&mut s) {
Err(why) => panic!("Couldn't read {}: {}", filename, Error::description(&why)),
Ok(_) => println!("Read file {}", filename),
}
let boss = parse_input(s.trim().split('\n').collect());
let player = Character::new(hp, 0, mana);
let spells = build_spell_list();
let easy = GameState::new(player.clone(), boss.clone());
find_min_mana_usage(easy.clone(), &spells, false);
println!("Easy Mode: Min mana used: {}", easy.minimum_mana_usage.load(Ordering::SeqCst));
let hard = GameState::new(player.clone(), boss.clone());
find_min_mana_usage(hard.clone(), &spells, true);
println!("Hard Mode: Min mana used: {}", hard.minimum_mana_usage.load(Ordering::SeqCst));
}
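// Recursive depth-first search over the spell choices for each player turn,
// pruning branches whose accumulated mana usage already exceeds the best
// total shared through the atomic minimum_mana_usage counter.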
fn find_min_mana_usage(state : GameState, spells : &Vec<Spell>, hard_mode : bool) -> u32 {
let mut mana_usage = std::u32::MAX;
let mut game = state.clone();
| game.turns += 1;
if hard_mode && game.players_turn {
game.player.hp -= 1;
}
if game.player.hp > 0 {
game.apply_spells(); //Apply existing spells to player and boss
if game.boss.hp <= 0 {
// Player won
mana_usage = game.player.mana_usage;
} else if game.players_turn {
game.players_turn = false;
for s in spells {
if !game.spell_in_effect(s) && game.player.mana >= s.mana_cost && ((game.player.mana_usage + s.mana_cost) as usize) < game.minimum_mana_usage.load(Ordering::SeqCst) {
let mut new_game_state = game.clone();
new_game_state.add_spell(s.clone());
let new_mana_usage = find_min_mana_usage(new_game_state, spells, hard_mode);
if new_mana_usage < mana_usage && (new_mana_usage as usize) < game.minimum_mana_usage.load(Ordering::SeqCst) {
mana_usage = new_mana_usage;
game.minimum_mana_usage.store(mana_usage as usize, Ordering::SeqCst);
}
}
}
} else {
// Run boss code
game.apply_boss_damage();
if game.player.hp > 0 {
game.players_turn = true;
// If neither player or boss won, start next round
let new_mana_usage = find_min_mana_usage(game.clone(), spells, hard_mode);
if new_mana_usage < mana_usage && (new_mana_usage as usize) < game.minimum_mana_usage.load(Ordering::SeqCst) {
mana_usage = new_mana_usage;
game.minimum_mana_usage.store(mana_usage as usize, Ordering::SeqCst);
}
}
}
}
mana_usage
}
fn build_spell_list() -> Vec<Spell> {
let mut spells = Vec::new();
spells.push(Spell::new("Shield", 113, 0, 0, 7, 0, 6));
spells.push(Spell::new("Poison", 173, 3, 0, 0, 0, 6));
spells.push(Spell::new("Magic Missle", 53, 4, 0, 0, 0, 0));
spells.push(Spell::new("Drain", 73, 2, 2, 0, 0, 0));
spells.push(Spell::new("Recharge", 229, 0, 0, 0, 101, 5));
spells
}
fn parse_input(input : Vec<&str>) -> Character {
let mut hp = 0;
let mut damage = 0;
for s in input {
if s.trim().len() > 0 {
let re = Regex::new(r"(.*): (\d+)").unwrap();
for cap in re.captures_iter(s) {
if cap.at(1).unwrap() == "Hit Points" {
hp = cap.at(2).unwrap().parse::<i32>().unwrap();
} else if cap.at(1).unwrap() == "Damage" {
damage = cap.at(2).unwrap().parse::<u32>().unwrap();
}
}
}
}
Character::new(hp, damage, 0)
} | |
index.js | import DetailUser from "./pages/detailUser";
import MainUser from "./pages/mainUser";
function UserPage() {
const match = useRouteMatch();
return (
<Switch>
<Route path={`${match.url}/`} exact component={MainUser} />
<Route path={`${match.url}/:id`} component={DetailUser} />
</Switch>
);
}
export default UserPage; | import React from "react";
import { Route, Switch, useRouteMatch } from "react-router-dom"; |
|
test03.js | function AVLTree() {
| this.left = null;
this.right = null;
};
let root = null;
this.getRoot = function () {
return root;
};
let heightNode = function (node) {
if (node === null) {
return -1;
} else {
return Math.max(heightNode(node.left), heightNode(node.right)) + 1;
}
};
let rotationLL = function (node) {
let tmp = node.left;
node.left = tmp.right;
tmp.right = node;
return tmp;
};
let rotationRR = function (node) {
let tmp = node.right;
node.right = tmp.left;
tmp.left = node;
return tmp;
};
let rotationLR = function (node) {
node.left = rotationRR(node.left);
return rotationLL(node);
};
let rotationRL = function (node) {
node.right = rotationLL(node.right);
return rotationRR(node);
};
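    // Insert a key into the subtree rooted at `node`, rebalancing with
    // LL/LR/RR/RL rotations whenever the subtree heights differ by more than 1.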
let insertNode = function (node, ele) {
if (node === null) {
node = new Node(ele);
} else if (ele < node.key) {
node.left = insertNode(node.left, ele);
if (node.left !== null) {
if ((heightNode(node.left) - heightNode(node.right)) > 1) {
if (ele < node.left.key) {
node = rotationLL(node);
} else {
node = rotationLR(node);
}
}
}
} else if (ele > node.key) {
node.right = insertNode(node.right, ele);
if (node.right !== null) {
if ((heightNode(node.right) - heightNode(node.left)) > 1) {
if (ele > node.right.key) {
node = rotationRR(node);
} else {
node = rotationRL(node);
}
}
}
}
return node;
};
this.insert = function (ele) {
root = insertNode(root, ele);
};
let parentNode;
let nodeToBeDeleted;
let removeNode = function (node, ele) {
if (node === null) {
return null;
}
parentNode = node;
if (ele < node.key) {
node.left = removeNode(node.left, ele);
} else {
nodeToBeDeleted = node;
node.right = removeNode(node.right, ele);
}
if (node === parentNode) {
if (nodeToBeDeleted !== null && ele === nodeToBeDeleted.key) {
if (nodeToBeDeleted === parentNode) {
node = node.left;
} else {
let tmp = nodeToBeDeleted.key;
nodeToBeDeleted.key = parentNode.key;
parentNode.key = tmp;
node = node.right;
}
}
} else {
if (node.left === undefined) {
node.left = null;
}
if (node.right === undefined) {
node.right = null;
}
if ((heightNode(node.left) - heightNode(node.right)) === 2) {
if (ele < node.left.key) {
node = rotationLR(node);
} else {
node = rotationLL(node);
}
}
if ((heightNode(node.right) - heightNode(node.left)) === 2) {
if (ele > node.right.key) {
node = rotationRL(node);
} else {
node = rotationRR(node);
}
}
}
return node;
};
this.remove = function (ele) {
parentNode = null;
nodeToBeDeleted = null;
root = removeNode(root, ele);
};
this.inOrderTraverse = function (callback) {
inOrderTraverseNode(root, callback);
};
let inOrderTraverseNode = function (node, callback) {
if (node !== null) {
inOrderTraverseNode(node.left, callback);
callback(node.key);
inOrderTraverseNode(node.right, callback);
}
};
this.preOrderTraverse = function (callback) {
preOrderTraverseNode(root, callback);
};
let preOrderTraverseNode = function (node, callback) {
if (node !== null) {
callback(node.key);
preOrderTraverseNode(node.left, callback);
preOrderTraverseNode(node.right, callback);
}
};
};
var avlTree = new AVLTree();
function printNode(value) {
console.log(value);
}
/*avlTree.insert(1);
avlTree.insert(2);
avlTree.insert(3);
avlTree.insert(4);
avlTree.insert(5);
avlTree.insert(6);
avlTree.insert(7);
avlTree.insert(14);
avlTree.insert(15);
avlTree.insert(13);
avlTree.insert(12);
avlTree.insert(11);*/
//RR rotation
avlTree.insert(50);
avlTree.insert(30);
avlTree.insert(70);
avlTree.insert(60);
avlTree.insert(80);
avlTree.insert(90);
//LL rotation
/*avlTree.insert(50);
avlTree.insert(30);
avlTree.insert(70);
avlTree.insert(10);
avlTree.insert(40);
avlTree.insert(5);*/
//LR rotation
/*avlTree.insert(50);
avlTree.insert(30);
avlTree.insert(70);
avlTree.insert(40);
avlTree.insert(10);
avlTree.insert(35);*/
//RL rotation
/*avlTree.insert(70);
avlTree.insert(50);
avlTree.insert(80);
avlTree.insert(72);
avlTree.insert(90);
avlTree.insert(75);*/
/*avlTree.remove(12);
avlTree.remove(15);
avlTree.remove(11);
avlTree.remove(14);
avlTree.remove(13);
avlTree.remove(7);
avlTree.remove(6);
avlTree.remove(2);
avlTree.remove(4);*/
console.log('***** in-order traverse *****');
avlTree.inOrderTraverse(printNode);
console.log('***** pre-order traverse *****');
avlTree.preOrderTraverse(printNode); | let Node = function (key) {
this.key = key;
|
gdb_core_transaction.go | // Copyright GoFrame Author(https://goframe.org). All Rights Reserved.
//
// This Source Code Form is subject to the terms of the MIT License.
// If a copy of the MIT was not distributed with this file,
// You can obtain one at https://github.com/gogf/gf.
package gdb
import (
"context"
"database/sql"
"github.com/gogf/gf/errors/gcode"
"github.com/gogf/gf/errors/gerror"
"reflect"
"github.com/gogf/gf/container/gtype"
"github.com/gogf/gf/os/gtime"
"github.com/gogf/gf/util/gconv"
"github.com/gogf/gf/util/guid"
"github.com/gogf/gf/text/gregex"
)
// TX is the struct for transaction management.
type TX struct {
db DB // db is the current gdb database manager.
tx *sql.Tx // tx is the raw and underlying transaction manager.
ctx context.Context // ctx is the context for this transaction only.
master *sql.DB // master is the raw and underlying database manager.
transactionId string // transactionId is an unique id generated by this object for this transaction.
transactionCount int // transactionCount marks the times that Begins.
isClosed bool // isClosed marks this transaction has already been committed or rolled back.
}
const (
transactionPointerPrefix = "transaction"
contextTransactionKeyPrefix = "TransactionObjectForGroup_"
transactionIdForLoggerCtx = "TransactionId"
)
var (
transactionIdGenerator = gtype.NewUint64()
)
// Begin starts and returns the transaction object.
// You should call Commit or Rollback functions of the transaction object
// if you no longer use the transaction. Commit or Rollback functions will also
// close the transaction automatically.
func (c *Core) Begin() (tx *TX, err error) {
return c.doBeginCtx(c.GetCtx())
}
func (c *Core) doBeginCtx(ctx context.Context) (*TX, error) {
if master, err := c.db.Master(); err != nil {
return nil, err
} else {
var (
tx *TX
sqlStr = "BEGIN"
mTime1 = gtime.TimestampMilli()
rawTx, err = master.Begin()
mTime2 = gtime.TimestampMilli()
sqlObj = &Sql{
Sql: sqlStr,
Type: "DB.Begin",
Args: nil,
Format: sqlStr,
Error: err,
Start: mTime1,
End: mTime2,
Group: c.db.GetGroup(),
IsTransaction: true,
}
)
if err == nil {
tx = &TX{
db: c.db,
tx: rawTx,
ctx: context.WithValue(ctx, transactionIdForLoggerCtx, transactionIdGenerator.Add(1)),
master: master,
transactionId: guid.S(),
}
ctx = tx.ctx
}
// Tracing and logging.
c.addSqlToTracing(ctx, sqlObj)
if c.db.GetDebug() {
c.writeSqlToLogger(ctx, sqlObj)
}
return tx, err
}
}
// Transaction wraps the transaction logic using function `f`.
// It rolls back the transaction and returns the error from function `f` if
// it returns a non-nil error. It commits the transaction and returns nil if
// function `f` returns nil.
//
// Note that, you should not Commit or Rollback the transaction in function `f`
// as it is automatically handled by this function.
func (c *Core) Transaction(ctx context.Context, f func(ctx context.Context, tx *TX) error) (err error) {
var (
tx *TX
)
if ctx == nil {
ctx = c.GetCtx()
}
// Check transaction object from context.
tx = TXFromCtx(ctx, c.db.GetGroup())
if tx != nil {
return tx.Transaction(ctx, f)
}
tx, err = c.doBeginCtx(ctx)
if err != nil {
return err
}
// Inject transaction object into context.
tx.ctx = WithTX(tx.ctx, tx)
defer func() {
if err == nil {
if exception := recover(); exception != nil {
if v, ok := exception.(error); ok {
err = v
} else {
err = gerror.NewCodef(gcode.CodeInternalError, "%+v", exception)
}
}
}
if err != nil {
if e := tx.Rollback(); e != nil {
err = e
}
} else {
if e := tx.Commit(); e != nil {
err = e
}
}
}()
err = f(tx.ctx, tx)
return
}
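// A minimal usage sketch of the closure-based transaction API (the `db` handle,
// table and column values below are illustrative, not taken from this package):
//
//	err := db.Transaction(ctx, func(ctx context.Context, tx *TX) error {
//		if _, err := tx.Insert("user", map[string]interface{}{"id": 1, "name": "john"}); err != nil {
//			return err
//		}
//		return nil
//	})
//
// The transaction commits when the callback returns nil and rolls back when the
// callback returns an error or panics, as handled by the deferred block above.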
// WithTX injects given transaction object into context and returns a new context.
func WithTX(ctx context.Context, tx *TX) context.Context {
if tx == nil {
return ctx
}
// Check repeat injection from given.
group := tx.db.GetGroup()
if tx := TXFromCtx(ctx, group); tx != nil && tx.db.GetGroup() == group {
return ctx
}
dbCtx := tx.db.GetCtx()
if tx := TXFromCtx(dbCtx, group); tx != nil && tx.db.GetGroup() == group {
return dbCtx
}
// Inject transaction object and id into context.
ctx = context.WithValue(ctx, transactionKeyForContext(group), tx)
return ctx
}
// TXFromCtx retrieves and returns transaction object from context.
// It is usually used in nested transaction feature, and it returns nil if it is not set previously.
func | (ctx context.Context, group string) *TX {
if ctx == nil {
return nil
}
v := ctx.Value(transactionKeyForContext(group))
if v != nil {
tx := v.(*TX)
if tx.IsClosed() {
return nil
}
tx.ctx = ctx
return tx
}
return nil
}
// transactionKeyForContext forms and returns a string for storing transaction object of certain database group into context.
func transactionKeyForContext(group string) string {
return contextTransactionKeyPrefix + group
}
// transactionKeyForNestedPoint forms and returns the transaction key at current save point.
func (tx *TX) transactionKeyForNestedPoint() string {
return tx.db.GetCore().QuoteWord(transactionPointerPrefix + gconv.String(tx.transactionCount))
}
// Ctx sets the context for current transaction.
func (tx *TX) Ctx(ctx context.Context) *TX {
tx.ctx = ctx
return tx
}
// Commit commits current transaction.
// Note that it releases the previously saved transaction point if it's in a nested transaction procedure,
// or else it commits the whole transaction.
func (tx *TX) Commit() error {
if tx.transactionCount > 0 {
tx.transactionCount--
_, err := tx.Exec("RELEASE SAVEPOINT " + tx.transactionKeyForNestedPoint())
return err
}
var (
sqlStr = "COMMIT"
mTime1 = gtime.TimestampMilli()
err = tx.tx.Commit()
mTime2 = gtime.TimestampMilli()
sqlObj = &Sql{
Sql: sqlStr,
Type: "TX.Commit",
Args: nil,
Format: sqlStr,
Error: err,
Start: mTime1,
End: mTime2,
Group: tx.db.GetGroup(),
IsTransaction: true,
}
)
tx.isClosed = true
tx.db.GetCore().addSqlToTracing(tx.ctx, sqlObj)
if tx.db.GetDebug() {
tx.db.GetCore().writeSqlToLogger(tx.ctx, sqlObj)
}
return err
}
// Rollback aborts current transaction.
// Note that it rolls back to the previously saved transaction point if it's in a nested transaction procedure,
// or else it aborts the whole transaction.
func (tx *TX) Rollback() error {
if tx.transactionCount > 0 {
tx.transactionCount--
_, err := tx.Exec("ROLLBACK TO SAVEPOINT " + tx.transactionKeyForNestedPoint())
return err
}
var (
sqlStr = "ROLLBACK"
mTime1 = gtime.TimestampMilli()
err = tx.tx.Rollback()
mTime2 = gtime.TimestampMilli()
sqlObj = &Sql{
Sql: sqlStr,
Type: "TX.Rollback",
Args: nil,
Format: sqlStr,
Error: err,
Start: mTime1,
End: mTime2,
Group: tx.db.GetGroup(),
IsTransaction: true,
}
)
tx.isClosed = true
tx.db.GetCore().addSqlToTracing(tx.ctx, sqlObj)
if tx.db.GetDebug() {
tx.db.GetCore().writeSqlToLogger(tx.ctx, sqlObj)
}
return err
}
// IsClosed checks and returns this transaction has already been committed or rolled back.
func (tx *TX) IsClosed() bool {
return tx.isClosed
}
// Begin starts a nested transaction procedure.
func (tx *TX) Begin() error {
_, err := tx.Exec("SAVEPOINT " + tx.transactionKeyForNestedPoint())
if err != nil {
return err
}
tx.transactionCount++
return nil
}
// SavePoint performs `SAVEPOINT xxx` SQL statement that saves transaction at current point.
// The parameter `point` specifies the point name that will be saved to server.
func (tx *TX) SavePoint(point string) error {
_, err := tx.Exec("SAVEPOINT " + tx.db.GetCore().QuoteWord(point))
return err
}
// RollbackTo performs `ROLLBACK TO SAVEPOINT xxx` SQL statement that rollbacks to specified saved transaction.
// The parameter `point` specifies the point name that was saved previously.
func (tx *TX) RollbackTo(point string) error {
_, err := tx.Exec("ROLLBACK TO SAVEPOINT " + tx.db.GetCore().QuoteWord(point))
return err
}
// Transaction wraps the transaction logic using function `f`.
// It rolls back the transaction and returns the error from function `f` if
// it returns a non-nil error. It commits the transaction and returns nil if
// function `f` returns nil.
//
// Note that, you should not Commit or Rollback the transaction in function `f`
// as it is automatically handled by this function.
func (tx *TX) Transaction(ctx context.Context, f func(ctx context.Context, tx *TX) error) (err error) {
if ctx != nil {
tx.ctx = ctx
}
// Check transaction object from context.
if TXFromCtx(tx.ctx, tx.db.GetGroup()) == nil {
// Inject transaction object into context.
tx.ctx = WithTX(tx.ctx, tx)
}
err = tx.Begin()
if err != nil {
return err
}
defer func() {
if err == nil {
if exception := recover(); exception != nil {
if v, ok := exception.(error); ok {
err = v
} else {
err = gerror.NewCodef(gcode.CodeInternalError, "%+v", exception)
}
}
}
if err != nil {
if e := tx.Rollback(); e != nil {
err = e
}
} else {
if e := tx.Commit(); e != nil {
err = e
}
}
}()
err = f(tx.ctx, tx)
return
}
// Query does query operation on transaction.
// See Core.Query.
func (tx *TX) Query(sql string, args ...interface{}) (rows *sql.Rows, err error) {
return tx.db.DoQuery(tx.ctx, &txLink{tx.tx}, sql, args...)
}
// Exec does none query operation on transaction.
// See Core.Exec.
func (tx *TX) Exec(sql string, args ...interface{}) (sql.Result, error) {
return tx.db.DoExec(tx.ctx, &txLink{tx.tx}, sql, args...)
}
// Prepare creates a prepared statement for later queries or executions.
// Multiple queries or executions may be run concurrently from the
// returned statement.
// The caller must call the statement's Close method
// when the statement is no longer needed.
func (tx *TX) Prepare(sql string) (*Stmt, error) {
return tx.db.DoPrepare(tx.ctx, &txLink{tx.tx}, sql)
}
// GetAll queries and returns data records from database.
func (tx *TX) GetAll(sql string, args ...interface{}) (Result, error) {
rows, err := tx.Query(sql, args...)
if err != nil || rows == nil {
return nil, err
}
defer rows.Close()
return tx.db.GetCore().convertRowsToResult(rows)
}
// GetOne queries and returns one record from database.
func (tx *TX) GetOne(sql string, args ...interface{}) (Record, error) {
list, err := tx.GetAll(sql, args...)
if err != nil {
return nil, err
}
if len(list) > 0 {
return list[0], nil
}
return nil, nil
}
// GetStruct queries one record from database and converts it to given struct.
// The parameter `pointer` should be a pointer to struct.
func (tx *TX) GetStruct(obj interface{}, sql string, args ...interface{}) error {
one, err := tx.GetOne(sql, args...)
if err != nil {
return err
}
return one.Struct(obj)
}
// GetStructs queries records from database and converts them to given struct.
// The parameter `pointer` should be type of struct slice: []struct/[]*struct.
func (tx *TX) GetStructs(objPointerSlice interface{}, sql string, args ...interface{}) error {
all, err := tx.GetAll(sql, args...)
if err != nil {
return err
}
return all.Structs(objPointerSlice)
}
// GetScan queries one or more records from database and converts them to given struct or
// struct array.
//
// If parameter `pointer` is type of struct pointer, it calls GetStruct internally for
// the conversion. If parameter `pointer` is type of slice, it calls GetStructs internally
// for conversion.
func (tx *TX) GetScan(pointer interface{}, sql string, args ...interface{}) error {
t := reflect.TypeOf(pointer)
k := t.Kind()
if k != reflect.Ptr {
return gerror.NewCodef(gcode.CodeInvalidParameter, "params should be type of pointer, but got: %v", k)
}
k = t.Elem().Kind()
switch k {
case reflect.Array, reflect.Slice:
return tx.GetStructs(pointer, sql, args...)
case reflect.Struct:
return tx.GetStruct(pointer, sql, args...)
}
return gerror.NewCodef(gcode.CodeInvalidParameter, "element type should be type of struct/slice, unsupported: %v", k)
}
// GetValue queries and returns the field value from database.
// The sql should query only one field from database, or else it returns only one
// field of the result.
func (tx *TX) GetValue(sql string, args ...interface{}) (Value, error) {
one, err := tx.GetOne(sql, args...)
if err != nil {
return nil, err
}
for _, v := range one {
return v, nil
}
return nil, nil
}
// GetCount queries and returns the count from database.
func (tx *TX) GetCount(sql string, args ...interface{}) (int, error) {
if !gregex.IsMatchString(`(?i)SELECT\s+COUNT\(.+\)\s+FROM`, sql) {
sql, _ = gregex.ReplaceString(`(?i)(SELECT)\s+(.+)\s+(FROM)`, `$1 COUNT($2) $3`, sql)
}
value, err := tx.GetValue(sql, args...)
if err != nil {
return 0, err
}
return value.Int(), nil
}
// Insert does "INSERT INTO ..." statement for the table.
// If there's already one unique record of the data in the table, it returns error.
//
// The parameter `data` can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
// Eg:
// Data(g.Map{"uid": 10000, "name":"john"})
// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"})
//
// The parameter `batch` specifies the batch operation count when given data is slice.
func (tx *TX) Insert(table string, data interface{}, batch ...int) (sql.Result, error) {
if len(batch) > 0 {
return tx.Model(table).Ctx(tx.ctx).Data(data).Batch(batch[0]).Insert()
}
return tx.Model(table).Ctx(tx.ctx).Data(data).Insert()
}
// InsertIgnore does "INSERT IGNORE INTO ..." statement for the table.
// If there's already one unique record of the data in the table, it ignores the inserting.
//
// The parameter `data` can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
// Eg:
// Data(g.Map{"uid": 10000, "name":"john"})
// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"})
//
// The parameter `batch` specifies the batch operation count when given data is slice.
func (tx *TX) InsertIgnore(table string, data interface{}, batch ...int) (sql.Result, error) {
if len(batch) > 0 {
return tx.Model(table).Ctx(tx.ctx).Data(data).Batch(batch[0]).InsertIgnore()
}
return tx.Model(table).Ctx(tx.ctx).Data(data).InsertIgnore()
}
// InsertAndGetId performs action Insert and returns the last insert id that automatically generated.
func (tx *TX) InsertAndGetId(table string, data interface{}, batch ...int) (int64, error) {
if len(batch) > 0 {
return tx.Model(table).Ctx(tx.ctx).Data(data).Batch(batch[0]).InsertAndGetId()
}
return tx.Model(table).Ctx(tx.ctx).Data(data).InsertAndGetId()
}
// Replace does "REPLACE INTO ..." statement for the table.
// If there's already one unique record of the data in the table, it deletes the record
// and inserts a new one.
//
// The parameter `data` can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
// Eg:
// Data(g.Map{"uid": 10000, "name":"john"})
// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"})
//
// The parameter `data` can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
// If given data is type of slice, it then does batch replacing, and the optional parameter
// `batch` specifies the batch operation count.
func (tx *TX) Replace(table string, data interface{}, batch ...int) (sql.Result, error) {
if len(batch) > 0 {
return tx.Model(table).Ctx(tx.ctx).Data(data).Batch(batch[0]).Replace()
}
return tx.Model(table).Ctx(tx.ctx).Data(data).Replace()
}
// Save does "INSERT INTO ... ON DUPLICATE KEY UPDATE..." statement for the table.
// It updates the record if there's primary or unique index in the saving data,
// or else it inserts a new record into the table.
//
// The parameter `data` can be type of map/gmap/struct/*struct/[]map/[]struct, etc.
// Eg:
// Data(g.Map{"uid": 10000, "name":"john"})
// Data(g.Slice{g.Map{"uid": 10000, "name":"john"}, g.Map{"uid": 20000, "name":"smith"})
//
// If given data is type of slice, it then does batch saving, and the optional parameter
// `batch` specifies the batch operation count.
func (tx *TX) Save(table string, data interface{}, batch ...int) (sql.Result, error) {
if len(batch) > 0 {
return tx.Model(table).Ctx(tx.ctx).Data(data).Batch(batch[0]).Save()
}
return tx.Model(table).Ctx(tx.ctx).Data(data).Save()
}
// Update does "UPDATE ... " statement for the table.
//
// The parameter `data` can be type of string/map/gmap/struct/*struct, etc.
// Eg: "uid=10000", "uid", 10000, g.Map{"uid": 10000, "name":"john"}
//
// The parameter `condition` can be type of string/map/gmap/slice/struct/*struct, etc.
// It is commonly used with parameter `args`.
// Eg:
// "uid=10000",
// "uid", 10000
// "money>? AND name like ?", 99999, "vip_%"
// "status IN (?)", g.Slice{1,2,3}
// "age IN(?,?)", 18, 50
// User{ Id : 1, UserName : "john"}
func (tx *TX) Update(table string, data interface{}, condition interface{}, args ...interface{}) (sql.Result, error) {
return tx.Model(table).Ctx(tx.ctx).Data(data).Where(condition, args...).Update()
}
// Delete does "DELETE FROM ... " statement for the table.
//
// The parameter `condition` can be type of string/map/gmap/slice/struct/*struct, etc.
// It is commonly used with parameter `args`.
// Eg:
// "uid=10000",
// "uid", 10000
// "money>? AND name like ?", 99999, "vip_%"
// "status IN (?)", g.Slice{1,2,3}
// "age IN(?,?)", 18, 50
// User{ Id : 1, UserName : "john"}
func (tx *TX) Delete(table string, condition interface{}, args ...interface{}) (sql.Result, error) {
return tx.Model(table).Ctx(tx.ctx).Where(condition, args...).Delete()
}
| TXFromCtx |
useGraphNodeResource.tsx | /**
* @license
* MIT License
*
* Copyright (c) 2020 Alexis Munsayac
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*
* @author Alexis Munsayac <[email protected]>
* @copyright Alexis Munsayac 2020
*/
import { GraphNodeResource } from 'graph-state';
import { useDebugValue } from 'react';
import { useGraphDomainRestriction } from '../GraphDomainCore';
import useGraphNodeValue from './useGraphNodeValue';
export default function | <S>(node: GraphNodeResource<S>): S {
useGraphDomainRestriction();
const value = useGraphNodeValue(node);
useDebugValue(value.status === 'success' ? value.data : value);
if (value.status === 'success') {
return value.data;
}
throw value.data;
}
| useGraphNodeResource |
default.rs | use crate::error::DatamodelError;
use crate::validator::directive::{Args, DirectiveValidator};
use crate::{ast, dml};
use std::convert::TryInto;
/// Prismas builtin `@default` directive.
pub struct DefaultDirectiveValidator {}
impl DirectiveValidator<dml::Field> for DefaultDirectiveValidator {
fn directive_name(&self) -> &'static str {
&"default"
}
fn validate_and_apply(&self, args: &mut Args, field: &mut dml::Field) -> Result<(), DatamodelError> {
// If we allow list default values, we need to adjust the types below properly for that case. | if let dml::FieldType::Base(scalar_type) = field.field_type {
match args.default_arg("value")?.as_type(scalar_type) {
// TODO: Here, a default value directive can override the default value syntax sugar.
Ok(value) => {
let dv: dml::DefaultValue = value.try_into()?;
if dv.get_type() != scalar_type {
return self.error(
&format!(
"Default value type {:?} doesn't match expected type {:?}.",
dv.get_type(),
scalar_type
),
args.span(),
);
}
field.default_value = Some(dv)
}
Err(err) => return Err(self.parser_error(&err)),
}
} else if let dml::FieldType::Enum(_) = &field.field_type {
match args.default_arg("value")?.as_constant_literal() {
// TODO: We should also check if this value is a valid enum value.
Ok(value) => field.default_value = Some(dml::ScalarValue::ConstantLiteral(value).try_into()?),
Err(err) => return Err(self.parser_error(&err)),
}
} else {
return self.error("Cannot set a default value on a relation field.", args.span());
}
Ok(())
}
fn serialize(
&self,
field: &dml::Field,
_datamodel: &dml::Datamodel,
) -> Result<Vec<ast::Directive>, DatamodelError> {
if let Some(default_value) = &field.default_value {
return Ok(vec![ast::Directive::new(
self.directive_name(),
vec![ast::Argument::new("", default_value.clone().into())],
)]);
}
Ok(vec![])
}
} | if field.arity == dml::FieldArity::List {
return self.error("Cannot set a default value on list field.", args.span());
}
|
data_flow.py | # Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from mistral import context as auth_ctx
from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models
from mistral import exceptions as exc
from mistral import expressions as expr
from mistral.lang import parser as spec_parser
from mistral import utils
from mistral.utils import inspect_utils
from mistral.workflow import states
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class ContextView(dict):
"""Workflow context view.
It's essentially an immutable composite structure providing fast lookup
over multiple dictionaries w/o having to merge those dictionaries every
time. The lookup algorithm simply iterates over provided dictionaries
one by one and returns a value taken from the first dictionary where
the provided key exists. This means that these dictionaries must be
provided in the order of decreasing priorities.
Note: Although this class extends built-in 'dict' it shouldn't be
considered a normal dictionary because it may not implement all
methods and account for all corner cases. It's only a read-only view.
"""
def __init__(self, *dicts):
super(ContextView, self).__init__()
self.dicts = dicts or []
def __getitem__(self, key):
for d in self.dicts:
if key in d:
return d[key]
raise KeyError(key)
def get(self, key, default=None):
for d in self.dicts:
if key in d:
return d[key]
return default
def __contains__(self, key):
return any(key in d for d in self.dicts)
def keys(self):
keys = set()
for d in self.dicts:
keys.update(d.keys())
return keys
def items(self):
return [(k, self[k]) for k in self.keys()]
def values(self):
return [self[k] for k in self.keys()]
def iteritems(self):
# NOTE: This is for compatibility with Python 2.7
# YAQL converts output objects after they are evaluated
# to basic types and it uses six.iteritems() internally
# which calls d.items() in case of Python 2.7 and d.iteritems()
# for Python 2.7
return iter(self.items())
def iterkeys(self):
# NOTE: This is for compatibility with Python 2.7
# See the comment for iteritems().
return iter(self.keys())
def itervalues(self):
# NOTE: This is for compatibility with Python 2.7
# See the comment for iteritems().
return iter(self.values())
def __len__(self):
return len(self.keys())
@staticmethod
def _raise_immutable_error():
raise exc.MistralError('Context view is immutable.')
def __setitem__(self, key, value):
self._raise_immutable_error()
def update(self, E=None, **F):
self._raise_immutable_error()
def clear(self):
self._raise_immutable_error()
def pop(self, k, d=None):
self._raise_immutable_error()
def popitem(self):
self._raise_immutable_error()
def __delitem__(self, key):
self._raise_immutable_error()
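# A minimal usage sketch of the priority lookup described in the class
# docstring (hypothetical values, not taken from a real workflow):
#
#     view = ContextView(
#         {'uid': 1},                   # consulted first (highest priority)
#         {'uid': 2, 'name': 'john'},   # consulted only for keys missing above
#     )
#     assert view['uid'] == 1                        # first dict wins
#     assert view.get('name') == 'john'              # falls through to the next dict
#     assert sorted(view.keys()) == ['name', 'uid']  # union of all keys
#     try:
#         view['uid'] = 3
#     except exc.MistralError:
#         pass  # the view is read-only; all mutating methods raise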
def evaluate_upstream_context(upstream_task_execs):
published_vars = {}
ctx = {}
for t_ex in upstream_task_execs:
# TODO(rakhmerov): These two merges look confusing. So it's a
# temporary solution. There's still the bug
# https://bugs.launchpad.net/mistral/+bug/1424461 that needs to be
# fixed using context variable versioning.
published_vars = utils.merge_dicts(
published_vars,
t_ex.published
)
utils.merge_dicts(ctx, evaluate_task_outbound_context(t_ex))
return utils.merge_dicts(ctx, published_vars)
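# Hypothetical sketch of the merge order above: variables published by
# later-merged upstream tasks overwrite those published earlier (stand-in
# objects, not real TaskExecution models):
#
#     from types import SimpleNamespace
#     t1 = SimpleNamespace(in_context={}, published={'a': 1})
#     t2 = SimpleNamespace(in_context={}, published={'a': 2, 'b': 3})
#     evaluate_upstream_context([t1, t2])  # -> {'a': 2, 'b': 3}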
def _extract_execution_result(ex):
if isinstance(ex, models.WorkflowExecution):
return ex.output
if ex.output:
return ex.output['result']
def invalidate_task_execution_result(task_ex):
for ex in task_ex.executions:
ex.accepted = False
def get_task_execution_result(task_ex):
execs = task_ex.executions
execs.sort(
key=lambda x: x.runtime_context.get('index')
)
results = [
_extract_execution_result(ex)
for ex in execs
if hasattr(ex, 'output') and ex.accepted
]
task_spec = spec_parser.get_task_spec(task_ex.spec)
if task_spec.get_with_items():
# TODO(rakhmerov): Smell: violation of 'with-items' encapsulation.
with_items_ctx = task_ex.runtime_context.get('with_items')
if with_items_ctx and with_items_ctx.get('count') > 0:
return results
else:
return []
return results[0] if len(results) == 1 else results
def publish_variables(task_ex, task_spec):
if task_ex.state not in [states.SUCCESS, states.ERROR]:
return
wf_ex = task_ex.workflow_execution
expr_ctx = ContextView(
get_current_task_dict(task_ex),
task_ex.in_context,
get_workflow_environment_dict(wf_ex),
wf_ex.context,
wf_ex.input
)
if task_ex.name in expr_ctx:
LOG.warning(
'Shadowing context variable with task name while '
'publishing: %s',
task_ex.name
)
publish_spec = task_spec.get_publish(task_ex.state)
if not publish_spec:
return
# Publish branch variables.
branch_vars = publish_spec.get_branch()
task_ex.published = expr.evaluate_recursively(branch_vars, expr_ctx)
# Publish global variables.
global_vars = publish_spec.get_global()
utils.merge_dicts(
task_ex.workflow_execution.context,
expr.evaluate_recursively(global_vars, expr_ctx)
)
# TODO(rakhmerov):
# 1. Publish atomic variables.
# 2. Add the field "publish" in TaskExecution model similar to "published"
# but containing info as
# {'branch': {vars}, 'global': {vars}, 'atomic': {vars}}
def evaluate_task_outbound_context(task_ex):
"""Evaluates task outbound Data Flow context.
This method assumes that complete task output (after publisher etc.)
has already been evaluated.
:param task_ex: DB task.
:return: Outbound task Data Flow context.
"""
# NOTE(rakhmerov): 'task_ex.in_context' has the SQLAlchemy specific
# type MutableDict. So we need to create a shallow copy using dict(...)
    # initializer and use it. That is enough to safely manipulate the
    # entries of the resulting dictionary, e.g. to add new entries.
    # However, we must not change the values themselves because they are
    # shared between the original dictionary and the newly created one.
# It's better to avoid using the method copy.deepcopy() because on
# dictionaries with many entries it significantly increases memory
# footprint and reduces performance.
in_context = (
dict(task_ex.in_context)
if task_ex.in_context is not None else {}
)
return utils.update_dict(in_context, task_ex.published)
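# Hypothetical illustration of the shallow-copy note above: dict(...) gives a
# new top-level mapping, so adding keys to the copy does not affect the
# original, while the values themselves stay shared between both dicts.
#
#     in_context = {'vars': {'a': 1}}
#     outbound = dict(in_context)                     # shallow copy
#     outbound['published'] = True                    # only the copy changes
#     assert 'published' not in in_context
#     assert outbound['vars'] is in_context['vars']   # values are shared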
def evaluate_workflow_output(wf_ex, wf_output, ctx):
"""Evaluates workflow output.
:param wf_ex: Workflow execution.
:param wf_output: Workflow output.
    :param ctx: Final Data Flow context (i.e. the outbound context of the causing task).
"""
# Evaluate workflow 'output' clause using the final workflow context.
ctx_view = ContextView(
ctx,
get_workflow_environment_dict(wf_ex),
wf_ex.context,
wf_ex.input
)
output = expr.evaluate_recursively(wf_output, ctx_view)
# TODO(rakhmerov): Many don't like that we return the whole context
# if 'output' is not explicitly defined.
return output or ctx
def get_current_task_dict(task_ex):
return {
'__task_execution': {
'id': task_ex.id,
'name': task_ex.name
}
}
def add_openstack_data_to_context(wf_ex):
wf_ex.context = wf_ex.context or {}
if CONF.pecan.auth_enable:
exec_ctx = auth_ctx.ctx()
if exec_ctx:
wf_ex.context.update({'openstack': exec_ctx.to_dict()})
def | (wf_ex):
wf_ex.context = wf_ex.context or {}
wf_ex.context['__execution'] = {'id': wf_ex.id}
def add_workflow_variables_to_context(wf_ex, wf_spec):
wf_ex.context = wf_ex.context or {}
# The context for calculating workflow variables is workflow input
# and other data already stored in workflow initial context.
ctx_view = ContextView(
get_workflow_environment_dict(wf_ex),
wf_ex.context,
wf_ex.input
)
wf_vars = expr.evaluate_recursively(wf_spec.get_vars(), ctx_view)
utils.merge_dicts(wf_ex.context, wf_vars)
def evaluate_object_fields(obj, context):
fields = inspect_utils.get_public_fields(obj)
evaluated_fields = expr.evaluate_recursively(fields, context)
for k, v in evaluated_fields.items():
setattr(obj, k, v)
def get_workflow_environment_dict(wf_ex):
if not wf_ex:
return {}
if wf_ex.root_execution_id:
return get_workflow_environment_dict(
db_api.get_workflow_execution(wf_ex.root_execution_id)
)
env_dict = wf_ex.params['env'] if 'env' in wf_ex.params else {}
return {'__env': env_dict}
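# Hypothetical sketch of the result shape: a root execution (root_execution_id
# unset) whose params carry an environment yields it under the '__env' key;
# child executions recurse up to their root first.
#
#     from types import SimpleNamespace
#     wf_ex = SimpleNamespace(root_execution_id=None,
#                             params={'env': {'region': 'eu-west-1'}})
#     get_workflow_environment_dict(wf_ex)  # -> {'__env': {'region': 'eu-west-1'}}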
| add_execution_to_context |