prompt
large_stringlengths
70
991k
completion
large_stringlengths
0
1.02k
<|file_name|>overload5.C<|end_file_name|><|fim▁begin|><|fim▁hole|> template <class T> void foo(); // { dg-message "" } candidate void (*bar)() = foo<void>; void (*baz)() = foo; // { dg-error "" } can't deduce T<|fim▁end|>
// { dg-do assemble }
<|file_name|>command.rs<|end_file_name|><|fim▁begin|>use std::error; use clap; use crate::config; use super::executer::Executer; use super::{Arguments, Program}; pub struct Command<'c> { config: &'c config::command::Config, program: Program<'c>, args: Arguments<'c>, } impl<'c> Command<'c> { pub fn from_args( config: &'c config::command::Config, clap_args: &'c clap::ArgMatches<'c>, ) -> Self { trace!("command::params::exec::Command::from_args"); let program = clap_args.value_of("PROGRAM").unwrap(); let args = match clap_args.values_of("ARGS") { Some(args) => args.collect(), None => Vec::new(),<|fim▁hole|> config: config, program: program, args: args, } } pub fn new( config: &'c config::command::Config, program: &'c Program<'c>, args: &'c Arguments<'c>, ) -> Self { trace!("command::params::exec::Command::new"); Command { config: config, program: program, args: args.to_owned(), } } pub async fn run(&self) -> Result<(), Box<dyn error::Error>> { trace!("command::params::exec::Command::run"); if let Some(params_config) = self.config.params.as_ref() { let exec = Executer::from_config(params_config); exec.run(&self.program, &self.args).await?; } Ok(()) } }<|fim▁end|>
}; Command {
<|file_name|>views.py<|end_file_name|><|fim▁begin|># coding=utf-8 import json from django.utils.translation import ugettext_lazy as _ from django.http import HttpResponse import django.views from django.template import defaultfilters as template_filters from horizon import tables from horizon import exceptions from cloudkittydashboard.api import cloudkitty as api from openstack_dashboard.api import keystone from cloudkittydashboard.dashboards.project.billing_overview import tables as project_tables import time from datetime import date, timedelta, datetime import calendar from django.http import JsonResponse,HttpResponse import json import xlsxwriter import StringIO import logging LOG = logging.getLogger(__name__) def detail(request, org_id): if org_id == None: org_id = get_tenant_id(request) try: details = api.cloudkittyclient(request).billings.list_services_cost(get_month(request), org_id) except Exception: details = [] exceptions.handle(request, _('Unable to retrieve billing list.')) return HttpResponse(json.dumps(details),content_type="application/json") class IndexView(tables.DataTableView): # A very simple class-based view... template_name = "project/billing_overview/index.html" table_class = project_tables.BillingOverviewTable page_title = _("Billing Overview") def get_context_data(self, **kwargs): context = super(IndexView, self).get_context_data(**kwargs) context["tenant_id"] = get_tenant_id(self.request) context["selected_month"] = get_month(self.request) context["organizations"] = get_tenant_list(self.request) year = time.strftime("%Y",time.localtime()) month = time.strftime("%m",time.localtime()) if int(month) == 1: last_month = 12 last_year = int(year) - 1 else: last_month = int(month) - 1 last_year = year try: context["year_begin"] = str((int(year)-1)) + "/" + str((int(month))) context["year_end"] = str(last_year) + "/" + str(last_month) # get last 12 months total cost total_year = api.cloudkittyclient(self.request).billings.get_consumer_trends("month", 12, get_tenant_id(self.request)) year_sum = 0 for billing_month in total_year["consumerTrends"]: year_sum += billing_month["cost"] context["billing_year"] = year_sum #get current month cost context["time_current_month"] = year+"/"+month services_rate_list = api.cloudkittyclient(self.request).billings.list_services_cost(year+"-"+month, get_tenant_id(self.request)) current_sum = 0 for rate in services_rate_list["servicesRate"]: current_sum += rate["rate"] context["billing_current_month"] = current_sum #get last month cost context["time_last_month"] = str(last_year)+"/"+str(last_month) context["billing_last_month"] = api.cloudkittyclient(self.request).billings.get_consumer_trends("month", 1, get_tenant_id(self.request))["consumerTrends"][0]["cost"] except Exception: exceptions.handle(self.request,_("Unable to retrieve month cost")) today = date.today() context["last_12_months"] = last_12_months() return context; def get_data(self): try: billings = api.cloudkittyclient(self.request).billings.get_total_cost(get_month(self.request), get_tenant_id(self.request))["totals"] except Exception: billings = [] exceptions.handle(self.request, _('Unable to retrieve billing list.')) return billings class ReportView(django.views.generic.TemplateView): def get(self,request,*args,**kwargs): tenant_id = get_tenant_id(self.request) billing_month = get_month(self.request) tenants = get_tenant_list(self.request) for tenant in tenants: if tenant.id == tenant_id: tenant_name = tenant.name break reports = api.cloudkittyclient(self.request).billings.list_month_report(tenant_id,billing_month) output = StringIO.StringIO() workbook = xlsxwriter.Workbook(output) month_sheet = workbook.add_worksheet(tenant_name) #设置列宽度 month_sheet.set_column('A:Z',9) #表头 head = (u'部门',u'资源', u'1月',u'2月',u'3月', u'1Q合计', u'4月',u'5月',u'6月', u'2Q合计', u'上半年计', u'7月',u'8月',u'9月', u'3Q合计', u'10月',u'11月',u'12月',u'4Q合计',u'下半年计',u'全年合计' ) # 设置表头字符串和格式 head_format = workbook.add_format({ 'bold':True, 'font_size':20, 'font_name':'Microsoft YaHei' }) row = 1 col = 0 head_str = billing_month.split('-')[0] + u'年度月别计费一览表' head_str1 = u'资源及使用费用情况' month_sheet.write(row,col,head_str,head_format) row += 1 month_sheet.write(row,col,u'如需查看季、年度合计,请在月份对应位置取消隐藏') row += 2 month_sheet.write(row,col,head_str1,head_format) explain_format = workbook.add_format({'align':'right'}) year_month = billing_month.split('-') if billing_month == template_filters.date(date.today(), "Y-m"): tab_date = u'制表日期:%d月%d日' %(int(year_month[1]),date.today().day-1) else: tab_date = u'制表日期:%d月%d日' %(int(year_month[1]),calendar.monthrange(int(year_month[0]),int(year_month[1]))[1]) month_sheet.write(row,len(head)-1,u'单位:元 ' + tab_date, explain_format) row += 1 col = 0 head2_format = workbook.add_format({ 'bold':True, 'align':'center', 'valign':'vcenter', 'bg_color':'#D8E4BC', 'left':1, 'font_name':'Microsoft YaHei' }) #设置行高 month_sheet.set_row(row,30) for index_str in head: month_sheet.write(row,col,index_str,head2_format) col += 1 row += 1 month_sheet.set_column('A:A',15) #资源和合计所占行数 names = ['Compute','Volume',u'合计'] even_format = workbook.add_format({ 'border':1, 'font_name':'Microsoft YaHei', 'num_format': '#,##0.00' }) odd_format=workbook.add_format({ 'border':1, 'font_name':'Microsoft YaHei', 'bg_color':'#D9D9D9', 'num_format': '#,##0.00' }) resource_total_rows = 3 # 处理每个部门 merge_format = workbook.add_format({ 'bold':True, 'font_name':'Microsoft YaHei', 'font_size':14, 'align':'center', 'valign':'vcenter', 'border':1 }) for depart in reports['departs']: col = 1 for index,name in enumerate(names): if index % 2 != 0: month_sheet.set_row(row+index,None,odd_format) else: month_sheet.set_row(row+index,None,even_format) month_sheet.write(row+index,col,name) month_sheet.merge_range(row,0,row+resource_total_rows-1,0,depart['tenant_name'],merge_format) tmp_row = row write_col = col + 1 for month_report in depart['month_reports']: for res_tpye in month_report['res_types']: if res_tpye['res_type'] == "compute": write_row = tmp_row elif res_tpye['res_type'] == "volume": write_row = tmp_row + 1 month_sheet.write(write_row,write_col,res_tpye['rate']) write_col += 1 month = int(month_report["month"].split('-')[1]) if month == 3: for index in range(resource_total_rows-1): index_row = tmp_row + index month_sheet.write(index_row,write_col,'=SUM(C' + str(index_row+1) + ':E' + str(index_row+1) + ')') write_col += 1 elif month == 6: for index in range(resource_total_rows-1): index_row = tmp_row + index month_sheet.write(index_row,write_col,'=SUM(G' + str(index_row+1) + ':I' + str(index_row+1) + ')') month_sheet.write(index_row,write_col+1,'=SUM(F' + str(index_row+1) + '+J' + str(index_row+1) + ')') write_col += 2 elif month == 9: for index in range(resource_total_rows-1): index_row = tmp_row + index month_sheet.write(index_row,write_col,'=SUM(L' + str(index_row+1) + ':N' + str(index_row+1) + ')') write_col += 1 elif month == 12: for index in range(resource_total_rows-1): index_row = tmp_row + index month_sheet.write(index_row,write_col,'=SUM(P' + str(index_row+1) + ':R' + str(index_row+1) + ')') month_sheet.write(index_row,write_col+1,'=SUM(O' + str(index_row+1) + '+S' + str(index_row+1) + ')') month_sheet.write(index_row,write_col+2,'=SUM(K' + str(index_row+1) + '+T' + str(index_row+1) + ')') write_col += 3 #处理后面的年统计和季度统计 for month in range(1,13): if month == 3: for index in range(resource_total_rows-1): index_row = tmp_row + index month_sheet.write(index_row,5,'=SUM(C' + str(index_row+1) + ':E' + str(index_row+1) + ')') elif month == 6: for index in range(resource_total_rows-1): index_row = tmp_row + index month_sheet.write(index_row,9,'=SUM(G' + str(index_row+1) + ':I' + str(index_row+1) + ')') month_sheet.write(index_row,10,'=SUM(F' + str(index_row+1) + '+J' + str(index_row+1) + ')') elif month == 9: for index in range(resource_total_rows-1): index_row = tmp_row + index month_sheet.write(index_row,14,'=SUM(L' + str(index_row+1) + ':N' + str(index_row+1) + ')') elif month == 12: for index in range(resource_total_rows-1): index_row = tmp_row + index month_sheet.write(index_row,18,'=SUM(P' + str(index_row+1) + ':R' + str(index_row+1) + ')') month_sheet.write(index_row,19,'=SUM(O' + str(index_row+1) + '+S' + str(index_row+1) + ')') month_sheet.write(index_row,20,'=SUM(K' + str(index_row+1) + '+T' + str(index_row+1) + ')') month_sheet.write_array_formula('C' + str(tmp_row + resource_total_rows) + ':U' + str(tmp_row + resource_total_rows ), '{=C' + str(tmp_row + 1) + ':U' + str(tmp_row + 1) + '+' \ + 'C' + str(tmp_row + resource_total_rows - 1) + ':U' + str(tmp_row + resource_total_rows - 1) + '}') #跳过资源种类数目和合计的行 row = row + resource_total_rows #部门之间中间隔一行 row += 1 month_sheet.print_area(0,0,row,len(head)-1) month_sheet.fit_to_pages(1,1) month_sheet.freeze_panes(0,1) month_sheet.hide_zero() month_sheet.set_column('F:F',None,None,{'hidden':1}) month_sheet.set_column('J:J',None,None,{'hidden':1}) month_sheet.set_column('K:K',None,None,{'hidden':1}) month_sheet.set_column('O:O',None,None,{'hidden':1}) month_sheet.set_column('S:S',None,None,{'hidden':1}) month_sheet.set_column('T:T',None,None,{'hidden':1}) month_sheet.set_column('V:XFD',None,None,{'hidden':1}) workbook.close() output.seek(0) response = HttpResponse(output.read()) response['Content-type']="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" response['Content-Disposition'] = "attachment; filename=" + str(billing_month) +"-report.xlsx" return response class TrendsView(django.views.generic.TemplateView): def get(self, request, *args, **kwargs): tenant_id = request.GET.get("tenant_id", request.user.tenant_id) time_series = request.GET.get("time_series", "month") try:<|fim▁hole|> 12, get_tenant_id(self.request)) except Exception: trends = {} exceptions.handle(request,_("Unable to retrieve trend data")) # convert time and cost to x and y for trend in trends["consumerTrends"]: if time_series == u'month': trend.update(x=time.strftime('%Y-%m-%dT%H:%M:%S%Z',time.strptime(trend.pop("time"),"%Y-%m")),y=trend.pop("cost")) elif time_series == u'day': trend.update(x=time.strftime('%Y-%m-%dT%H:%M:%S%Z',time.strptime(trend.pop("time"),"%Y-%m-%d")),y=trend.pop("cost")) ret = {'series': [{ 'name': 'admin', 'unit': 'CNY', 'time_series': time_series, 'data': trends["consumerTrends"] }], 'settings': { 'verbose_date': False }} return HttpResponse(json.dumps(ret), content_type='application/json') def get_month(request): try: month = request.GET.get("month", "%s-%s" % (date.today().year, date.today().month)) return month except Exception: return None def get_tenant_id(request): return request.GET.get("tenant_id", request.user.tenant_id) def get_tenant_list(request): return sorted(request.user.authorized_tenants, reverse=False, key=lambda x: getattr(x, "sortNumber", 0)) def last_12_months(): def back_months(dt, months): month = (dt.month - months) or 12 year = dt.year - month / 12 return dt.replace(year=year, month=month, day=1) date = datetime.today() date_choices = [date] for i in range(1, 12): date = back_months(date, 1) date_choices.append(date) return date_choices<|fim▁end|>
trends = api.cloudkittyclient(self.request).billings.get_consumer_trends(time_series,
<|file_name|>asalist.py<|end_file_name|><|fim▁begin|>import collections class AsaList(object): @classmethod def flatten(cls, lst): """<|fim▁hole|> yield x else: for x in AsaList.flatten(x): yield x<|fim▁end|>
Returns Generator of non-iterable values """ for x in lst: if not isinstance(x, collections.Iterable):
<|file_name|>ticket.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # from rest_framework import viewsets from rest_framework.decorators import action from rest_framework.exceptions import MethodNotAllowed from rest_framework.response import Response from common.const.http import POST, PUT from common.mixins.api import CommonApiMixin from common.permissions import IsValidUser, IsOrgAdmin from tickets import serializers from tickets.models import Ticket from tickets.permissions.ticket import IsAssignee, IsAssigneeOrApplicant, NotClosed<|fim▁hole|> class TicketViewSet(CommonApiMixin, viewsets.ModelViewSet): permission_classes = (IsValidUser,) serializer_class = serializers.TicketDisplaySerializer serializer_classes = { 'open': serializers.TicketApplySerializer, 'approve': serializers.TicketApproveSerializer, } filterset_fields = [ 'id', 'title', 'type', 'action', 'status', 'applicant', 'applicant_display', 'processor', 'processor_display', 'assignees__id' ] search_fields = [ 'title', 'action', 'type', 'status', 'applicant_display', 'processor_display' ] def create(self, request, *args, **kwargs): raise MethodNotAllowed(self.action) def update(self, request, *args, **kwargs): raise MethodNotAllowed(self.action) def destroy(self, request, *args, **kwargs): raise MethodNotAllowed(self.action) def get_queryset(self): queryset = Ticket.get_user_related_tickets(self.request.user) return queryset def perform_create(self, serializer): instance = serializer.save() instance.open(applicant=self.request.user) @action(detail=False, methods=[POST], permission_classes=[IsValidUser, ]) def open(self, request, *args, **kwargs): return super().create(request, *args, **kwargs) @action(detail=True, methods=[PUT], permission_classes=[IsOrgAdmin, IsAssignee, NotClosed]) def approve(self, request, *args, **kwargs): response = super().update(request, *args, **kwargs) instance = self.get_object() instance.approve(processor=self.request.user) return response @action(detail=True, methods=[PUT], permission_classes=[IsOrgAdmin, IsAssignee, NotClosed]) def reject(self, request, *args, **kwargs): instance = self.get_object() serializer = self.get_serializer(instance) instance.reject(processor=request.user) return Response(serializer.data) @action(detail=True, methods=[PUT], permission_classes=[IsAssigneeOrApplicant, NotClosed]) def close(self, request, *args, **kwargs): instance = self.get_object() serializer = self.get_serializer(instance) instance.close(processor=request.user) return Response(serializer.data)<|fim▁end|>
__all__ = ['TicketViewSet']
<|file_name|>index.js<|end_file_name|><|fim▁begin|>import React, { Component } from 'react'; class Main extends Component { render() {<|fim▁hole|> return ( <main className='Main'> <h1 className='Main-headline'>Web solutions focused on<br/>Simplicity & Reliability.</h1> <h2 className='Main-subhead'>Bleeding edge technology paired with amazing <em>talent</em> and <em>creativity</em>.</h2> <a href='#' className='Main-button'>Work With Us</a> </main> ); } } export default Main;<|fim▁end|>
<|file_name|>constants.py<|end_file_name|><|fim▁begin|>import sys <|fim▁hole|> PYVERSION = 3<|fim▁end|>
PYVERSION = 2 if sys.version_info > (3,):
<|file_name|>transpile.d.ts<|end_file_name|><|fim▁begin|>import { BuildContext, ChangedFile } from './util/interfaces'; import * as ts from 'typescript'; export declare function transpile(context: BuildContext): Promise<void>; export declare function transpileUpdate(changedFiles: ChangedFile[], context: BuildContext): Promise<void>; /** * The full TS build for all app files. */ export declare function transpileWorker(context: BuildContext, workerConfig: TranspileWorkerConfig): Promise<{}>; export declare function canRunTranspileUpdate(event: string, filePath: string, context: BuildContext): boolean; export declare function transpileDiagnosticsOnly(context: BuildContext): Promise<{}>; export interface TranspileWorkerMessage { rootDir?: string; buildDir?: string; configFile?: string; transpileSuccess?: boolean; } export declare function getTsConfig(context: BuildContext, tsConfigPath?: string): TsConfig; export declare function getTsConfigPath(context: BuildContext): string; export interface TsConfig { options: ts.CompilerOptions; fileNames: string[]; raw: any; } export interface TranspileWorkerConfig { configFile: string;<|fim▁hole|> cache: boolean; inlineTemplate: boolean; }<|fim▁end|>
writeInMemory: boolean; sourceMaps: boolean;
<|file_name|>VisualizeHistory.py<|end_file_name|><|fim▁begin|>import matplotlib, os, errno # IF WE ARE ON SERVER WITH NO DISPLAY, then we use Agg: #print matplotlib.get_backend() if not('DISPLAY' in os.environ): matplotlib.use("Agg") import matplotlib.pyplot as plt import numpy as np def visualize_history(hi, show=True, save=False, save_path='', show_also='', custom_title=None): # Visualize history of Keras model run. ''' Example calls: hi = model.fit(...) saveHistory(hi.history, 'tmp_saved_history.npy') visualize_history(loadHistory('tmp_saved_history.npy')) ''' # list all data in history print(hi.keys()) # summarize history for loss plt.plot(hi['loss']) plt.plot(hi['val_loss']) if show_also <> '': plt.plot(hi[show_also], linestyle='dotted') plt.plot(hi['val_'+show_also], linestyle='dotted') if custom_title is None: plt.title('model loss') else: plt.title(custom_title) plt.ylabel('loss') plt.xlabel('epoch') if show_also == '': plt.legend(['train', 'test'], loc='upper left') else: plt.legend(['train', 'test', 'train-'+show_also, 'test-'+show_also], loc='upper left') if save: filename = save_path #+'loss.png' if not os.path.exists(os.path.dirname(filename)): try: os.makedirs(os.path.dirname(filename)) except OSError as exc: if exc.errno != errno.EEXIST: raise plt.savefig(filename) plt.savefig(filename+'.pdf', format='pdf') print "Saved image to "+filename if show: plt.show() plt.clf() return plt def visualize_histories(histories, names, plotvalues='loss', show=True, save=False, save_path='', custom_title=None, just_val=False): ''' Visualize multiple histories. Example usage: h1 = loadHistory('history1.npy') h2 = loadHistory('history2.npy') visualize_histories([h1, h2], ['history1', 'history2']) ''' import matplotlib.pyplot as plt if custom_title is None: custom_title = 'model ' + plotvalues if just_val: custom_title = custom_title + ' (just validation results)' i = 0 leg = [] for hi in histories: n = names[i] # list all data in history print(hi.keys()) # summarize history for loss if not just_val: plt.plot(hi[plotvalues]) plt.plot(hi['val_'+plotvalues]) plt.title(custom_title) plt.ylabel('loss') plt.xlabel('epoch') if not just_val: leg.append(n + '') leg.append(n + '_val') i += 1 #plt.legend(leg, loc='lower left') plt.legend(leg, loc='best') if save: plt.savefig(save_path) #+plotvalues+'.png') plt.savefig(save_path+'.pdf', format='pdf') if show: plt.show() plt.clf() return plt def visualize_special_histories(histories, plotvalues='loss', show=True, save=False, save_path='', custom_title=None, just_val=False): ''' We are visualizing results of a k-fold crossvalidation training. In <histories> we have the individual runs of the experiment. ''' train_color = 'grey' val_color = 'blue' avg_train_color = 'red' avg_val_color = 'green' avg_train = [] avg_val = [] # count the averages epochs = len(histories[0][plotvalues]) for epoch in range(0, epochs): trains = [] vals = [] for hi in histories: train = hi[plotvalues][epoch] val = hi['val_'+plotvalues][epoch] trains.append(train) vals.append(val) avg_train.append( np.mean(trains) ) avg_val.append( np.mean(vals) ) import matplotlib.pyplot as plt plt.figure() if custom_title is None: custom_title = 'model ' + plotvalues if just_val: custom_title = custom_title + ' (just validation results)' i = 0 leg = [] if not just_val: leg.append('average training') leg.append('average validation') if not just_val: leg.append('training errors') leg.append('validation errors') # now averages: if not just_val: plt.plot(avg_train, color=avg_train_color) plt.plot(avg_val, color=avg_val_color) for hi in histories: # list all data in history print(hi.keys()) # summarize history for loss if not just_val: plt.plot(hi[plotvalues], linestyle='dashed', color=train_color) plt.plot(hi['val_'+plotvalues], linestyle='dashed', color=val_color) i += 1 # OK, but we also want these on top...: if not just_val: plt.plot(avg_train, color=avg_train_color) plt.plot(avg_val, color=avg_val_color) plt.title(custom_title) plt.ylabel('loss') plt.xlabel('epoch') #plt.legend(leg, loc='lower left') plt.legend(leg, loc='best') if save: plt.savefig(save_path) #+plotvalues+'.png') plt.savefig(save_path+'.pdf', format='pdf') if show: plt.show() plt.clf() return plt def visualize_whiskered_boxed(whiskered_boxes_data, names, show=True, save=False, save_path='', custom_title=''): ''' We are visualizing results of a k-fold crossvalidation training. In <whiskered_boxes_data> we have data for whiskered box plots. ''' from DatasetHandler.DatasetVizualizators import zoomOutY plt.close() plt.figure(figsize=(5, 8)) legend_on = True if custom_title == '': custom_title = ','.join(names) # mark values save_path += custom_title + '.png' y_max = 1.0 y_min = 0.0 #y_max = -100.0 #y_min = 100.0 #for i in whiskered_boxes_data: # y_max = max(max(i),y_max) # y_min = min(min(i),y_min) axes = plt.axes() import matplotlib.ticker as ticker axes.yaxis.set_major_locator(ticker.MultipleLocator(np.abs(y_max-y_min)/10.0)) axes.yaxis.set_minor_locator(ticker.MultipleLocator(np.abs(y_max-y_min)/100.0))<|fim▁hole|> #meanpointprops = dict(linewidth=0.0) meanpointprops = dict(linewidth=1.0) boxplot = plt.boxplot(whiskered_boxes_data, notch=False, showmeans=True, meanprops=meanpointprops) #plt.xticks(names) if (legend_on): boxplot['medians'][0].set_label('median') boxplot['means'][0].set_label('mean') boxplot['fliers'][0].set_label('outlayers') # boxplot['boxes'][0].set_label('boxes') # boxplot['whiskers'][0].set_label('whiskers') boxplot['caps'][0].set_label('caps') #axes.set_xlim([0.7, 1.7]) plt.legend(numpoints = 1) axes.set_title(custom_title) axes.set_xticklabels(names) zoomOutY(axes, [0.0,1.0], 0.1) ## save if save: plt.savefig(save_path) #+plotvalues+'.png') plt.savefig(save_path+'.pdf', format='pdf') if show: plt.show() plt.clf() return plt def saveHistory(history_dict, filename): # Save history or histories into npy file if not os.path.exists(os.path.dirname(filename)): try: os.makedirs(os.path.dirname(filename)) except OSError as exc: # Guard against race condition if exc.errno != errno.EEXIST: raise to_be_saved = data = {'S': history_dict} np.save(open(filename, 'w'), to_be_saved) def loadHistory(filename): # Load history object loaded = np.load(open(filename)) return loaded[()]['S']<|fim▁end|>
<|file_name|>constants.py<|end_file_name|><|fim▁begin|><|fim▁hole|>KEY_LEFT = "left" KEY_INSERT = "insert" KEY_HOME = "home" KEY_END = "end" KEY_PAGEUP = "pageup" KEY_PAGEDOWN = "pagedown" KEY_BACKSPACE = "backspace" KEY_DELETE = "delete" KEY_TAB = "tab" KEY_ENTER = "enter" KEY_PAUSE = "pause" KEY_ESCAPE = "escape" KEY_SPACE = "space" KEY_KEYPAD0 = "keypad0" KEY_KEYPAD1 = "keypad1" KEY_KEYPAD2 = "keypad2" KEY_KEYPAD3 = "keypad3" KEY_KEYPAD4 = "keypad4" KEY_KEYPAD5 = "keypad5" KEY_KEYPAD6 = "keypad6" KEY_KEYPAD7 = "keypad7" KEY_KEYPAD8 = "keypad8" KEY_KEYPAD9 = "keypad9" KEY_KEYPAD_PERIOD = "keypad_period" KEY_KEYPAD_DIVIDE = "keypad_divide" KEY_KEYPAD_MULTIPLY = "keypad_multiply" KEY_KEYPAD_MINUS = "keypad_minus" KEY_KEYPAD_PLUS = "keypad_plus" KEY_KEYPAD_ENTER = "keypad_enter" KEY_CLEAR = "clear" KEY_F1 = "f1" KEY_F2 = "f2" KEY_F3 = "f3" KEY_F4 = "f4" KEY_F5 = "f5" KEY_F6 = "f6" KEY_F7 = "f7" KEY_F8 = "f8" KEY_F9 = "f9" KEY_F10 = "f10" KEY_F11 = "f11" KEY_F12 = "f12" KEY_F13 = "f13" KEY_F14 = "f14" KEY_F15 = "f15" KEY_F16 = "f16" KEY_F17 = "f17" KEY_F18 = "f18" KEY_F19 = "f19" KEY_F20 = "f20" KEY_SYSREQ = "sysreq" KEY_BREAK = "break" KEY_CONTEXT_MENU = "context_menu" KEY_BROWSER_BACK = "browser_back" KEY_BROWSER_FORWARD = "browser_forward" KEY_BROWSER_REFRESH = "browser_refresh" KEY_BROWSER_STOP = "browser_stop" KEY_BROWSER_SEARCH = "browser_search" KEY_BROWSER_FAVORITES = "browser_favorites" KEY_BROWSER_HOME = "browser_home"<|fim▁end|>
KEY_UP = "up" KEY_DOWN = "down" KEY_RIGHT = "right"
<|file_name|>Post.js<|end_file_name|><|fim▁begin|>/** * Created with JetBrains WebStorm. * User: yujilong * Date: 14-2-11 * Time: 上午11:08 * To change this template use File | Settings | File Templates. */ define(['jquery', 'util', 'post/PostContent'], function ($, util, PostContent) { var Post = function (_id, categoryId, boardId, title, createTime, taobaoUrl, lastUpdateTime , fontCoverPic , status, price) { this._id = _id; this.categoryId = categoryId; this.boardId = boardId; this.title = title; this.createTime = createTime; this.taobaoUrl = taobaoUrl; this.lastUpdateTime = lastUpdateTime; this.postContents = []; this.fontCoverPic = fontCoverPic; this.status = status; this.price = price; }; Post.prototype = { createContent: function (_id) { var content = new PostContent(_id); this.postContents.push(content); return content; }, createContentKey: function (fn) { util.sendAjax('/burning/cms/createPostContentKey', { }, 'json', fn, 'post'); }, save : function(){ var title = $('#createBox').find('input[name="postTitle"]').val(); var url = encodeURI($('#createBox').find('input[name="taobaoUrl"]').val()); this.title = title; this.taobaoUrl = url; }, pullContents : function(_id){ var tThis = this; for(var i = 0, len = this.postContents.length; i < len ; i++){ var content = this.postContents[i]; if(content._id.trim() === _id.trim()){ (function(index){ var tmp = tThis.postContents.splice(index,1); })(i); break; } } }, updateContent : function(_id,info,type,sort){ var content = this.findContent(_id); content.info = info; content.type = type; }, findContent : function (_id){ for(var i = 0, len = this.postContents.length; i < len ; i++){ if(this.postContents[i]._id === _id){ return this.postContents[i]; } } }, submit:function(){ this.initVals(); var tThis = this; var obj = this.validatePost(); if(obj.flag){ (function(){ util.sendAjax('/burning/cms/createPost',{ post : { categoryId : tThis.categoryId, boardId : tThis.boardId, title : tThis.title, taobaoUrl : tThis.taobaoUrl, postContents : tThis.postContents, status : tThis.status, price : tThis.price } }, 'json', function(data){ if(data.rs == 1){ location.reload(true); } else { alert('error'); } }, 'post'); })(); }else{ return obj; } }, initVals : function(){ this.title = $('#createBox').find('input[name="postTitle"]').val(); this.taobaoUrl = encodeURI($('#createBox').find('input[name="taobaoUrl"]').val()); this.status = $('#createBox').find('select[name="status"]').val(); this.price = $('#createBox').find('input[name="price"]').val(); for(var i = 0 , len = this.postContents.length; i<len; i++){ if(this.postContents[i].type == 1){ this.initTextContent(this.postContents[i]); }<|fim▁hole|> content.info.text = text; }, initDefaultText : function(content){ content.type = 1; content.info = { text:'' }; }, validatePost : function(){ var error_msg = ''; var tThis = this; if(!/^[0-9]+(.[0-9]+)?$/.test(tThis.price)){ error_msg = '请输入正确的数字,如12.30或12!'; return { flag : false, error : error_msg }; } if(this.postContents.length === 0){ error_msg = '请添加图片或文章内容'; return { flag : false, error : error_msg }; } if(!/^http:\/\//.test(tThis.taobaoUrl)){ return { flag: false, error : '请输入正确的URL,如:http://www.baidu.com' } } if(!this.title){ error_msg = '请填写标题'; return { flag : false, error : error_msg }; } for(var i = 0, len = this.postContents.length; i < len ; i ++) { var content = this.postContents[i]; if(content.type == 1 && !content.info.text){ return { flag : false, error : '有部分段落未填写数据' }; }else if(content.type != 1 && !content.info){ return { flag : false, error : '有部分图片尚未上传' } } } return { flag : true }; }, delByPostId : function(fn){ var tThis = this; util.sendAjax('/burning/cms/delPostById', { _id : tThis._id }, 'json', fn, 'delete'); }, getPostById : function(fn){ var tThis = this; util.sendAjax('/burning/cms/getPostById',{ _id : tThis._id },'json',fn,'get'); }, updatePostById : function(fn){ var tThis = this; util.sendAjax('/burning/cms/updatePostById',{ _id: tThis._id, taobaoUrl : tThis.taobaoUrl, price : tThis.price, title : tThis.title },'json',fn,'put'); }, updatePostStatus : function(fn){ var tThis = this; util.sendAjax('/burning/cms/updatePostStatus',{ _id:tThis._id, status : tThis.status },'json',fn,'put'); }, multUpdatePostStatus : function(ids,status,fn){ console.log(ids); var tThis = this; util.sendAjax('/burning/cms/multUpdatePostStatus',{ ids:ids, status : status },'json',fn,'put'); }, multdelPostStatus : function(ids,fn){ console.log(ids); var tThis = this; util.sendAjax('/burning/cms/multDelPost',{ ids:ids },'json',fn,'delete'); } }; return Post; });<|fim▁end|>
} }, initTextContent : function(content){ var text = $('#' + content._id).find('textarea').val();
<|file_name|>main.go<|end_file_name|><|fim▁begin|>package main import ( "flag" "net" "os" "os/signal" "syscall" // "strings" log "github.com/Sirupsen/logrus" "github.com/miekg/dns" ) var ( debug = flag.Bool("debug", false, "Debug") listen = flag.String("listen", ":53", "Address to listen to (TCP and UDP)") answersFile = flag.String("answers", "./answers.json", "File containing the answers to respond with") ttl = flag.Uint("ttl", 600, "TTL for answers") answers Answers ) func main() { log.Info("Starting rancher-dns") parseFlags()<|fim▁hole|> udpServer := &dns.Server{Addr: *listen, Net: "udp"} tcpServer := &dns.Server{Addr: *listen, Net: "tcp"} dns.HandleFunc(".", route) go func() { log.Fatal(udpServer.ListenAndServe()) }() log.Info("Listening on ", *listen) log.Fatal(tcpServer.ListenAndServe()) } func parseFlags() { flag.Parse() if *debug { log.SetLevel(log.DebugLevel) } } func loadAnswers() { var err error answers, err = ReadAnswersFile(*answersFile) if err != nil { log.Fatal(err) } log.Info("Loaded answers for ", len(answers), " IPs") } func watchSignals() { c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGHUP) go func() { for _ = range c { log.Info("Received HUP signal, reloading answers") loadAnswers() } }() } func route(w dns.ResponseWriter, req *dns.Msg) { if len(req.Question) == 0 { dns.HandleFailed(w, req) return } clientIp, _, _ := net.SplitHostPort(w.RemoteAddr().String()) question := req.Question[0] fqdn := question.Name rrType := dns.Type(req.Question[0].Qtype).String() log.WithFields(log.Fields{ "question": question.Name, "type": rrType, "client": clientIp, }).Debug("Request") // Client-specific answers found, ok := answers.LocalAnswer(fqdn, rrType, clientIp) if ok { log.WithFields(log.Fields{ "client": clientIp, "type": rrType, "question": question.Name, "source": "client", "found": len(found), }).Info("Found match for client") Respond(w, req, found) return } else { log.Debug("No match found for client") } // Not-client-specific answers found, ok = answers.DefaultAnswer(fqdn, rrType, clientIp) if ok { log.WithFields(log.Fields{ "client": clientIp, "type": rrType, "question": question.Name, "source": "default", "found": len(found), }).Info("Found match in ", DEFAULT_KEY) Respond(w, req, found) return } else { log.Debug("No match found in ", DEFAULT_KEY) } // Phone a friend var recurseHosts Zone found, ok = answers.Matching(clientIp, RECURSE_KEY) if ok { recurseHosts = append(recurseHosts, found...) } found, ok = answers.Matching(DEFAULT_KEY, RECURSE_KEY) if ok { recurseHosts = append(recurseHosts, found...) } var err error for _, addr := range recurseHosts { err = Proxy(w, req, addr) if err == nil { log.WithFields(log.Fields{ "client": clientIp, "type": rrType, "question": question.Name, "source": "client-recurse", "host": addr, }).Info("Sent recursive response") return } else { log.WithFields(log.Fields{ "client": clientIp, "type": rrType, "question": question.Name, "source": "default-recurse", "host": addr, }).Warn("Recurser error:", err) } } // I give up log.WithFields(log.Fields{ "client": clientIp, "type": rrType, "question": question.Name, }).Warn("No answer found") dns.HandleFailed(w, req) }<|fim▁end|>
loadAnswers() watchSignals()
<|file_name|>closable_tcp_stream.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The tiny-http Contributors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::io::{Read, Write}; use std::io::Result as IoResult; use std::net::{SocketAddr, TcpStream, Shutdown}; pub struct ClosableTcpStream { stream: TcpStream, close_read: bool, close_write: bool, } impl ClosableTcpStream { pub fn new(stream: TcpStream, close_read: bool, close_write: bool) -> ClosableTcpStream { ClosableTcpStream { stream: stream, close_read: close_read, close_write: close_write, } } pub fn peer_addr(&mut self) -> IoResult<SocketAddr> { self.stream.peer_addr() } } impl Drop for ClosableTcpStream { fn drop(&mut self) { if self.close_read { self.stream.shutdown(Shutdown::Read).ok(); // ignoring outcome } if self.close_write { self.stream.shutdown(Shutdown::Write).ok(); // ignoring outcome } } }<|fim▁hole|> self.stream.read(buf) } } impl Write for ClosableTcpStream { fn write(&mut self, buf: &[u8]) -> IoResult<usize> { self.stream.write(buf) } fn flush(&mut self) -> IoResult<()> { self.stream.flush() } }<|fim▁end|>
impl Read for ClosableTcpStream { fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> {
<|file_name|>swarming_test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import sys import os import imp import subprocess import re import json import pprint import shutil import copy import StringIO import logging import itertools import numpy import time import math import uuid import tempfile from pkg_resources import resource_filename from optparse import OptionParser from nupic.database.ClientJobsDAO import ClientJobsDAO from nupic.support import configuration, initLogging from nupic.support.unittesthelpers.testcasebase import (unittest, TestCaseBase as HelperTestCaseBase) from nupic.swarming import HypersearchWorker from nupic.swarming.api import getSwarmModelParams, createAndStartSwarm from nupic.swarming.hypersearch.utils import generatePersistentJobGUID from nupic.swarming.DummyModelRunner import OPFDummyModelRunner DEFAULT_JOB_TIMEOUT_SEC = 60 * 2 # Filters _debugOut messages g_debug = True # Our setUpModule entry block sets this to an instance of MyTestEnvironment() g_myEnv = None # These are the args after using the optparse # This value for the swarm maturity window gives more repeatable results for # unit tests that use multiple workers g_repeatableSwarmMaturityWindow = 5 class MyTestEnvironment(object): # ======================================================================= def __init__(self): # Save all command line options self.options = _ArgParser.parseArgs() # Create the path to our source experiments thisFile = __file__ testDir = os.path.split(os.path.abspath(thisFile))[0] self.testSrcExpDir = os.path.join(testDir, 'experiments') self.testSrcDataDir = os.path.join(testDir, 'data') return class ExperimentTestBaseClass(HelperTestCaseBase): def setUp(self): """ Method called to prepare the test fixture. This is called by the unittest framework immediately before calling the test method; any exception raised by this method will be considered an error rather than a test failure. The default implementation does nothing. """ pass def tearDown(self): """ Method called immediately after the test method has been called and the result recorded. This is called even if the test method raised an exception, so the implementation in subclasses may need to be particularly careful about checking internal state. Any exception raised by this method will be considered an error rather than a test failure. This method will only be called if the setUp() succeeds, regardless of the outcome of the test method. The default implementation does nothing. """ # Reset our log items self.resetExtraLogItems() def shortDescription(self): """ Override to force unittest framework to use test method names instead of docstrings in the report. """ return None def _printTestHeader(self): """ Print out what test we are running """ print "###############################################################" print "Running test: %s.%s..." % (self.__class__, self._testMethodName) def _setDataPath(self, env): """ Put the path to our datasets int the NTA_DATA_PATH variable which will be used to set the environment for each of the workers Parameters: --------------------------------------------------------------------- env: The current environment dict """ assert env is not None # If already have a path, concatenate to it if "NTA_DATA_PATH" in env: newPath = "%s:%s" % (env["NTA_DATA_PATH"], g_myEnv.testSrcDataDir) else: newPath = g_myEnv.testSrcDataDir env["NTA_DATA_PATH"] = newPath def _launchWorkers(self, cmdLine, numWorkers): """ Launch worker processes to execute the given command line Parameters: ----------------------------------------------- cmdLine: The command line for each worker numWorkers: number of workers to launch retval: list of workers """ workers = [] for i in range(numWorkers): stdout = tempfile.TemporaryFile() stderr = tempfile.TemporaryFile() p = subprocess.Popen(cmdLine, bufsize=1, env=os.environ, shell=True, stdin=None, stdout=stdout, stderr=stderr) workers.append(p) return workers def _getJobInfo(self, cjDAO, workers, jobID): """ Return the job info for a job Parameters: ----------------------------------------------- cjDAO: client jobs database instance workers: list of workers for this job jobID: which job ID retval: job info """ # Get the job info jobInfo = cjDAO.jobInfo(jobID) # Since we're running outside of the Nupic engine, we launched the workers # ourself, so see how many are still running and jam the correct status # into the job info. When using the Nupic engine, it would do this # for us. runningCount = 0 for worker in workers: retCode = worker.poll() if retCode is None: runningCount += 1 if runningCount > 0: status = ClientJobsDAO.STATUS_RUNNING else: status = ClientJobsDAO.STATUS_COMPLETED jobInfo = jobInfo._replace(status=status) if status == ClientJobsDAO.STATUS_COMPLETED: jobInfo = jobInfo._replace( completionReason=ClientJobsDAO.CMPL_REASON_SUCCESS) return jobInfo def _generateHSJobParams(self, expDirectory=None, hsImp='v2', maxModels=2, predictionCacheMaxRecords=None, dataPath=None, maxRecords=10): """ This method generates a canned Hypersearch Job Params structure based on some high level options Parameters: --------------------------------------------------------------------- predictionCacheMaxRecords: If specified, determine the maximum number of records in the prediction cache. dataPath: When expDirectory is not specified, this is the data file to be used for the operation. If this value is not specified, it will use the /extra/qa/hotgym/qa_hotgym.csv. """ if expDirectory is not None: descriptionPyPath = os.path.join(expDirectory, "description.py") permutationsPyPath = os.path.join(expDirectory, "permutations.py") permutationsPyContents = open(permutationsPyPath, 'rb').read() descriptionPyContents = open(descriptionPyPath, 'rb').read() jobParams = {'persistentJobGUID' : generatePersistentJobGUID(), 'permutationsPyContents': permutationsPyContents, 'descriptionPyContents': descriptionPyContents, 'maxModels': maxModels, 'hsVersion': hsImp} if predictionCacheMaxRecords is not None: jobParams['predictionCacheMaxRecords'] = predictionCacheMaxRecords else: # Form the stream definition if dataPath is None: dataPath = resource_filename("nupic.data", os.path.join("extra", "qa", "hotgym", "qa_hotgym.csv")) streamDef = dict( version = 1, info = "TestHypersearch", streams = [ dict(source="file://%s" % (dataPath), info=dataPath, columns=["*"], first_record=0, last_record=maxRecords), ], ) # Generate the experiment description expDesc = { "predictionField": "consumption", "streamDef": streamDef, "includedFields": [ { "fieldName": "gym", "fieldType": "string" }, { "fieldName": "consumption", "fieldType": "float", "minValue": 0, "maxValue": 200, }, ], "iterationCount": maxRecords, "resetPeriod": { 'weeks': 0, 'days': 0, 'hours': 8, 'minutes': 0, 'seconds': 0, 'milliseconds': 0, 'microseconds': 0, }, } jobParams = { "persistentJobGUID": _generatePersistentJobGUID(), "description":expDesc, "maxModels": maxModels, "hsVersion": hsImp, } if predictionCacheMaxRecords is not None: jobParams['predictionCacheMaxRecords'] = predictionCacheMaxRecords return jobParams def _runPermutationsLocal(self, jobParams, loggingLevel=logging.INFO, env=None, waitForCompletion=True, continueJobId=None, ignoreErrModels=False): """ This runs permutations on the given experiment using just 1 worker in the current process Parameters: ------------------------------------------------------------------- jobParams: filled in job params for a hypersearch loggingLevel: logging level to use in the Hypersearch worker env: if not None, this is a dict of environment variables that should be sent to each worker process. These can aid in re-using the same description/permutations file for different tests. waitForCompletion: If True, wait for job to complete before returning If False, then return resultsInfoForAllModels and metricResults will be None continueJobId: If not None, then this is the JobId of a job we want to continue working on with another worker. ignoreErrModels: If true, ignore erred models retval: (jobId, jobInfo, resultsInfoForAllModels, metricResults) """ print print "==================================================================" print "Running Hypersearch job using 1 worker in current process" print "==================================================================" # Plug in modified environment variables if env is not None: saveEnvState = copy.deepcopy(os.environ) os.environ.update(env) # Insert the job entry into the database in the pre-running state cjDAO = ClientJobsDAO.get() if continueJobId is None: jobID = cjDAO.jobInsert(client='test', cmdLine='<started manually>', params=json.dumps(jobParams), alreadyRunning=True, minimumWorkers=1, maximumWorkers=1, jobType = cjDAO.JOB_TYPE_HS) else: jobID = continueJobId # Command line args. args = ['ignoreThis', '--jobID=%d' % (jobID), '--logLevel=%d' % (loggingLevel)] if continueJobId is None: args.append('--clearModels') # Run it in the current process try: HypersearchWorker.main(args) # The dummy model runner will call sys.exit(0) when # NTA_TEST_sysExitAfterNIterations is set except SystemExit: pass except: raise # Restore environment if env is not None: os.environ = saveEnvState # ---------------------------------------------------------------------- # Make sure all models completed successfully models = cjDAO.modelsGetUpdateCounters(jobID) modelIDs = [model.modelId for model in models] if len(modelIDs) > 0: results = cjDAO.modelsGetResultAndStatus(modelIDs) else: results = [] metricResults = [] for result in results: if result.results is not None: metricResults.append(json.loads(result.results)[1].values()[0]) else: metricResults.append(None) if not ignoreErrModels: self.assertNotEqual(result.completionReason, cjDAO.CMPL_REASON_ERROR, "Model did not complete successfully:\n%s" % (result.completionMsg)) # Print worker completion message jobInfo = cjDAO.jobInfo(jobID) return (jobID, jobInfo, results, metricResults) def _runPermutationsCluster(self, jobParams, loggingLevel=logging.INFO, maxNumWorkers=4, env=None, waitForCompletion=True, ignoreErrModels=False, timeoutSec=DEFAULT_JOB_TIMEOUT_SEC): """ Given a prepared, filled in jobParams for a hypersearch, this starts the job, waits for it to complete, and returns the results for all models. Parameters: ------------------------------------------------------------------- jobParams: filled in job params for a hypersearch loggingLevel: logging level to use in the Hypersearch worker maxNumWorkers: max # of worker processes to use env: if not None, this is a dict of environment variables that should be sent to each worker process. These can aid in re-using the same description/permutations file for different tests. waitForCompletion: If True, wait for job to complete before returning If False, then return resultsInfoForAllModels and metricResults will be None ignoreErrModels: If true, ignore erred models retval: (jobID, jobInfo, resultsInfoForAllModels, metricResults) """ print print "==================================================================" print "Running Hypersearch job on cluster" print "==================================================================" # -------------------------------------------------------------------- # Submit the job if env is not None and len(env) > 0: envItems = [] for (key, value) in env.iteritems(): envItems.append("export %s=%s" % (key, value)) envStr = "%s;" % (';'.join(envItems)) else: envStr = '' cmdLine = '%s python -m nupic.swarming.HypersearchWorker ' \ '--jobID={JOBID} --logLevel=%d' \ % (envStr, loggingLevel) cjDAO = ClientJobsDAO.get() jobID = cjDAO.jobInsert(client='test', cmdLine=cmdLine, params=json.dumps(jobParams), minimumWorkers=1, maximumWorkers=maxNumWorkers, jobType = cjDAO.JOB_TYPE_HS) # Launch the workers ourself if necessary (no nupic engine running). workerCmdLine = '%s python -m nupic.swarming.HypersearchWorker ' \ '--jobID=%d --logLevel=%d' \ % (envStr, jobID, loggingLevel) workers = self._launchWorkers(cmdLine=workerCmdLine, numWorkers=maxNumWorkers) print "Successfully submitted new test job, jobID=%d" % (jobID) print "Each of %d workers executing the command line: " % (maxNumWorkers), \ cmdLine if not waitForCompletion: return (jobID, None, None) if timeoutSec is None: timeout=DEFAULT_JOB_TIMEOUT_SEC else: timeout=timeoutSec # -------------------------------------------------------------------- # Wait for it to complete startTime = time.time() lastUpdate = time.time() lastCompleted = 0 lastCompletedWithError = 0 lastCompletedAsOrphan = 0 lastStarted = 0 lastJobStatus = "NA" lastJobResults = None lastActiveSwarms = None lastEngStatus = None modelIDs = [] print "\n%-15s %-15s %-15s %-15s %-15s" % ("jobStatus", "modelsStarted", "modelsCompleted", "modelErrs", "modelOrphans") print "-------------------------------------------------------------------" while (lastJobStatus != ClientJobsDAO.STATUS_COMPLETED) \ and (time.time() - lastUpdate < timeout): printUpdate = False if g_myEnv.options.verbosity == 0: time.sleep(0.5) # -------------------------------------------------------------------- # Get the job status jobInfo = self._getJobInfo(cjDAO, workers, jobID) if jobInfo.status != lastJobStatus: if jobInfo.status == ClientJobsDAO.STATUS_RUNNING \ and lastJobStatus != ClientJobsDAO.STATUS_RUNNING: print "# Swarm job now running. jobID=%s" \ % (jobInfo.jobId) lastJobStatus = jobInfo.status printUpdate = True if g_myEnv.options.verbosity >= 1: if jobInfo.engWorkerState is not None: activeSwarms = json.loads(jobInfo.engWorkerState)['activeSwarms'] if activeSwarms != lastActiveSwarms: #print "-------------------------------------------------------" print ">> Active swarms:\n ", '\n '.join(activeSwarms) lastActiveSwarms = activeSwarms print if jobInfo.results != lastJobResults: #print "-------------------------------------------------------" print ">> New best:", jobInfo.results, "###" lastJobResults = jobInfo.results if jobInfo.engStatus != lastEngStatus: print '>> Status: "%s"' % jobInfo.engStatus print lastEngStatus = jobInfo.engStatus # -------------------------------------------------------------------- # Get the list of models created for this job modelCounters = cjDAO.modelsGetUpdateCounters(jobID) if len(modelCounters) != lastStarted: modelIDs = [x.modelId for x in modelCounters] lastStarted = len(modelCounters) printUpdate = True # -------------------------------------------------------------------- # See how many have finished if len(modelIDs) > 0: completed = 0 completedWithError = 0 completedAsOrphan = 0 infos = cjDAO.modelsGetResultAndStatus(modelIDs) for info in infos: if info.status == ClientJobsDAO.STATUS_COMPLETED: completed += 1 if info.completionReason == ClientJobsDAO.CMPL_REASON_ERROR: completedWithError += 1 if info.completionReason == ClientJobsDAO.CMPL_REASON_ORPHAN: completedAsOrphan += 1 if completed != lastCompleted \ or completedWithError != lastCompletedWithError \ or completedAsOrphan != lastCompletedAsOrphan: lastCompleted = completed lastCompletedWithError = completedWithError lastCompletedAsOrphan = completedAsOrphan printUpdate = True # -------------------------------------------------------------------- # Print update? if printUpdate: lastUpdate = time.time() if g_myEnv.options.verbosity >= 1: print ">>", print "%-15s %-15d %-15d %-15d %-15d" % (lastJobStatus, lastStarted, lastCompleted, lastCompletedWithError, lastCompletedAsOrphan) # ======================================================================== # Final total print "\n<< %-15s %-15d %-15d %-15d %-15d" % (lastJobStatus, lastStarted, lastCompleted, lastCompletedWithError, lastCompletedAsOrphan) # Success? jobInfo = self._getJobInfo(cjDAO, workers, jobID) if not ignoreErrModels: self.assertEqual (jobInfo.completionReason, ClientJobsDAO.CMPL_REASON_SUCCESS) # Get final model results models = cjDAO.modelsGetUpdateCounters(jobID) modelIDs = [model.modelId for model in models] if len(modelIDs) > 0: results = cjDAO.modelsGetResultAndStatus(modelIDs) else: results = [] metricResults = [] for result in results: if result.results is not None: metricResults.append(json.loads(result.results)[1].values()[0]) else: metricResults.append(None) if not ignoreErrModels: self.assertNotEqual(result.completionReason, cjDAO.CMPL_REASON_ERROR, "Model did not complete successfully:\n%s" % (result.completionMsg)) return (jobID, jobInfo, results, metricResults) def runPermutations(self, expDirectory, hsImp='v2', maxModels=2, maxNumWorkers=4, loggingLevel=logging.INFO, onCluster=False, env=None, waitForCompletion=True, continueJobId=None, dataPath=None, maxRecords=None, timeoutSec=None, ignoreErrModels=False, predictionCacheMaxRecords=None, **kwargs): """ This runs permutations on the given experiment using just 1 worker Parameters: ------------------------------------------------------------------- expDirectory: directory containing the description.py and permutations.py hsImp: which implementation of Hypersearch to use maxModels: max # of models to generate maxNumWorkers: max # of workers to use, N/A if onCluster is False loggingLevel: logging level to use in the Hypersearch worker onCluster: if True, run on the Hadoop cluster env: if not None, this is a dict of environment variables that should be sent to each worker process. These can aid in re-using the same description/permutations file for different tests. waitForCompletion: If True, wait for job to complete before returning If False, then return resultsInfoForAllModels and metricResults will be None continueJobId: If not None, then this is the JobId of a job we want to continue working on with another worker. ignoreErrModels: If true, ignore erred models maxRecords: This value is passed to the function, _generateHSJobParams(), to represent the maximum number of records to generate for the operation. dataPath: This value is passed to the function, _generateHSJobParams(), which points to the data file for the operation. predictionCacheMaxRecords: If specified, determine the maximum number of records in the prediction cache. retval: (jobID, jobInfo, resultsInfoForAllModels, metricResults, minErrScore) """ # Put in the path to our datasets if env is None: env = dict() self._setDataPath(env) # ---------------------------------------------------------------- # Prepare the jobParams jobParams = self._generateHSJobParams(expDirectory=expDirectory, hsImp=hsImp, maxModels=maxModels, maxRecords=maxRecords, dataPath=dataPath, predictionCacheMaxRecords=predictionCacheMaxRecords) jobParams.update(kwargs) if onCluster: (jobID, jobInfo, resultInfos, metricResults) \ = self._runPermutationsCluster(jobParams=jobParams, loggingLevel=loggingLevel, maxNumWorkers=maxNumWorkers, env=env, waitForCompletion=waitForCompletion, ignoreErrModels=ignoreErrModels, timeoutSec=timeoutSec) else: (jobID, jobInfo, resultInfos, metricResults) \ = self._runPermutationsLocal(jobParams=jobParams, loggingLevel=loggingLevel, env=env, waitForCompletion=waitForCompletion, continueJobId=continueJobId, ignoreErrModels=ignoreErrModels) if not waitForCompletion: return (jobID, jobInfo, resultInfos, metricResults, None) # Print job status print "\n------------------------------------------------------------------" print "Hadoop completion reason: %s" % (jobInfo.completionReason) print "Worker completion reason: %s" % (jobInfo.workerCompletionReason) print "Worker completion msg: %s" % (jobInfo.workerCompletionMsg) if jobInfo.engWorkerState is not None: print "\nEngine worker state:" print "---------------------------------------------------------------" pprint.pprint(json.loads(jobInfo.engWorkerState)) # Print out best results minErrScore=None metricAmts = [] for result in metricResults: if result is None: metricAmts.append(numpy.inf) else: metricAmts.append(result) metricAmts = numpy.array(metricAmts) if len(metricAmts) > 0: minErrScore = metricAmts.min() minModelID = resultInfos[metricAmts.argmin()].modelId # Get model info cjDAO = ClientJobsDAO.get() modelParams = cjDAO.modelsGetParams([minModelID])[0].params print "Model params for best model: \n%s" \ % (pprint.pformat(json.loads(modelParams))) print "Best model result: %f" % (minErrScore) else: print "No models finished" return (jobID, jobInfo, resultInfos, metricResults, minErrScore) class OneNodeTests(ExperimentTestBaseClass): """ """ # AWS tests attribute required for tagging via automatic test discovery via # nosetests engineAWSClusterTest=True def setUp(self): super(OneNodeTests, self).setUp() if not g_myEnv.options.runInProc: self.skipTest("Skipping One Node test since runInProc is not specified") def testSimpleV2(self, onCluster=False, env=None, **kwargs): """ Try running simple permutations """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'simpleV2') # Test it out if env is None: env = dict() env["NTA_TEST_numIterations"] = '99' env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] = \ '%d' % (g_repeatableSwarmMaturityWindow) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, **kwargs) self.assertEqual(minErrScore, 20) self.assertLess(len(resultInfos), 350) return def testDeltaV2(self, onCluster=False, env=None, **kwargs): """ Try running a simple permutations with delta encoder Test which tests the delta encoder. Runs a swarm of the sawtooth dataset With a functioning delta encoder this should give a perfect result DEBUG: disabled temporarily because this test takes too long!!! """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'delta') # Test it out if env is None: env = dict() env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] = \ '%d' % (g_repeatableSwarmMaturityWindow) env["NTA_TEST_exitAfterNModels"] = str(20) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, **kwargs) self.assertLess(minErrScore, 0.002) return def testSimpleV2NoSpeculation(self, onCluster=False, env=None, **kwargs): """ Try running a simple permutations """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'simpleV2') # Test it out if env is None: env = dict() env["NTA_TEST_numIterations"] = '99' env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] = \ '%d' % (g_repeatableSwarmMaturityWindow) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, speculativeParticles=False, **kwargs) self.assertEqual(minErrScore, 20) self.assertGreater(len(resultInfos), 1) self.assertLess(len(resultInfos), 350) return def testCLAModelV2(self, onCluster=False, env=None, maxModels=2, **kwargs): """ Try running a simple permutations using an actual CLA model, not a dummy """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'dummyV2') # Test it out if env is None: env = dict() (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=maxModels, **kwargs) self.assertEqual(len(resultInfos), maxModels) return def testCLAMultistepModel(self, onCluster=False, env=None, maxModels=2, **kwargs): """ Try running a simple permutations using an actual CLA model, not a dummy """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'simple_cla_multistep') # Test it out if env is None: env = dict() (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=maxModels, **kwargs) self.assertEqual(len(resultInfos), maxModels) return def testLegacyCLAMultistepModel(self, onCluster=False, env=None, maxModels=2, **kwargs): """ Try running a simple permutations using an actual CLA model, not a dummy. This is a legacy CLA multi-step model that doesn't declare a separate 'classifierOnly' encoder for the predicted field. """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'legacy_cla_multistep') # Test it out if env is None: env = dict() (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=maxModels, **kwargs) self.assertEqual(len(resultInfos), maxModels) return def testFilterV2(self, onCluster=False): """ Try running a simple permutations """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'simpleV2') # Don't allow the consumption encoder maxval to get to it's optimum # value (which is 250). This increases our errScore by +25. env = dict() env["NTA_TEST_maxvalFilter"] = '225' env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] = '6' (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None) self.assertEqual(minErrScore, 45) self.assertLess(len(resultInfos), 400) return def testLateWorker(self, onCluster=False): """ Try running a simple permutations where a worker comes in late, after the some models have already been evaluated """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'simpleV2') env = dict() env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] = \ '%d' % (g_repeatableSwarmMaturityWindow) env["NTA_TEST_exitAfterNModels"] = '100' (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, maxModels=None, onCluster=onCluster, env=env, waitForCompletion=True, ) self.assertEqual(len(resultInfos), 100) # Run another worker the rest of the way env.pop("NTA_TEST_exitAfterNModels") (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, maxModels=None, onCluster=onCluster, env=env, waitForCompletion=True, continueJobId = jobID, ) self.assertEqual(minErrScore, 20) self.assertLess(len(resultInfos), 350) return def testOrphanedModel(self, onCluster=False, modelRange=(0,1)): """ Run a worker on a model for a while, then have it exit before the model finishes. Then, run another worker, which should detect the orphaned model. """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'simpleV2') # NTA_TEST_numIterations is watched by the dummyModelParams() method of # the permutations file. # NTA_TEST_sysExitModelRange is watched by the dummyModelParams() method of # the permutations file. It tells it to do a sys.exit() after so many # iterations. # We increase the swarm maturity window to make our unit tests more # repeatable. There is an element of randomness as to which model # parameter combinations get evaluated first when running with # multiple workers, so this insures that we can find the "best" model # that we expect to see in our unit tests. env = dict() env["NTA_TEST_numIterations"] = '2' env["NTA_TEST_sysExitModelRange"] = '%d,%d' % (modelRange[0], modelRange[1]) env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] \ = '%d' % (g_repeatableSwarmMaturityWindow) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, maxModels=300, onCluster=onCluster, env=env, waitForCompletion=False, ) # At this point, we should have 1 model, still running (beg, end) = modelRange self.assertEqual(len(resultInfos), end) numRunning = 0 for res in resultInfos: if res.status == ClientJobsDAO.STATUS_RUNNING: numRunning += 1 self.assertEqual(numRunning, 1) # Run another worker the rest of the way, after delaying enough time to # generate an orphaned model env["NTA_CONF_PROP_nupic_hypersearch_modelOrphanIntervalSecs"] = '1' time.sleep(2) # Here we launch another worker to finish up the job. We set the maxModels # to 300 (200 something should be enough) in case the orphan detection is # not working, it will make sure we don't loop for excessively long. # With orphan detection working, we should detect that the first model # would never complete, orphan it, and create a new one in the 1st sprint. # Without orphan detection working, we will wait forever for the 1st sprint # to finish, and will create a bunch of gen 1, then gen2, then gen 3, etc. # and gen 0 will never finish, so the swarm will never mature. (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, maxModels=300, onCluster=onCluster, env=env, waitForCompletion=True, continueJobId = jobID, ) self.assertEqual(minErrScore, 20) self.assertLess(len(resultInfos), 350) return def testOrphanedModelGen1(self): """ Run a worker on a model for a while, then have it exit before a model finishes in gen index 2. Then, run another worker, which should detect the orphaned model. """ self._printTestHeader() inst = OneNodeTests(self._testMethodName) return inst.testOrphanedModel(modelRange=(10,11)) def testErredModel(self, onCluster=False, modelRange=(6,7)): """ Run with 1 or more models generating errors """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'simpleV2') # We increase the swarm maturity window to make our unit tests more # repeatable. There is an element of randomness as to which model # parameter combinations get evaluated first when running with # multiple workers, so this insures that we can find the "best" model # that we expect to see in our unit tests. env = dict() env["NTA_TEST_errModelRange"] = '%d,%d' % (modelRange[0], modelRange[1]) env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] \ = '%d' % (g_repeatableSwarmMaturityWindow) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, ignoreErrModels=True ) self.assertEqual(minErrScore, 20) self.assertLess(len(resultInfos), 350) return def testJobFailModel(self, onCluster=False, modelRange=(6,7)): """ Run with 1 or more models generating jobFail exception """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'simpleV2') # We increase the swarm maturity window to make our unit tests more # repeatable. There is an element of randomness as to which model # parameter combinations get evaluated first when running with # multiple workers, so this insures that we can find the "best" model # that we expect to see in our unit tests. env = dict() env["NTA_TEST_jobFailErr"] = 'True' maxNumWorkers = 4 (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, maxNumWorkers=maxNumWorkers, ignoreErrModels=True ) # Make sure workerCompletionReason was error self.assertEqual (jobInfo.workerCompletionReason, ClientJobsDAO.CMPL_REASON_ERROR) self.assertLess (len(resultInfos), maxNumWorkers+1) return def testTooManyErredModels(self, onCluster=False, modelRange=(5,10)): """ Run with too many models generating errors """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'simpleV2') # We increase the swarm maturity window to make our unit tests more # repeatable. There is an element of randomness as to which model # parameter combinations get evaluated first when running with # multiple workers, so this insures that we can find the "best" model # that we expect to see in our unit tests. env = dict() env["NTA_TEST_errModelRange"] = '%d,%d' % (modelRange[0], modelRange[1]) env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] \ = '%d' % (g_repeatableSwarmMaturityWindow) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, ignoreErrModels=True ) self.assertEqual (jobInfo.workerCompletionReason, ClientJobsDAO.CMPL_REASON_ERROR) return def testFieldThreshold(self, onCluster=False, env=None, **kwargs): """ Test minimum field contribution threshold for a field to be included in further sprints """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'field_threshold_temporal') # Test it out if env is None: env = dict() env["NTA_TEST_numIterations"] = '99' env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] = \ '%d' % (g_repeatableSwarmMaturityWindow) env["NTA_CONF_PROP_nupic_hypersearch_max_field_branching"] = \ '%d' % (0) env["NTA_CONF_PROP_nupic_hypersearch_minParticlesPerSwarm"] = \ '%d' % (2) env["NTA_CONF_PROP_nupic_hypersearch_min_field_contribution"] = \ '%f' % (100) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, dummyModel={'iterations':200}, **kwargs) # Get the field contributions from the hypersearch results dict cjDAO = ClientJobsDAO.get() jobResultsStr = cjDAO.jobGetFields(jobID, ['results'])[0] jobResults = json.loads(jobResultsStr) bestModel = cjDAO.modelsInfo([jobResults["bestModel"]])[0] params = json.loads(bestModel.params) prefix = 'modelParams|sensorParams|encoders|' expectedSwarmId = prefix + ('.' + prefix).join([ 'attendance', 'visitor_winloss']) self.assertEqual(params["particleState"]["swarmId"], expectedSwarmId, "Actual swarm id = %s\nExpcted swarm id = %s" \ % (params["particleState"]["swarmId"], expectedSwarmId)) self.assertEqual( bestModel.optimizedMetric, 75) #========================================================================== env["NTA_CONF_PROP_nupic_hypersearch_min_field_contribution"] = \ '%f' % (20) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, dummyModel={'iterations':200}, **kwargs) # Get the field contributions from the hypersearch results dict cjDAO = ClientJobsDAO.get() jobResultsStr = cjDAO.jobGetFields(jobID, ['results'])[0] jobResults = json.loads(jobResultsStr) bestModel = cjDAO.modelsInfo([jobResults["bestModel"]])[0] params = json.loads(bestModel.params) prefix = 'modelParams|sensorParams|encoders|' expectedSwarmId = prefix + ('.' + prefix).join([ 'attendance', 'home_winloss', 'visitor_winloss']) self.assertEqual(params["particleState"]["swarmId"], expectedSwarmId, "Actual swarm id = %s\nExpcted swarm id = %s" \ % (params["particleState"]["swarmId"], expectedSwarmId)) assert bestModel.optimizedMetric == 55, bestModel.optimizedMetric #========================================================================== # Find best combo possible env["NTA_CONF_PROP_nupic_hypersearch_min_field_contribution"] = \ '%f' % (0.0) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, dummyModel={'iterations':200}, **kwargs) # Get the field contributions from the hypersearch results dict cjDAO = ClientJobsDAO.get() jobResultsStr = cjDAO.jobGetFields(jobID, ['results'])[0] jobResults = json.loads(jobResultsStr) bestModel = cjDAO.modelsInfo([jobResults["bestModel"]])[0] params = json.loads(bestModel.params) prefix = 'modelParams|sensorParams|encoders|' expectedSwarmId = prefix + ('.' + prefix).join([ 'attendance', 'home_winloss', 'precip', 'timestamp_dayOfWeek', 'timestamp_timeOfDay', 'visitor_winloss']) self.assertEqual(params["particleState"]["swarmId"], expectedSwarmId, "Actual swarm id = %s\nExpcted swarm id = %s" \ % (params["particleState"]["swarmId"], expectedSwarmId)) assert bestModel.optimizedMetric == 25, bestModel.optimizedMetric def testSpatialClassification(self, onCluster=False, env=None, **kwargs): """ Try running a spatial classification swarm """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'spatial_classification') # Test it out if env is None: env = dict() env["NTA_TEST_numIterations"] = '99' env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] = \ '%d' % (g_repeatableSwarmMaturityWindow) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, **kwargs) self.assertEqual(minErrScore, 20) self.assertLess(len(resultInfos), 350) # Check the expected field contributions cjDAO = ClientJobsDAO.get() jobResultsStr = cjDAO.jobGetFields(jobID, ['results'])[0] jobResults = json.loads(jobResultsStr) bestModel = cjDAO.modelsInfo([jobResults["bestModel"]])[0] params = json.loads(bestModel.params) actualFieldContributions = jobResults['fieldContributions'] print "Actual field contributions:", \ pprint.pformat(actualFieldContributions) expectedFieldContributions = { 'address': 100 * (90.0-30)/90.0, 'gym': 100 * (90.0-40)/90.0, 'timestamp_dayOfWeek': 100 * (90.0-80.0)/90.0, 'timestamp_timeOfDay': 100 * (90.0-90.0)/90.0, } for key, value in expectedFieldContributions.items(): self.assertEqual(actualFieldContributions[key], value, "actual field contribution from field '%s' does not " "match the expected value of %f" % (key, value)) # Check the expected best encoder combination prefix = 'modelParams|sensorParams|encoders|' expectedSwarmId = prefix + ('.' + prefix).join([ 'address', 'gym']) self.assertEqual(params["particleState"]["swarmId"], expectedSwarmId, "Actual swarm id = %s\nExpcted swarm id = %s" \ % (params["particleState"]["swarmId"], expectedSwarmId)) return def testAlwaysInputPredictedField(self, onCluster=False, env=None, **kwargs): """ Run a swarm where 'inputPredictedField' is set in the permutations file. The dummy model for this swarm is designed to give the lowest error when the predicted field is INCLUDED, so make sure we don't get this low error """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'input_predicted_field') # Test it out not requiring the predicted field. This should yield a # low error score if env is None: env = dict() env["NTA_TEST_inputPredictedField"] = "auto" env["NTA_TEST_numIterations"] = '99' env["NTA_CONF_PROP_nupic_hypersearch_minParticlesPerSwarm"] = \ '%d' % (2) env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] = \ '%d' % (g_repeatableSwarmMaturityWindow) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, **kwargs) self.assertEqual(minErrScore, -50) self.assertLess(len(resultInfos), 350) # Now, require the predicted field. This should yield a high error score if env is None: env = dict() env["NTA_TEST_inputPredictedField"] = "yes" env["NTA_TEST_numIterations"] = '99' env["NTA_CONF_PROP_nupic_hypersearch_minParticlesPerSwarm"] = \ '%d' % (2) env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] = \ '%d' % (g_repeatableSwarmMaturityWindow) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, **kwargs) self.assertEqual(minErrScore, -40) self.assertLess(len(resultInfos), 350) return def testFieldThresholdNoPredField(self, onCluster=False, env=None, **kwargs): """ Test minimum field contribution threshold for a field to be included in further sprints when doing a temporal search that does not require the predicted field. """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'input_predicted_field') # Test it out without any max field branching in effect if env is None: env = dict() env["NTA_TEST_numIterations"] = '99' env["NTA_TEST_inputPredictedField"] = "auto" env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] = \ '%d' % (g_repeatableSwarmMaturityWindow) env["NTA_CONF_PROP_nupic_hypersearch_max_field_branching"] = \ '%d' % (0) env["NTA_CONF_PROP_nupic_hypersearch_minParticlesPerSwarm"] = \ '%d' % (2) env["NTA_CONF_PROP_nupic_hypersearch_min_field_contribution"] = \ '%f' % (0) if True: (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, dummyModel={'iterations':200}, **kwargs) # Verify the best model and check the field contributions. cjDAO = ClientJobsDAO.get() jobResultsStr = cjDAO.jobGetFields(jobID, ['results'])[0] jobResults = json.loads(jobResultsStr) bestModel = cjDAO.modelsInfo([jobResults["bestModel"]])[0] params = json.loads(bestModel.params) prefix = 'modelParams|sensorParams|encoders|' expectedSwarmId = prefix + ('.' + prefix).join([ 'address', 'gym', 'timestamp_dayOfWeek', 'timestamp_timeOfDay']) self.assertEqual(params["particleState"]["swarmId"], expectedSwarmId, "Actual swarm id = %s\nExpcted swarm id = %s" \ % (params["particleState"]["swarmId"], expectedSwarmId)) self.assertEqual( bestModel.optimizedMetric, -50) # Check the field contributions actualFieldContributions = jobResults['fieldContributions'] print "Actual field contributions:", \ pprint.pformat(actualFieldContributions) expectedFieldContributions = { 'consumption': 0.0, 'address': 100 * (60.0-40.0)/60.0, 'timestamp_timeOfDay': 100 * (60.0-20.0)/60.0, 'timestamp_dayOfWeek': 100 * (60.0-10.0)/60.0, 'gym': 100 * (60.0-30.0)/60.0} for key, value in expectedFieldContributions.items(): self.assertEqual(actualFieldContributions[key], value, "actual field contribution from field '%s' does not " "match the expected value of %f" % (key, value)) if True: #========================================================================== # Now test ignoring all fields that contribute less than 55% to the # error score. This means we can only use the timestamp_timeOfDay and # timestamp_dayOfWeek fields. # This should bring our best error score up to 50-30-40 = -20 env["NTA_CONF_PROP_nupic_hypersearch_min_field_contribution"] = \ '%f' % (55) env["NTA_CONF_PROP_nupic_hypersearch_max_field_branching"] = \ '%d' % (5) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, dummyModel={'iterations':200}, **kwargs) # Get the best model cjDAO = ClientJobsDAO.get() jobResultsStr = cjDAO.jobGetFields(jobID, ['results'])[0] jobResults = json.loads(jobResultsStr) bestModel = cjDAO.modelsInfo([jobResults["bestModel"]])[0] params = json.loads(bestModel.params) prefix = 'modelParams|sensorParams|encoders|' expectedSwarmId = prefix + ('.' + prefix).join([ 'timestamp_dayOfWeek', 'timestamp_timeOfDay']) self.assertEqual(params["particleState"]["swarmId"], expectedSwarmId, "Actual swarm id = %s\nExpcted swarm id = %s" \ % (params["particleState"]["swarmId"], expectedSwarmId)) self.assertEqual( bestModel.optimizedMetric, -20) # Check field contributions returned actualFieldContributions = jobResults['fieldContributions'] print "Actual field contributions:", \ pprint.pformat(actualFieldContributions) expectedFieldContributions = { 'consumption': 0.0, 'address': 100 * (60.0-40.0)/60.0, 'timestamp_timeOfDay': 100 * (60.0-20.0)/60.0, 'timestamp_dayOfWeek': 100 * (60.0-10.0)/60.0, 'gym': 100 * (60.0-30.0)/60.0} for key, value in expectedFieldContributions.items(): self.assertEqual(actualFieldContributions[key], value, "actual field contribution from field '%s' does not " "match the expected value of %f" % (key, value)) if True: #========================================================================== # Now, test using maxFieldBranching to limit the max number of fields to # 3. This means we can only use the timestamp_timeOfDay, timestamp_dayOfWeek, # gym fields. # This should bring our error score to 50-30-40-20 = -40 env["NTA_CONF_PROP_nupic_hypersearch_min_field_contribution"] = \ '%f' % (0) env["NTA_CONF_PROP_nupic_hypersearch_max_field_branching"] = \ '%d' % (3) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, dummyModel={'iterations':200}, **kwargs) # Get the best model cjDAO = ClientJobsDAO.get() jobResultsStr = cjDAO.jobGetFields(jobID, ['results'])[0] jobResults = json.loads(jobResultsStr) bestModel = cjDAO.modelsInfo([jobResults["bestModel"]])[0] params = json.loads(bestModel.params) prefix = 'modelParams|sensorParams|encoders|' expectedSwarmId = prefix + ('.' + prefix).join([ 'gym', 'timestamp_dayOfWeek', 'timestamp_timeOfDay']) self.assertEqual(params["particleState"]["swarmId"], expectedSwarmId, "Actual swarm id = %s\nExpcted swarm id = %s" \ % (params["particleState"]["swarmId"], expectedSwarmId)) self.assertEqual( bestModel.optimizedMetric, -40) if True: #========================================================================== # Now, test setting max models so that no swarm can finish completely. # Make sure we get the expected field contributions env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] = \ '%d' % (g_repeatableSwarmMaturityWindow) env["NTA_CONF_PROP_nupic_hypersearch_max_field_branching"] = \ '%d' % (0) env["NTA_CONF_PROP_nupic_hypersearch_minParticlesPerSwarm"] = \ '%d' % (5) env["NTA_CONF_PROP_nupic_hypersearch_min_field_contribution"] = \ '%f' % (0) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=10, dummyModel={'iterations':200}, **kwargs) # Get the best model cjDAO = ClientJobsDAO.get() jobResultsStr = cjDAO.jobGetFields(jobID, ['results'])[0] jobResults = json.loads(jobResultsStr) bestModel = cjDAO.modelsInfo([jobResults["bestModel"]])[0] params = json.loads(bestModel.params) prefix = 'modelParams|sensorParams|encoders|' expectedSwarmId = prefix + ('.' + prefix).join([ 'timestamp_dayOfWeek']) self.assertEqual(params["particleState"]["swarmId"], expectedSwarmId, "Actual swarm id = %s\nExpcted swarm id = %s" \ % (params["particleState"]["swarmId"], expectedSwarmId)) self.assertEqual( bestModel.optimizedMetric, 10) # Check field contributions returned actualFieldContributions = jobResults['fieldContributions'] print "Actual field contributions:", \ pprint.pformat(actualFieldContributions) expectedFieldContributions = { 'consumption': 0.0, 'address': 100 * (60.0-40.0)/60.0, 'timestamp_timeOfDay': 100 * (60.0-20.0)/60.0, 'timestamp_dayOfWeek': 100 * (60.0-10.0)/60.0,<|fim▁hole|> 'gym': 100 * (60.0-30.0)/60.0} class MultiNodeTests(ExperimentTestBaseClass): """ Test hypersearch on multiple nodes """ # AWS tests attribute required for tagging via automatic test discovery via # nosetests engineAWSClusterTest=True def testSimpleV2(self): """ Try running a simple permutations """ self._printTestHeader() inst = OneNodeTests(self._testMethodName) return inst.testSimpleV2(onCluster=True) #, maxNumWorkers=7) def testDeltaV2(self): """ Try running a simple permutations """ self._printTestHeader() inst = OneNodeTests(self._testMethodName) return inst.testDeltaV2(onCluster=True) #, maxNumWorkers=7) def testSmartSpeculation(self, onCluster=True, env=None, **kwargs): """ Try running a simple permutations """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'smart_speculation_temporal') # Test it out if env is None: env = dict() env["NTA_TEST_numIterations"] = '99' env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] = \ '%d' % (g_repeatableSwarmMaturityWindow) env["NTA_CONF_PROP_nupic_hypersearch_minParticlesPerSwarm"] = \ '%d' % (1) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, dummyModel={'iterations':200}, **kwargs) # Get the field contributions from the hypersearch results dict cjDAO = ClientJobsDAO.get() jobInfoStr = cjDAO.jobGetFields(jobID, ['results','engWorkerState']) jobResultsStr = jobInfoStr[0] engState = jobInfoStr[1] engState = json.loads(engState) swarms = engState["swarms"] jobResults = json.loads(jobResultsStr) bestModel = cjDAO.modelsInfo([jobResults["bestModel"]])[0] params = json.loads(bestModel.params) # Make sure that the only nonkilled models are the ones that would have been # run without speculation prefix = 'modelParams|sensorParams|encoders|' correctOrder = ["A","B","C","D","E","F","G","Pred"] correctOrder = [prefix + x for x in correctOrder] for swarm in swarms: if swarms[swarm]["status"] == 'killed': swarmId = swarm.split(".") if(len(swarmId)>1): # Make sure that something before the last two encoders is in the # wrong sprint progression, hence why it was killed # The last encoder is the predicted field and the second to last is # the current new addition wrong=0 for i in range(len(swarmId)-2): if correctOrder[i] != swarmId[i]: wrong=1 assert wrong==1, "Some of the killed swarms should not have been " \ + "killed as they are a legal combination." if swarms[swarm]["status"] == 'completed': swarmId = swarm.split(".") if(len(swarmId)>3): # Make sure that the completed swarms are all swarms that should # have been run. # The last encoder is the predicted field and the second to last is # the current new addition for i in range(len(swarmId)-3): if correctOrder[i] != swarmId[i]: assert False , "Some of the completed swarms should not have " \ "finished as they are illegal combinations" if swarms[swarm]["status"] == 'active': assert False , "Some swarms are still active at the end of hypersearch" pass def testSmartSpeculationSpatialClassification(self, onCluster=True, env=None, **kwargs): """ Test that smart speculation does the right thing with spatial classification models. This also applies to temporal models where the predicted field is optional (or excluded) since Hypersearch treats them the same. """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'smart_speculation_spatial_classification') # Test it out if env is None: env = dict() env["NTA_TEST_numIterations"] = '99' env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] = \ '%d' % (g_repeatableSwarmMaturityWindow) env["NTA_CONF_PROP_nupic_hypersearch_minParticlesPerSwarm"] = \ '%d' % (1) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, maxNumWorkers=5, dummyModel={'iterations':200}, **kwargs) # Get the worker state cjDAO = ClientJobsDAO.get() jobInfoStr = cjDAO.jobGetFields(jobID, ['results','engWorkerState']) jobResultsStr = jobInfoStr[0] engState = jobInfoStr[1] engState = json.loads(engState) swarms = engState["swarms"] jobResults = json.loads(jobResultsStr) bestModel = cjDAO.modelsInfo([jobResults["bestModel"]])[0] params = json.loads(bestModel.params) # Make sure that the only non-killed models are the ones that would have been # run without speculation prefix = 'modelParams|sensorParams|encoders|' correctOrder = ["A","B","C"] correctOrder = [prefix + x for x in correctOrder] for swarm in swarms: if swarms[swarm]["status"] == 'killed': swarmId = swarm.split(".") if(len(swarmId) > 1): # Make sure that the best encoder is not in this swarm if correctOrder[0] in swarmId: raise RuntimeError("Some of the killed swarms should not have been " "killed as they are a legal combination.") elif swarms[swarm]["status"] == 'completed': swarmId = swarm.split(".") if(len(swarmId) >= 2): # Make sure that the completed swarms are all swarms that should # have been run. for i in range(len(swarmId)-1): if correctOrder[i] != swarmId[i]: raise RuntimeError("Some of the completed swarms should not have " "finished as they are illegal combinations") elif swarms[swarm]["status"] == 'active': raise RuntimeError("Some swarms are still active at the end of " "hypersearch") def testFieldBranching(self, onCluster=True, env=None, **kwargs): """ Try running a simple permutations """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'max_branching_temporal') # Test it out if env is None: env = dict() env["NTA_TEST_numIterations"] = '99' env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] = \ '%d' % (g_repeatableSwarmMaturityWindow) env["NTA_CONF_PROP_nupic_hypersearch_max_field_branching"] = \ '%d' % (4) env["NTA_CONF_PROP_nupic_hypersearch_min_field_contribution"] = \ '%f' % (-20.0) env["NTA_CONF_PROP_nupic_hypersearch_minParticlesPerSwarm"] = \ '%d' % (2) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, dummyModel={'iterations':200}, **kwargs) # Get the field contributions from the hypersearch results dict cjDAO = ClientJobsDAO.get() jobResultsStr = cjDAO.jobGetFields(jobID, ['results'])[0] jobResults = json.loads(jobResultsStr) bestModel = cjDAO.modelsInfo([jobResults["bestModel"]])[0] params = json.loads(bestModel.params) prefix = 'modelParams|sensorParams|encoders|' expectedSwarmId = prefix + ('.' + prefix).join([ 'attendance', 'home_winloss', 'timestamp_dayOfWeek', 'timestamp_timeOfDay', 'visitor_winloss']) assert params["particleState"]["swarmId"] == expectedSwarmId, \ params["particleState"]["swarmId"] assert bestModel.optimizedMetric == 432, bestModel.optimizedMetric env["NTA_CONF_PROP_nupic_hypersearch_max_field_branching"] = \ '%d' % (3) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, dummyModel={'iterations':200}, **kwargs) # Get the field contributions from the hypersearch results dict cjDAO = ClientJobsDAO.get() jobResultsStr = cjDAO.jobGetFields(jobID, ['results'])[0] jobResults = json.loads(jobResultsStr) bestModel = cjDAO.modelsInfo([jobResults["bestModel"]])[0] params = json.loads(bestModel.params) prefix = 'modelParams|sensorParams|encoders|' expectedSwarmId = prefix + ('.' + prefix).join([ 'attendance', 'home_winloss', 'timestamp_timeOfDay', 'visitor_winloss']) assert params["particleState"]["swarmId"] == expectedSwarmId, \ params["particleState"]["swarmId"] assert bestModel.optimizedMetric == 465, bestModel.optimizedMetric env["NTA_CONF_PROP_nupic_hypersearch_max_field_branching"] = \ '%d' % (5) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, dummyModel={'iterations':200}, **kwargs) # Get the field contributions from the hypersearch results dict cjDAO = ClientJobsDAO.get() jobResultsStr = cjDAO.jobGetFields(jobID, ['results'])[0] jobResults = json.loads(jobResultsStr) bestModel = cjDAO.modelsInfo([jobResults["bestModel"]])[0] params = json.loads(bestModel.params) prefix = 'modelParams|sensorParams|encoders|' expectedSwarmId = prefix + ('.' + prefix).join([ 'attendance', 'home_winloss', 'precip', 'timestamp_dayOfWeek', 'timestamp_timeOfDay', 'visitor_winloss']) assert params["particleState"]["swarmId"] == expectedSwarmId, \ params["particleState"]["swarmId"] assert bestModel.optimizedMetric == 390, bestModel.optimizedMetric #Find best combo with 3 fields env["NTA_CONF_PROP_nupic_hypersearch_max_field_branching"] = \ '%d' % (0) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=100, dummyModel={'iterations':200}, **kwargs) # Get the field contributions from the hypersearch results dict cjDAO = ClientJobsDAO.get() jobResultsStr = cjDAO.jobGetFields(jobID, ['results'])[0] jobResults = json.loads(jobResultsStr) bestModel = cjDAO.modelsInfo([jobResults["bestModel"]])[0] params = json.loads(bestModel.params) prefix = 'modelParams|sensorParams|encoders|' expectedSwarmId = prefix + ('.' + prefix).join([ 'attendance', 'daynight', 'visitor_winloss']) assert params["particleState"]["swarmId"] == expectedSwarmId, \ params["particleState"]["swarmId"] assert bestModel.optimizedMetric == 406, bestModel.optimizedMetric return def testFieldThreshold(self, onCluster=True, env=None, **kwargs): """ Test minimum field contribution threshold for a field to be included in further sprints """ self._printTestHeader() inst = OneNodeTests(self._testMethodName) return inst.testFieldThreshold(onCluster=True) def testFieldContributions(self, onCluster=True, env=None, **kwargs): """ Try running a simple permutations """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'field_contrib_temporal') # Test it out if env is None: env = dict() env["NTA_TEST_numIterations"] = '99' env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] = \ '%d' % (g_repeatableSwarmMaturityWindow) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, onCluster=onCluster, env=env, maxModels=None, **kwargs) # Get the field contributions from the hypersearch results dict cjDAO = ClientJobsDAO.get() jobResultsStr = cjDAO.jobGetFields(jobID, ['results'])[0] jobResults = json.loads(jobResultsStr) actualFieldContributions = jobResults['fieldContributions'] print "Actual field contributions:", actualFieldContributions expectedFieldContributions = {'consumption': 0.0, 'address': 0.0, 'timestamp_timeOfDay': 20.0, 'timestamp_dayOfWeek': 50.0, 'gym': 10.0} for key, value in expectedFieldContributions.items(): self.assertEqual(actualFieldContributions[key], value, "actual field contribution from field '%s' does not " "match the expected value of %f" % (key, value)) return def testCLAModelV2(self): """ Try running a simple permutations through a real CLA model """ self._printTestHeader() inst = OneNodeTests(self._testMethodName) return inst.testCLAModelV2(onCluster=True, maxModels=4) def testCLAMultistepModel(self): """ Try running a simple permutations through a real CLA model that uses multistep """ self._printTestHeader() inst = OneNodeTests(self._testMethodName) return inst.testCLAMultistepModel(onCluster=True, maxModels=4) def testLegacyCLAMultistepModel(self): """ Try running a simple permutations through a real CLA model that uses multistep """ self._printTestHeader() inst = OneNodeTests(self._testMethodName) return inst.testLegacyCLAMultistepModel(onCluster=True, maxModels=4) def testSimpleV2VariableWaits(self): """ Try running a simple permutations where certain field combinations take longer to complete, this lets us test that we successfully kill models in bad swarms that are still running. """ self._printTestHeader() # NTA_TEST_variableWaits and NTA_TEST_numIterations are watched by the # dummyModelParams() method of the permutations.py file # NTA_TEST_numIterations env = dict() env["NTA_TEST_variableWaits"] ='True' env["NTA_TEST_numIterations"] = '100' inst = OneNodeTests('testSimpleV2') return inst.testSimpleV2(onCluster=True, env=env) def testOrphanedModel(self, modelRange=(0,2)): """ Run a worker on a model for a while, then have it exit before the model finishes. Then, run another worker, which should detect the orphaned model. """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'simpleV2') # NTA_TEST_numIterations is watched by the dummyModelParams() method of # the permutations file. # NTA_TEST_sysExitModelRange is watched by the dummyModelParams() method of # the permutations file. It tells it to do a sys.exit() after so many # iterations. env = dict() env["NTA_TEST_numIterations"] = '99' env["NTA_TEST_sysExitModelRange"] = '%d,%d' % (modelRange[0], modelRange[1]) env["NTA_CONF_PROP_nupic_hypersearch_modelOrphanIntervalSecs"] = '1' env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] \ = '%d' % (g_repeatableSwarmMaturityWindow) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, maxModels=500, onCluster=True, env=env, waitForCompletion=True, maxNumWorkers=4, ) self.assertEqual(minErrScore, 20) self.assertLess(len(resultInfos), 500) return def testTwoOrphanedModels(self, modelRange=(0,2)): """ Test behavior when a worker marks 2 models orphaned at the same time. """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'oneField') # NTA_TEST_numIterations is watched by the dummyModelParams() method of # the permutations file. # NTA_TEST_sysExitModelRange is watched by the dummyModelParams() method of # the permutations file. It tells it to do a sys.exit() after so many # iterations. env = dict() env["NTA_TEST_numIterations"] = '99' env["NTA_TEST_delayModelRange"] = '%d,%d' % (modelRange[0], modelRange[1]) env["NTA_CONF_PROP_nupic_hypersearch_modelOrphanIntervalSecs"] = '1' env["NTA_CONF_PROP_nupic_hypersearch_swarmMaturityWindow"] \ = '%d' % (g_repeatableSwarmMaturityWindow) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, maxModels=100, onCluster=True, env=env, waitForCompletion=True, maxNumWorkers=4, ) self.assertEqual(minErrScore, 50) self.assertLess(len(resultInfos), 100) return def testOrphanedModelGen1(self): """ Run a worker on a model for a while, then have it exit before the model finishes. Then, run another worker, which should detect the orphaned model. """ self._printTestHeader() inst = MultiNodeTests(self._testMethodName) return inst.testOrphanedModel(modelRange=(10,11)) def testOrphanedModelMaxModels(self): """ Test to make sure that the maxModels parameter doesn't include orphaned models. Run a test with maxModels set to 2, where one becomes orphaned. At the end, there should be 3 models in the models table, one of which will be the new model that adopted the orphaned model """ self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'dummyV2') numModels = 5 env = dict() env["NTA_CONF_PROP_nupic_hypersearch_modelOrphanIntervalSecs"] = '3' env['NTA_TEST_max_num_models']=str(numModels) (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, maxModels=numModels, env=env, onCluster=True, waitForCompletion=True, dummyModel={'metricValue': ['25','50'], 'sysExitModelRange': '0, 1', 'iterations': 20, } ) cjDB = ClientJobsDAO.get() self.assertGreaterEqual(len(resultInfos), numModels+1) completionReasons = [x.completionReason for x in resultInfos] self.assertGreaterEqual(completionReasons.count(cjDB.CMPL_REASON_EOF), numModels) self.assertGreaterEqual(completionReasons.count(cjDB.CMPL_REASON_ORPHAN), 1) def testOrphanedModelConnection(self): """Test for the correct behavior when a model uses a different connection id than what is stored in the db. The correct behavior is for the worker to log this as a warning and move on to a new model""" self._printTestHeader() # ----------------------------------------------------------------------- # Trigger "Using connection from another worker" exception inside # ModelRunner # ----------------------------------------------------------------------- expDir = os.path.join(g_myEnv.testSrcExpDir, 'dummy_multi_v2') numModels = 2 env = dict() env["NTA_CONF_PROP_nupic_hypersearch_modelOrphanIntervalSecs"] = '1' (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, maxModels=numModels, env=env, onCluster=True, waitForCompletion=True, dummyModel={'metricValue': ['25','50'], 'sleepModelRange': '0, 1:5', 'iterations': 20, } ) cjDB = ClientJobsDAO.get() self.assertGreaterEqual(len(resultInfos), numModels, "%d were run. Expecting %s"%(len(resultInfos), numModels+1)) completionReasons = [x.completionReason for x in resultInfos] self.assertGreaterEqual(completionReasons.count(cjDB.CMPL_REASON_EOF), numModels) self.assertGreaterEqual(completionReasons.count(cjDB.CMPL_REASON_ORPHAN), 1) def testErredModel(self, modelRange=(6,7)): """ Run a worker on a model for a while, then have it exit before the model finishes. Then, run another worker, which should detect the orphaned model. """ self._printTestHeader() inst = OneNodeTests(self._testMethodName) return inst.testErredModel(onCluster=True) def testJobFailModel(self): """ Run a worker on a model for a while, then have it exit before the model finishes. Then, run another worker, which should detect the orphaned model. """ self._printTestHeader() inst = OneNodeTests(self._testMethodName) return inst.testJobFailModel(onCluster=True) def testTooManyErredModels(self, modelRange=(5,10)): """ Run a worker on a model for a while, then have it exit before the model finishes. Then, run another worker, which should detect the orphaned model. """ self._printTestHeader() inst = OneNodeTests(self._testMethodName) return inst.testTooManyErredModels(onCluster=True) def testSpatialClassification(self): """ Try running a simple permutations """ self._printTestHeader() inst = OneNodeTests(self._testMethodName) return inst.testSpatialClassification(onCluster=True) #, maxNumWorkers=7) def testAlwaysInputPredictedField(self): self._printTestHeader() inst = OneNodeTests(self._testMethodName) return inst.testAlwaysInputPredictedField(onCluster=True) def testFieldThresholdNoPredField(self): self._printTestHeader() inst = OneNodeTests(self._testMethodName) return inst.testFieldThresholdNoPredField(onCluster=True) class ModelMaturityTests(ExperimentTestBaseClass): """ """ # AWS tests attribute required for tagging via automatic test discovery via # nosetests engineAWSClusterTest=True def setUp(self): # Ignore the global hypersearch version setting. Always test hypersearch v2 hsVersion = 2 self.expDir = os.path.join(g_myEnv.testSrcExpDir, 'dummyV%d' %hsVersion) self.hsImp = "v%d" % hsVersion self.env = {'NTA_CONF_PROP_nupic_hypersearch_enableModelTermination':'0', 'NTA_CONF_PROP_nupic_hypersearch_enableModelMaturity':'1', 'NTA_CONF_PROP_nupic_hypersearch_maturityMaxSlope':'0.1', 'NTA_CONF_PROP_nupic_hypersearch_enableSwarmTermination':'0', 'NTA_CONF_PROP_nupic_hypersearch_bestModelMinRecords':'0'} def testMatureInterleaved(self): """ Test to make sure that the best model continues running even when it has matured. The 2nd model (constant) will be marked as mature first and will continue to run till the end. The 2nd model reaches maturity and should stop before all the records are consumed, and should be the best model because it has a lower error """ self._printTestHeader() self.expDir = os.path.join(g_myEnv.testSrcExpDir, 'dummy_multi_v%d' % 2) self.env['NTA_TEST_max_num_models'] = '2' jobID,_,_,_,_ = self.runPermutations(self.expDir, hsImp=self.hsImp, maxModels=2, loggingLevel = g_myEnv.options.logLevel, env = self.env, onCluster = True, dummyModel={'metricFunctions': ['lambda x: -10*math.log10(x+1) +100', 'lambda x: 100.0'], 'delay': [2.0, 0.0 ], 'waitTime':[0.05, 0.01], 'iterations':500, 'experimentDirectory':self.expDir, }) cjDB = ClientJobsDAO.get() modelIDs, records, completionReasons, matured = \ zip(*self.getModelFields( jobID, ['numRecords', 'completionReason', 'engMatured'])) results = cjDB.jobGetFields(jobID, ['results'])[0] results = json.loads(results) self.assertEqual(results['bestModel'], modelIDs[0]) self.assertEqual(records[1], 500) self.assertTrue(records[0] > 100 and records[0] < 500, "Model 2 num records: 100 < %d < 500 " % records[1]) self.assertEqual(completionReasons[1], cjDB.CMPL_REASON_EOF) self.assertEqual(completionReasons[0], cjDB.CMPL_REASON_STOPPED) self.assertTrue(matured[0], True) def testConstant(self): """ Sanity check to make sure that when only 1 model is running, it continues to run even when it has reached maturity """ self._printTestHeader() jobID,_,_,_,_ = self.runPermutations(self.expDir, hsImp=self.hsImp, maxModels=1, loggingLevel = g_myEnv.options.logLevel, env = self.env, dummyModel={'metricFunctions': ['lambda x: 100'], 'iterations':350, 'experimentDirectory':self.expDir, }) cjDB = ClientJobsDAO.get() modelIDs = cjDB.jobGetModelIDs(jobID) dbResults = cjDB.modelsGetFields(modelIDs, ['numRecords', 'completionReason', 'engMatured']) modelIDs = [x[0] for x in dbResults] records = [x[1][0] for x in dbResults] completionReasons = [x[1][1] for x in dbResults] matured = [x[1][2] for x in dbResults] results = cjDB.jobGetFields(jobID, ['results'])[0] results = json.loads(results) self.assertEqual(results['bestModel'], min(modelIDs)) self.assertEqual(records[0], 350) self.assertEqual(completionReasons[0], cjDB.CMPL_REASON_EOF) self.assertEqual(matured[0], True) def getModelFields(self, jobID, fields): cjDB = ClientJobsDAO.get() modelIDs = cjDB.jobGetModelIDs(jobID) modelParams = cjDB.modelsGetFields(modelIDs, ['params']+fields) modelIDs = [e[0] for e in modelParams] modelOrders = [json.loads(e[1][0])['structuredParams']['__model_num'] for e in modelParams] modelFields = [] for f in xrange(len(fields)): modelFields.append([e[1][f+1] for e in modelParams]) modelInfo = zip(modelOrders, modelIDs, *tuple(modelFields)) modelInfo.sort(key=lambda info:info[0]) return [e[1:] for e in sorted(modelInfo, key=lambda info:info[0])] class SwarmTerminatorTests(ExperimentTestBaseClass): """ """ # AWS tests attribute required for tagging via automatic test discovery via # nosetests engineAWSClusterTest=True def setUp(self): self.env = {'NTA_CONF_PROP_nupic_hypersearch_enableModelMaturity':'0', 'NTA_CONF_PROP_nupic_hypersearch_enableModelTermination':'0', 'NTA_CONF_PROP_nupic_hypersearch_enableSwarmTermination':'1', 'NTA_TEST_recordSwarmTerminations':'1'} def testSimple(self, useCluster=False): """Run with one really bad swarm to see if terminator picks it up correctly""" if not g_myEnv.options.runInProc: self.skipTest("Skipping One Node test since runInProc is not specified") self._printTestHeader() expDir = os.path.join(g_myEnv.testSrcExpDir, 'swarm_v2') (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, maxModels=None, onCluster=useCluster, env=self.env, dummyModel={'iterations':200}) cjDB = ClientJobsDAO.get() jobResultsStr = cjDB.jobGetFields(jobID, ['results'])[0] jobResults = json.loads(jobResultsStr) terminatedSwarms = jobResults['terminatedSwarms'] swarmMaturityWindow = int(configuration.Configuration.get( 'nupic.hypersearch.swarmMaturityWindow')) prefix = 'modelParams|sensorParams|encoders|' for swarm, (generation, scores) in terminatedSwarms.iteritems(): if prefix + 'gym' in swarm.split('.'): self.assertEqual(generation, swarmMaturityWindow-1) else: self.assertEqual(generation, swarmMaturityWindow-1+4) def testMaturity(self, useCluster=False): if not g_myEnv.options.runInProc: self.skipTest("Skipping One Node test since runInProc is not specified") self._printTestHeader() self.env['NTA_CONF_PROP_enableSwarmTermination'] = '0' expDir = os.path.join(g_myEnv.testSrcExpDir, 'swarm_maturity_v2') (jobID, jobInfo, resultInfos, metricResults, minErrScore) \ = self.runPermutations(expDir, hsImp='v2', loggingLevel=g_myEnv.options.logLevel, maxModels=None, onCluster=useCluster, env=self.env, dummyModel={'iterations':200}) cjDB = ClientJobsDAO.get() jobResultsStr = cjDB.jobGetFields(jobID, ['results'])[0] jobResults = json.loads(jobResultsStr) terminatedSwarms = jobResults['terminatedSwarms'] swarmMaturityWindow = int(configuration.Configuration.get( 'nupic.hypersearch.swarmMaturityWindow')) prefix = 'modelParams|sensorParams|encoders|' for swarm, (generation, scores) in terminatedSwarms.iteritems(): encoders = swarm.split('.') if prefix + 'gym' in encoders: self.assertEqual(generation, swarmMaturityWindow-1 + 3) elif prefix + 'address' in encoders: self.assertEqual(generation, swarmMaturityWindow-1) else: self.assertEqual(generation, swarmMaturityWindow-1 + 7) def testSimpleMN(self): self.testSimple(useCluster=True) def testMaturityMN(self): self.testMaturity(useCluster=True) def getHypersearchWinningModelID(jobID): """ Parameters: ------------------------------------------------------------------- jobID: jobID of successfully-completed Hypersearch job retval: modelID of the winning model """ cjDAO = ClientJobsDAO.get() jobResults = cjDAO.jobGetFields(jobID, ['results'])[0] print "Hypersearch job results: %r" % (jobResults,) jobResults = json.loads(jobResults) return jobResults['bestModel'] def _executeExternalCmdAndReapStdout(args): """ args: Args list as defined for the args parameter in subprocess.Popen() Returns: result dicionary: { 'exitStatus':<exit-status-of-external-command>, 'stdoutData':"string", 'stderrData':"string" } """ _debugOut(("_executeExternalCmdAndReapStdout: Starting...\n<%s>") % \ (args,)) p = subprocess.Popen(args, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE) _debugOut(("Process started for <%s>") % (args,)) (stdoutData, stderrData) = p.communicate() _debugOut(("Process completed for <%s>: exit status=%s, stdoutDataType=%s, " + \ "stdoutData=<%s>, stderrData=<%s>") % \ (args, p.returncode, type(stdoutData), stdoutData, stderrData)) result = dict( exitStatus = p.returncode, stdoutData = stdoutData, stderrData = stderrData, ) _debugOut(("_executeExternalCmdAndReapStdout for <%s>: result=\n%s") % \ (args, pprint.pformat(result, indent=4))) return result def _debugOut(text): global g_debug if g_debug: print text sys.stdout.flush() return def _getTestList(): """ Get the list of tests that can be run from this module""" suiteNames = [ 'OneNodeTests', 'MultiNodeTests', 'ModelMaturityTests', 'SwarmTerminatorTests', ] testNames = [] for suite in suiteNames: for f in dir(eval(suite)): if f.startswith('test'): testNames.append('%s.%s' % (suite, f)) return testNames class _ArgParser(object): """Class which handles command line arguments and arguments passed to the test """ args = [] @classmethod def _processArgs(cls): """ Parse our command-line args/options and strip them from sys.argv Returns the tuple (parsedOptions, remainingArgs) """ helpString = \ """%prog [options...] [-- unittestoptions...] [suitename.testname | suitename] Run the Hypersearch unit tests. To see unit test framework options, enter: python %prog -- --help Example usages: python %prog MultiNodeTests python %prog MultiNodeTests.testOrphanedModel python %prog -- MultiNodeTests.testOrphanedModel python %prog -- --failfast python %prog -- --failfast OneNodeTests.testOrphanedModel Available suitename.testnames: """ # Update help string allTests = _getTestList() for test in allTests: helpString += "\n %s" % (test) # ============================================================================ # Process command line arguments parser = OptionParser(helpString,conflict_handler="resolve") parser.add_option("--verbosity", default=0, type="int", help="Verbosity level, either 0, 1, 2, or 3 [default: %default].") parser.add_option("--runInProc", action="store_true", default=False, help="Run inProc tests, currently inProc are not being run by default " " running. [default: %default].") parser.add_option("--logLevel", action="store", type="int", default=logging.INFO, help="override default log level. Pass in an integer value that " "represents the desired logging level (10=logging.DEBUG, " "20=logging.INFO, etc.) [default: %default].") parser.add_option("--hs", dest="hsVersion", default=2, type='int', help=("Hypersearch version (only 2 supported; 1 was " "deprecated) [default: %default].")) return parser.parse_args(args=cls.args) @classmethod def parseArgs(cls): """ Returns the test arguments after parsing """ return cls._processArgs()[0] @classmethod def consumeArgs(cls): """ Consumes the test arguments and returns the remaining arguments meant for unittest.man """ return cls._processArgs()[1] def setUpModule(): print "\nCURRENT DIRECTORY:", os.getcwd() initLogging(verbose=True) global g_myEnv # Setup our environment g_myEnv = MyTestEnvironment() if __name__ == '__main__': # Form the command line for the unit test framework # Consume test specific arguments and pass remaining to unittest.main _ArgParser.args = sys.argv[1:] args = [sys.argv[0]] + _ArgParser.consumeArgs() # Run the tests if called using python unittest.main(argv=args)<|fim▁end|>
<|file_name|>Prob2_Part1.py<|end_file_name|><|fim▁begin|># Make sure you name your file with className.py from hint.hint_class_helpers.find_matches import find_matches class Prob2_Part1: """ Author: Shen Ting Ang Date: 10/11/2016<|fim▁hole|> """ def check_attempt(self, params): self.attempt = params['attempt'] #student's attempt self.answer = params['answer'] #solution self.att_tree = params['att_tree'] #attempt tree self.ans_tree = params['ans_tree'] #solution tree matches = find_matches(params) matching_node = [m[0] for m in matches] try: if '^' not in self.attempt: hint='Missing ^ in the answer. ' return hint + 'What is the probability of a specific combination of 3 coin flips? ', '1/2^3' #check if the form of the parse tree has the right #shape: an operator and two leafs that correspond to #the operands elif 'C(' not in self.attempt and '!' not in self.attempt: hint='Missing choose function in the answer. ' return hint + 'How many possible ways are there to get 2 questions correct out of 5 questions? C(5,_)', '2' else: return "","" except Exception: return '','' def get_problems(self): self.problem_list = ["Combinatorics/GrinsteadSnell3.2.18/part1"] return self.problem_list<|fim▁end|>
<|file_name|>opportunity_kraken.py<|end_file_name|><|fim▁begin|>from exchanges import helpers from exchanges import kraken from decimal import Decimal ### Kraken opportunities #### ARBITRAGE OPPORTUNITY 1 def opportunity_1(): sellLTCbuyEUR = kraken.get_current_bid_LTCEUR() sellEURbuyXBT = kraken.get_current_ask_XBTEUR() sellXBTbuyLTC = kraken.get_current_ask_XBTLTC() opport = 1-((sellLTCbuyEUR/sellEURbuyBTX)*sellXBTbuyLTC) return Decimal(opport) def opportunity_2():<|fim▁hole|> sellXBTbuyEUR = kraken.get_current_bid_XBTEUR() opport = 1-(((1/sellEURbuyLTC)/sellLTCbuyXBT)*sellXBTbuyEUR) return Decimal(opport)<|fim▁end|>
sellEURbuyLTC = kraken.get_current_ask_LTCEUR() sellLTCbuyXBT = kraken.get_current_ask_XBTLTC()
<|file_name|>p109.rs<|end_file_name|><|fim▁begin|>//! [Problem 109](https://projecteuler.net/problem=109) solver. #![warn( bad_style, unused, unused_extern_crates, unused_import_braces, unused_qualifications, unused_results )] use polynomial::Polynomial; fn count_way(score: u32) -> u32 { let mut single = vec![0u32; 26]; let mut double = vec![0; 51]; let mut triple = vec![0; 61]; let mut dup = vec![0; 121]; for i in 1..21 {<|fim▁hole|> single[i] = 1; double[i * 2] = 1; triple[i * 3] = 1; dup[i * 2] += 1; dup[i * 4] += 1; dup[i * 6] += 1; } single[25] = 1; double[50] = 1; dup[50] += 1; dup[100] += 1; let single = Polynomial::new(single); let double = Polynomial::new(double); let triple = Polynomial::new(triple); let dup = Polynomial::new(dup); let p_all = &single + &double + &triple; let p1 = double.clone(); let p2 = &double * &p_all; let p3 = &double * Polynomial::new( (&p_all * &p_all + &dup) .data() .iter() .map(|&n| n / 2) .collect(), ); let total = p1 + p2 + p3; total.data().iter().take(score as usize).sum() } fn solve() -> String { count_way(100).to_string() } common::problem!("38182", solve); #[cfg(test)] mod tests { #[test] fn example() { assert_eq!(11, super::count_way(6)); assert_eq!(42336, super::count_way(171)); } }<|fim▁end|>
<|file_name|>test_switch.py<|end_file_name|><|fim▁begin|>"""Tests for AVM Fritz!Box switch component.""" from datetime import timedelta from unittest.mock import Mock from requests.exceptions import HTTPError from homeassistant.components.fritzbox.const import ( ATTR_STATE_DEVICE_LOCKED, ATTR_STATE_LOCKED, DOMAIN as FB_DOMAIN, ) from homeassistant.components.sensor import ( ATTR_STATE_CLASS, DOMAIN as SENSOR_DOMAIN, STATE_CLASS_MEASUREMENT, STATE_CLASS_TOTAL_INCREASING, ) from homeassistant.components.switch import DOMAIN from homeassistant.const import ( ATTR_ENTITY_ID, ATTR_FRIENDLY_NAME, ATTR_UNIT_OF_MEASUREMENT, CONF_DEVICES, ENERGY_KILO_WATT_HOUR, POWER_WATT, SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_ON, STATE_UNAVAILABLE, TEMP_CELSIUS, ) from homeassistant.core import HomeAssistant import homeassistant.util.dt as dt_util from . import FritzDeviceSwitchMock, setup_config_entry from .const import CONF_FAKE_NAME, MOCK_CONFIG from tests.common import async_fire_time_changed ENTITY_ID = f"{DOMAIN}.{CONF_FAKE_NAME}" async def test_setup(hass: HomeAssistant, fritz: Mock): """Test setup of platform.""" device = FritzDeviceSwitchMock() assert await setup_config_entry( hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz ) state = hass.states.get(ENTITY_ID) assert state assert state.state == STATE_ON assert state.attributes[ATTR_FRIENDLY_NAME] == CONF_FAKE_NAME assert state.attributes[ATTR_STATE_DEVICE_LOCKED] == "fake_locked_device" assert state.attributes[ATTR_STATE_LOCKED] == "fake_locked" assert ATTR_STATE_CLASS not in state.attributes state = hass.states.get(f"{SENSOR_DOMAIN}.{CONF_FAKE_NAME}_temperature") assert state assert state.state == "1.23" assert state.attributes[ATTR_FRIENDLY_NAME] == f"{CONF_FAKE_NAME} Temperature" assert state.attributes[ATTR_STATE_DEVICE_LOCKED] == "fake_locked_device" assert state.attributes[ATTR_STATE_LOCKED] == "fake_locked" assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == TEMP_CELSIUS assert state.attributes[ATTR_STATE_CLASS] == STATE_CLASS_MEASUREMENT state = hass.states.get(f"{ENTITY_ID}_humidity") assert state is None state = hass.states.get(f"{SENSOR_DOMAIN}.{CONF_FAKE_NAME}_power_consumption") assert state assert state.state == "5.678" assert state.attributes[ATTR_FRIENDLY_NAME] == f"{CONF_FAKE_NAME} Power Consumption" assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == POWER_WATT assert state.attributes[ATTR_STATE_CLASS] == STATE_CLASS_MEASUREMENT state = hass.states.get(f"{SENSOR_DOMAIN}.{CONF_FAKE_NAME}_total_energy") assert state assert state.state == "1.234" assert state.attributes[ATTR_FRIENDLY_NAME] == f"{CONF_FAKE_NAME} Total Energy" assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == ENERGY_KILO_WATT_HOUR assert state.attributes[ATTR_STATE_CLASS] == STATE_CLASS_TOTAL_INCREASING async def test_turn_on(hass: HomeAssistant, fritz: Mock): """Test turn device on.""" device = FritzDeviceSwitchMock() assert await setup_config_entry( hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz ) assert await hass.services.async_call( DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_ID}, True ) assert device.set_switch_state_on.call_count == 1 async def test_turn_off(hass: HomeAssistant, fritz: Mock): """Test turn device off.""" device = FritzDeviceSwitchMock() assert await setup_config_entry( hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz ) assert await hass.services.async_call( DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_ID}, True ) assert device.set_switch_state_off.call_count == 1 async def test_update(hass: HomeAssistant, fritz: Mock): """Test update without error.""" device = FritzDeviceSwitchMock() assert await setup_config_entry( hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz ) assert fritz().update_devices.call_count == 1 assert fritz().login.call_count == 1 next_update = dt_util.utcnow() + timedelta(seconds=200) async_fire_time_changed(hass, next_update) await hass.async_block_till_done() assert fritz().update_devices.call_count == 2 assert fritz().login.call_count == 1 async def test_update_error(hass: HomeAssistant, fritz: Mock): """Test update with error.""" device = FritzDeviceSwitchMock() fritz().update_devices.side_effect = HTTPError("Boom") assert not await setup_config_entry( hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz ) assert fritz().update_devices.call_count == 1 assert fritz().login.call_count == 1<|fim▁hole|> await hass.async_block_till_done() assert fritz().update_devices.call_count == 2 assert fritz().login.call_count == 2 async def test_assume_device_unavailable(hass: HomeAssistant, fritz: Mock): """Test assume device as unavailable.""" device = FritzDeviceSwitchMock() device.voltage = 0 device.energy = 0 device.power = 0 assert await setup_config_entry( hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz ) state = hass.states.get(ENTITY_ID) assert state assert state.state == STATE_UNAVAILABLE<|fim▁end|>
next_update = dt_util.utcnow() + timedelta(seconds=200) async_fire_time_changed(hass, next_update)
<|file_name|>Action2005.py<|end_file_name|><|fim▁begin|>"""2005_叫地主接口""" import clr, sys from action import * from lang import Lang clr.AddReference('ZyGames.Framework.Game') clr.AddReference('ZyGames.Doudizhu.Lang') clr.AddReference('ZyGames.Doudizhu.Model') clr.AddReference('ZyGames.Doudizhu.Bll') from ZyGames.Framework.Game.Service import * from ZyGames.Doudizhu.Lang import * from ZyGames.Doudizhu.Model import * from ZyGames.Doudizhu.Bll.Logic import * class UrlParam(HttpParam): def __init__(self): HttpParam.__init__(self) self.op = 0 class ActionResult(DataResult): <|fim▁hole|> def getUrlElement(httpGet, parent): urlParam = UrlParam() if httpGet.Contains("op"): urlParam.op = httpGet.GetIntValue("op") else: urlParam.Result = False return urlParam def takeAction(urlParam, parent): actionResult = ActionResult() user = parent.Current.User table = GameRoom.Current.GetTableData(user) if not table or not user: parent.ErrorCode = Lang.getLang("ErrorCode") parent.ErrorInfo = Lang.getLang("LoadError") actionResult.Result = False return actionResult if table.IsCallEnd: parent.ErrorCode = Lang.getLang("ErrorCode") parent.ErrorInfo = Lang.getLang("St2005_CalledIsEnd") actionResult.Result = False return actionResult position = GameTable.Current.GetUserPosition(user, table) if not position: parent.ErrorCode = Lang.getLang("ErrorCode") parent.ErrorInfo = Lang.getLang("LoadError") actionResult.Result = False return actionResult if position.IsAI: position.IsAI = False GameTable.Current.NotifyAutoAiUser(user.UserId, False) isCall = urlParam.op == 1 and True or False GameTable.Current.CallCard(user.Property.PositionId, table, isCall) GameTable.Current.ReStarTableTimer(table) return actionResult def buildPacket(writer, urlParam, actionResult): return True<|fim▁end|>
def __init__(self): DataResult.__init__(self)
<|file_name|>panicking.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use std::any::Any; use std::boxed::FnBox; use std::cell::RefCell; use std::panic::{PanicInfo, take_hook, set_hook}; use std::sync::{Once, ONCE_INIT}; use std::thread; // only set the panic hook once static HOOK_SET: Once = ONCE_INIT; /// TLS data pertaining to how failures should be reported pub struct PanicHandlerLocal { /// failure handler passed through spawn_named_with_send_on_failure pub fail: Box<FnBox(&Any)> } thread_local!(pub static LOCAL_INFO: RefCell<Option<PanicHandlerLocal>> = RefCell::new(None)); <|fim▁hole|>} /// Initiates the custom panic hook /// Should be called in main() after arguments have been parsed pub fn initiate_panic_hook() { // Set the panic handler only once. It is global. HOOK_SET.call_once(|| { // The original backtrace-printing hook. We still want to call this let hook = take_hook(); let new_hook = move |info: &PanicInfo| { let payload = info.payload(); let name = thread::current().name().unwrap_or("<unknown thread>").to_string(); // Notify error handlers stored in LOCAL_INFO if any LOCAL_INFO.with(|i| { if let Some(local_info) = i.borrow_mut().take() { debug!("Thread `{}` failed, notifying error handlers", name); (local_info.fail).call_box((payload, )); } else { hook(&info); } }); }; set_hook(Box::new(new_hook)); }); }<|fim▁end|>
/// Set the thread-local panic hook pub fn set_thread_local_hook(local: Box<FnBox(&Any)>) { LOCAL_INFO.with(|i| *i.borrow_mut() = Some(PanicHandlerLocal { fail: local }));
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls.defaults import * urlpatterns = patterns('', url(r'^$', 'blog.views.entry_list', name="entry-list"), url(r'^archive/(?P<year>\d{4})/$', 'blog.views.entry_archive_year', name="year-archive"), url(r'^archive/(?P<year>\d{4})/(?P<month>\d{1,2})/$', 'blog.views.entry_archive_month', name="month-archive"), url(r'^(?P<slug>[-\w]+)/$', 'blog.views.entry_detail', name="entry-detail"),<|fim▁hole|>)<|fim▁end|>
<|file_name|>volume_scanner.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- """Tests for the volume scanner objects.""" import unittest from dfvfs.lib import errors from dfvfs.path import fake_path_spec from dfvfs.path import os_path_spec from dfvfs.path import qcow_path_spec from dfvfs.path import raw_path_spec from dfvfs.path import tsk_partition_path_spec from dfvfs.path import tsk_path_spec from dfvfs.helpers import source_scanner from dfvfs.helpers import volume_scanner from dfvfs.resolver import resolver from tests import test_lib as shared_test_lib class TestVolumeScannerMediator(volume_scanner.VolumeScannerMediator): """Class that defines a volume scanner mediator for testing.""" _BDE_PASSWORD = u'bde-TEST' def GetPartitionIdentifiers(self, unused_volume_system, volume_identifiers): """Retrieves partition identifiers. This method can be used to prompt the user to provide partition identifiers. Args: volume_system (TSKVolumeSystem): volume system. volume_identifiers (list[str]): volume identifiers. Returns: list[str]: selected partition identifiers, such as "p1", or None. Raises: ScannerError: if the source cannot be processed. """ return volume_identifiers def GetVSSStoreIdentifiers(self, unused_volume_system, volume_identifiers): """Retrieves VSS store identifiers. <|fim▁hole|> Args: volume_system (VShadowVolumeSystem): volume system. volume_identifiers (list[str]): volume identifiers. Returns: list[int]: selected VSS store numbers or None. Raises: ScannerError: if the source cannot be processed. """ return [ int(volume_identifier[3:], 10) for volume_identifier in volume_identifiers] def UnlockEncryptedVolume( self, source_scanner_object, scan_context, locked_scan_node, unused_credentials): """Unlocks an encrypted volume. This method can be used to prompt the user to provide encrypted volume credentials. Args: source_scanner_object (SourceScanner): source scanner. scan_context (SourceScannerContext): source scanner context. locked_scan_node (SourceScanNode): locked scan node. credentials (Credentials): credentials supported by the locked scan node. Returns: bool: True if the volume was unlocked. """ return source_scanner_object.Unlock( scan_context, locked_scan_node.path_spec, u'password', self._BDE_PASSWORD) class VolumeScannerTest(shared_test_lib.BaseTestCase): """Tests for a volume scanner.""" # pylint: disable=protected-access def _GetTestScanNode(self, scan_context): """Retrieves the scan node for testing. Retrieves the first scan node, from the root upwards, with more or less than 1 sub node. Args: scan_context (ScanContext): scan context. Returns: SourceScanNode: scan node. """ scan_node = scan_context.GetRootScanNode() while len(scan_node.sub_nodes) == 1: scan_node = scan_node.sub_nodes[0] return scan_node @shared_test_lib.skipUnlessHasTestFile([u'tsk_volume_system.raw']) def testGetTSKPartitionIdentifiers(self): """Tests the _GetTSKPartitionIdentifiers function.""" # Test with mediator. test_mediator = TestVolumeScannerMediator() test_scanner = volume_scanner.VolumeScanner(test_mediator) test_file = self._GetTestFilePath([u'tsk_volume_system.raw']) scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(test_file) test_scanner._source_scanner.Scan(scan_context) scan_node = self._GetTestScanNode(scan_context) expected_identifiers = sorted([u'p1', u'p2']) identifiers = test_scanner._GetTSKPartitionIdentifiers(scan_node) self.assertEqual(len(identifiers), 2) self.assertEqual(sorted(identifiers), expected_identifiers) # Test without mediator. test_scanner = volume_scanner.VolumeScanner() test_file = self._GetTestFilePath([u'tsk_volume_system.raw']) scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(test_file) test_scanner._source_scanner.Scan(scan_context) scan_node = self._GetTestScanNode(scan_context) expected_identifiers = sorted([u'p1', u'p2']) identifiers = test_scanner._GetTSKPartitionIdentifiers(scan_node) self.assertEqual(len(identifiers), 2) self.assertEqual(sorted(identifiers), expected_identifiers) # Test error conditions. with self.assertRaises(errors.ScannerError): test_scanner._GetTSKPartitionIdentifiers(None) scan_node = source_scanner.SourceScanNode(None) with self.assertRaises(errors.ScannerError): test_scanner._GetTSKPartitionIdentifiers(scan_node) @shared_test_lib.skipUnlessHasTestFile([u'vsstest.qcow2']) def testGetVSSStoreIdentifiers(self): """Tests the _GetVSSStoreIdentifiers function.""" # Test with mediator. test_mediator = TestVolumeScannerMediator() test_scanner = volume_scanner.VolumeScanner(test_mediator) test_file = self._GetTestFilePath([u'vsstest.qcow2']) scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(test_file) test_scanner._source_scanner.Scan(scan_context) scan_node = self._GetTestScanNode(scan_context) expected_identifiers = sorted([1, 2]) identifiers = test_scanner._GetVSSStoreIdentifiers(scan_node.sub_nodes[0]) self.assertEqual(len(identifiers), 2) self.assertEqual(sorted(identifiers), expected_identifiers) # Test without mediator. test_scanner = volume_scanner.VolumeScanner() test_file = self._GetTestFilePath([u'vsstest.qcow2']) scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(test_file) test_scanner._source_scanner.Scan(scan_context) scan_node = self._GetTestScanNode(scan_context) with self.assertRaises(errors.ScannerError): test_scanner._GetVSSStoreIdentifiers(scan_node.sub_nodes[0]) # Test error conditions. with self.assertRaises(errors.ScannerError): test_scanner._GetVSSStoreIdentifiers(None) scan_node = source_scanner.SourceScanNode(None) with self.assertRaises(errors.ScannerError): test_scanner._GetVSSStoreIdentifiers(scan_node) def testScanFileSystem(self): """Tests the _ScanFileSystem function.""" test_scanner = volume_scanner.VolumeScanner() path_spec = fake_path_spec.FakePathSpec(location=u'/') scan_node = source_scanner.SourceScanNode(path_spec) base_path_specs = [] test_scanner._ScanFileSystem(scan_node, base_path_specs) self.assertEqual(len(base_path_specs), 1) # Test error conditions. with self.assertRaises(errors.ScannerError): test_scanner._ScanFileSystem(None, []) scan_node = source_scanner.SourceScanNode(None) with self.assertRaises(errors.ScannerError): test_scanner._ScanFileSystem(scan_node, []) @shared_test_lib.skipUnlessHasTestFile([u'ímynd.dd']) def testScanVolumeRAW(self): """Tests the _ScanVolume function on a RAW image.""" test_scanner = volume_scanner.VolumeScanner() test_file = self._GetTestFilePath([u'ímynd.dd']) scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(test_file) test_scanner._source_scanner.Scan(scan_context) volume_scan_node = scan_context.GetRootScanNode() base_path_specs = [] test_scanner._ScanVolume(scan_context, volume_scan_node, base_path_specs) self.assertEqual(len(base_path_specs), 1) # Test error conditions. scan_context = source_scanner.SourceScannerContext() with self.assertRaises(errors.ScannerError): test_scanner._ScanVolume(scan_context, None, []) volume_scan_node = source_scanner.SourceScanNode(None) with self.assertRaises(errors.ScannerError): test_scanner._ScanVolume(scan_context, volume_scan_node, []) @shared_test_lib.skipUnlessHasTestFile([u'vsstest.qcow2']) def testScanVolumeVSS(self): """Tests the _ScanVolume function on NSS.""" test_mediator = TestVolumeScannerMediator() test_scanner = volume_scanner.VolumeScanner(test_mediator) test_file = self._GetTestFilePath([u'vsstest.qcow2']) scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(test_file) test_scanner._source_scanner.Scan(scan_context) volume_scan_node = self._GetTestScanNode(scan_context) base_path_specs = [] test_scanner._ScanVolume( scan_context, volume_scan_node, base_path_specs) self.assertEqual(len(base_path_specs), 3) @shared_test_lib.skipUnlessHasTestFile([u'ímynd.dd']) def testScanVolumeScanNodeRAW(self): """Tests the _ScanVolumeScanNode function on a RAW image.""" test_scanner = volume_scanner.VolumeScanner() test_file = self._GetTestFilePath([u'ímynd.dd']) scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(test_file) test_scanner._source_scanner.Scan(scan_context) volume_scan_node = scan_context.GetRootScanNode() base_path_specs = [] test_scanner._ScanVolumeScanNode( scan_context, volume_scan_node, base_path_specs) self.assertEqual(len(base_path_specs), 1) # Test error conditions. scan_context = source_scanner.SourceScannerContext() with self.assertRaises(errors.ScannerError): test_scanner._ScanVolumeScanNode(scan_context, None, []) volume_scan_node = source_scanner.SourceScanNode(None) with self.assertRaises(errors.ScannerError): test_scanner._ScanVolumeScanNode(scan_context, volume_scan_node, []) @shared_test_lib.skipUnlessHasTestFile([u'vsstest.qcow2']) def testScanVolumeScanNode(self): """Tests the _ScanVolumeScanNode function on VSS.""" test_mediator = TestVolumeScannerMediator() test_scanner = volume_scanner.VolumeScanner(test_mediator) # Test VSS root. test_file = self._GetTestFilePath([u'vsstest.qcow2']) scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(test_file) test_scanner._source_scanner.Scan(scan_context) volume_scan_node = self._GetTestScanNode(scan_context) base_path_specs = [] test_scanner._ScanVolumeScanNode( scan_context, volume_scan_node, base_path_specs) self.assertEqual(len(base_path_specs), 0) # Test VSS volume. test_file = self._GetTestFilePath([u'vsstest.qcow2']) scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(test_file) test_scanner._source_scanner.Scan(scan_context) volume_scan_node = self._GetTestScanNode(scan_context) base_path_specs = [] test_scanner._ScanVolumeScanNode( scan_context, volume_scan_node.sub_nodes[0], base_path_specs) self.assertEqual(len(base_path_specs), 2) @shared_test_lib.skipUnlessHasTestFile([u'bdetogo.raw']) def testScanVolumeScanNodeEncrypted(self): """Tests the _ScanVolumeScanNodeEncrypted function.""" resolver.Resolver.key_chain.Empty() test_mediator = TestVolumeScannerMediator() test_scanner = volume_scanner.VolumeScanner(test_mediator) test_file = self._GetTestFilePath([u'bdetogo.raw']) scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(test_file) test_scanner._source_scanner.Scan(scan_context) volume_scan_node = self._GetTestScanNode(scan_context) base_path_specs = [] test_scanner._ScanVolumeScanNode( scan_context, volume_scan_node.sub_nodes[0], base_path_specs) self.assertEqual(len(base_path_specs), 1) # Test error conditions. path_spec = fake_path_spec.FakePathSpec(location=u'/') scan_node = source_scanner.SourceScanNode(path_spec) with self.assertRaises(errors.ScannerError): test_scanner._ScanVolumeScanNodeEncrypted(scan_node, None, []) volume_scan_node = source_scanner.SourceScanNode(None) with self.assertRaises(errors.ScannerError): test_scanner._ScanVolumeScanNodeEncrypted(scan_node, volume_scan_node, []) @shared_test_lib.skipUnlessHasTestFile([u'vsstest.qcow2']) def testScanVolumeScanNodeVSS(self): """Tests the _ScanVolumeScanNodeVSS function.""" test_mediator = TestVolumeScannerMediator() test_scanner = volume_scanner.VolumeScanner(test_mediator) # Test root. test_file = self._GetTestFilePath([u'vsstest.qcow2']) scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(test_file) test_scanner._source_scanner.Scan(scan_context) volume_scan_node = scan_context.GetRootScanNode() base_path_specs = [] test_scanner._ScanVolumeScanNodeVSS(volume_scan_node, base_path_specs) self.assertEqual(len(base_path_specs), 0) # Test VSS volume. test_file = self._GetTestFilePath([u'vsstest.qcow2']) scan_context = source_scanner.SourceScannerContext() scan_context.OpenSourcePath(test_file) test_scanner._source_scanner.Scan(scan_context) volume_scan_node = self._GetTestScanNode(scan_context) base_path_specs = [] test_scanner._ScanVolumeScanNodeVSS( volume_scan_node.sub_nodes[0], base_path_specs) self.assertEqual(len(base_path_specs), 2) # Test error conditions. with self.assertRaises(errors.ScannerError): test_scanner._ScanVolumeScanNodeVSS(None, []) volume_scan_node = source_scanner.SourceScanNode(None) with self.assertRaises(errors.ScannerError): test_scanner._ScanVolumeScanNodeVSS(volume_scan_node, []) @shared_test_lib.skipUnlessHasTestFile([u'ímynd.dd']) def testGetBasePathSpecsRAW(self): """Tests the GetBasePathSpecs function on a RAW image.""" test_file = self._GetTestFilePath([u'ímynd.dd']) test_scanner = volume_scanner.VolumeScanner() test_os_path_spec = os_path_spec.OSPathSpec(location=test_file) test_raw_path_spec = raw_path_spec.RawPathSpec(parent=test_os_path_spec) test_tsk_path_spec = tsk_path_spec.TSKPathSpec( location=u'/', parent=test_raw_path_spec) expected_base_path_specs = [test_tsk_path_spec.comparable] base_path_specs = test_scanner.GetBasePathSpecs(test_file) base_path_specs = [ base_path_spec.comparable for base_path_spec in base_path_specs] self.assertEqual(base_path_specs, expected_base_path_specs) # Test error conditions. with self.assertRaises(errors.ScannerError): test_scanner.GetBasePathSpecs(None) with self.assertRaises(errors.ScannerError): test_scanner.GetBasePathSpecs(u'/bogus') @shared_test_lib.skipUnlessHasTestFile([u'tsk_volume_system.raw']) def testGetBasePathSpecsPartitionedImage(self): """Tests the GetBasePathSpecs function on a partitioned image.""" test_file = self._GetTestFilePath([u'tsk_volume_system.raw']) test_scanner = volume_scanner.VolumeScanner() test_os_path_spec = os_path_spec.OSPathSpec(location=test_file) test_raw_path_spec = raw_path_spec.RawPathSpec(parent=test_os_path_spec) test_tsk_partition_path_spec = tsk_partition_path_spec.TSKPartitionPathSpec( location=u'/p2', part_index=6, start_offset=0x0002c000, parent=test_raw_path_spec) test_tsk_path_spec = tsk_path_spec.TSKPathSpec( location=u'/', parent=test_tsk_partition_path_spec) expected_base_path_specs = [test_tsk_path_spec.comparable] base_path_specs = test_scanner.GetBasePathSpecs(test_file) base_path_specs = [ base_path_spec.comparable for base_path_spec in base_path_specs] self.assertEqual(base_path_specs, expected_base_path_specs) @shared_test_lib.skipUnlessHasTestFile([u'testdir_os']) def testGetBasePathSpecsDirectory(self): """Tests the GetBasePathSpecs function on a directory.""" test_file = self._GetTestFilePath([u'testdir_os']) test_scanner = volume_scanner.VolumeScanner() test_os_path_spec = os_path_spec.OSPathSpec(location=test_file) expected_base_path_specs = [test_os_path_spec.comparable] base_path_specs = test_scanner.GetBasePathSpecs(test_file) base_path_specs = [ base_path_spec.comparable for base_path_spec in base_path_specs] self.assertEqual(base_path_specs, expected_base_path_specs) @shared_test_lib.skipUnlessHasTestFile([u'windows_volume.qcow2']) class WindowsVolumeScannerTest(shared_test_lib.BaseTestCase): """Tests for a Windows volume scanner.""" # pylint: disable=protected-access def testScanFileSystem(self): """Tests the _ScanFileSystem function.""" test_scanner = volume_scanner.WindowsVolumeScanner() test_file = self._GetTestFilePath([u'windows_volume.qcow2']) test_os_path_spec = os_path_spec.OSPathSpec(location=test_file) test_qcow_path_spec = qcow_path_spec.QCOWPathSpec(parent=test_os_path_spec) test_tsk_path_spec = tsk_path_spec.TSKPathSpec( location=u'/', parent=test_qcow_path_spec) scan_node = source_scanner.SourceScanNode(test_tsk_path_spec) base_path_specs = [] test_scanner._ScanFileSystem(scan_node, base_path_specs) self.assertEqual(len(base_path_specs), 1) # Test error conditions. with self.assertRaises(errors.ScannerError): test_scanner._ScanFileSystem(None, []) scan_node = source_scanner.SourceScanNode(None) with self.assertRaises(errors.ScannerError): test_scanner._ScanFileSystem(scan_node, []) # _ScanFileSystemForWindowsDirectory is tested by testScanFileSystem. def testOpenFile(self): """Tests the OpenFile function.""" test_file = self._GetTestFilePath([u'windows_volume.qcow2']) test_scanner = volume_scanner.WindowsVolumeScanner() result = test_scanner.ScanForWindowsVolume(test_file) self.assertTrue(result) file_object = test_scanner.OpenFile( u'C:\\Windows\\System32\\config\\syslog') self.assertIsNotNone(file_object) file_object.close() file_object = test_scanner.OpenFile(u'C:\\bogus') self.assertIsNone(file_object) with self.assertRaises(IOError): test_scanner.OpenFile(u'C:\\Windows\\System32\\config') @shared_test_lib.skipUnlessHasTestFile([u'tsk_volume_system.raw']) def testScanForWindowsVolume(self): """Tests the ScanForWindowsVolume function.""" test_file = self._GetTestFilePath([u'tsk_volume_system.raw']) test_scanner = volume_scanner.WindowsVolumeScanner() result = test_scanner.ScanForWindowsVolume(test_file) self.assertFalse(result) test_file = self._GetTestFilePath([u'windows_volume.qcow2']) test_scanner = volume_scanner.WindowsVolumeScanner() result = test_scanner.ScanForWindowsVolume(test_file) self.assertTrue(result) if __name__ == '__main__': unittest.main()<|fim▁end|>
This method can be used to prompt the user to provide VSS store identifiers.
<|file_name|>pretty.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>} #[derive(Clone, Debug)] pub enum UserIdentifiedItem { ItemViaNode(ast::NodeId), ItemViaPath(Vec<String>), }<|fim▁end|>
use syntax::ast; pub enum PpMode {
<|file_name|>issue_4086.rs<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
#[cfg(any())] extern "C++" {}
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>//! af-cuda-interop package is to used only when the application intends to mix //! arrayfire code with raw CUDA code. use arrayfire::{handle_error_general, AfError}; use cuda_runtime_sys::cudaStream_t; use libc::c_int; extern "C" { fn afcu_get_native_id(native_id: *mut c_int, id: c_int) -> c_int; fn afcu_set_native_id(native_id: c_int) -> c_int; fn afcu_get_stream(out: *mut cudaStream_t, id: c_int) -> c_int; } /// Get active device's id in CUDA context /// /// # Parameters /// /// - `id` is the integer identifier of concerned CUDA device as per ArrayFire context /// /// # Return Values /// /// Integer identifier of device in CUDA context<|fim▁hole|> let err_val = afcu_get_native_id(&mut temp as *mut c_int, id); handle_error_general(AfError::from(err_val)); temp } } /// Set active device using CUDA context's id /// /// # Parameters /// /// - `id` is the identifier of GPU in CUDA context pub fn set_device_native_id(native_id: i32) { unsafe { let err_val = afcu_set_native_id(native_id); handle_error_general(AfError::from(err_val)); } } /// Get CUDA stream of active CUDA device /// /// # Parameters /// /// - `id` is the identifier of device in ArrayFire context /// /// # Return Values /// /// [cudaStream_t](https://docs.rs/cuda-runtime-sys/0.3.0-alpha.1/cuda_runtime_sys/type.cudaStream_t.html) handle. pub fn get_stream(native_id: i32) -> cudaStream_t { unsafe { let mut ret_val: cudaStream_t = std::ptr::null_mut(); let err_val = afcu_get_stream(&mut ret_val as *mut cudaStream_t, native_id); handle_error_general(AfError::from(err_val)); ret_val } }<|fim▁end|>
pub fn get_device_native_id(id: i32) -> i32 { unsafe { let mut temp: i32 = 0;
<|file_name|>gene_set.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # Author: Francois-Jose Serra # Creation Date: 2010/08/17 16:46:02 # easy_install fisher/ if not also in extra stats package try: from extra_stats.fisher import pvalue except ImportError: from fisher import pvalue # in my extra_stats package import sys sys.path.append('/home/francisco/toolbox/utils/') from extra_stats.fdr import bh_qvalues from numpy import log from optparse import OptionParser from bisect import bisect_left __version__ = "0.10" __title__ = "gene set tool kit v%s" % __version__ class Gene_set: ''' Fatiscan with upper case, it is an object. from gene_set import Gene_set infile = '/home/francisco/project/functional_anaysis_vs_evolution/v_56/Mammals/0_dataset/Homo_sapiens.dN_val' annot = '/home/francisco/project/functional_anaysis_vs_evolution/v_56/Mammals/funcDB/biol_proc_2-8.annot' gaga = Gene_set (infile, annot) lala = gaga.run_gsea() ''' def __init__(self, infile, annot, partitions=30, use_order=True): ''' init function, what is done when object is called. ''' # get gene list and corresponding values self.infile = infile self.use_order = use_order self.genes, self.values, self.order = self._parse_infile() self.annot = self._parse_annot(annot) # sort genes in annot by their values... # useful to know which gene in list1 or list2 self.annot = self._order_genes_in_annot() self.gsea_dic = {} def _parse_infile(self): ''' parse in file in format: geneID_1 <tab> value1 geneID_2 <tab> value2 ... genes should be ordered by value returns genes, values and order of values ''' genes, values = zip (*sorted ((i.strip().split('\t') \ for i in open(self.infile)), \ key=lambda x: float(x[1]))) values = map (float, values) if self.use_order: order = map (values.index, values) else: order = values[:] return genes, dict (zip (genes, values)), order def _parse_annot(self, annot): ''' parse annotations file in format: annotationsA <tab> geneID_1 annotationsA <tab> geneID_2 ... speed notes: * iterator on for for * dico ''' dico = {} for gene, annot in (i.strip().split('\t') for i in open (annot)): # only store genes that we have in our list if self.values.has_key(gene): dico.setdefault (annot, []).append (gene) return dico def _order_genes_in_annot(self): ''' order genes in annot dict by their values ''' dico = {} for annot in self.annot.iterkeys(): dico[annot] = sorted (self.annot[annot], \ key=lambda x: self.values[x]) return dico def run_fatigo (self, list1, list2): ''' computes an enrichment test between two lists of genes ''' def run_gsea (self, partitions=30): ''' run gsea needs python fisher, and fdr from extra stats speed notes: * making annot genes and order local does not significantly speed up process. * putting external method with bissect part neither * no more ideas... ''' pvalues = [] pos = [] total_len = len (self.genes) # create local functions to skip call... faster append_pos = pos.append append_pv = pvalues.append order_index = self.order.index iter_annot = self.annot.iteritems # intialize dict dico = dict (((k, []) for k in self.annot)) # define part size. order[-1] == max(order) rank = float (len (self.order)-1)/partitions # define cutoff value for each partition dico['thresh'] = (bisect_left (self.order, rank * (part + 1)) \ for part in xrange(partitions)) # start fishers for part in xrange(partitions):<|fim▁hole|> continue len_genes1 = len (genes1) len_genes2 = total_len - len_genes1 local_def = genes1.intersection for annot, annot_genes in iter_annot(): append_pos ((part, annot)) #p1 = len (annot_genes & genes1) p1 = len (local_def ( set (annot_genes))) p2 = len (annot_genes) - p1 n1 = len_genes1 - p1 n2 = len_genes2 - p2 dico[annot].append ({'p1': p1, 'n1': n1, 'p2': p2, 'n2': n2}) append_pv (pvalue (p1, n1, p2, n2).two_tail) # compute adjustment of pvalues qvalues = iter (bh_qvalues (pvalues)) pvalues = iter (pvalues) for part, annot in pos: dico[annot][part]['apv'] = qvalues.next() dico[annot][part]['pv' ] = pvalues.next() # store this in Gene_set self.gsea_dic = dico def write_gsea (self, outfile, max_apv=1, all_parts=False): ''' write to file, or pickle ''' def _get_string(dico, annot): ''' get string from gsea_dic current value ''' string = [] string.append (dico['p1' ]) string.append (dico['n1' ]) string.append (dico['p2' ]) string.append (dico['n2' ]) odd = 1 try: odd = log ((float (dico['p1'])\ /dico['n1'])\ /(float (dico['p2'])\ /dico['n2'])) except ZeroDivisionError: if dico['p2'] == 0: odd = float ('inf') elif dico['n1'] == 0: print >> stderr, "WARNING: Empty partition: " \ + str(part) odd = float ('-inf') elif dico['n2'] == 0: print >> stderr, "WARNING: Empty partition: " \ + str(part) odd = float ('inf') string.append (odd) string.append (dico ['pv' ]) string.append (dico ['apv']) string.append (', '.join (list (annot[:dico['p1']]))) string.append (', '.join (list (annot[dico['p1']:]))) return map (str, string) if self.gsea_dic == {}: print >> stderr, 'ERROR: you do not have run GSEA yet...' raise Exception cols = ['#term', 'part', 'term_size', \ 'list1_positives', 'list1_negatives', \ 'list2_positives', 'list2_negatives', \ 'odds_ratio_log', 'pvalue', 'adj_pvalue', \ 'list1_positive_ids', 'list2_positive_ids'] out = open (outfile, 'w') out.write('\t'.join(cols)+'\n') if all_parts: for annot in filter (lambda x: x!= 'thresh', self.gsea_dic): for part in xrange(30): if self.gsea_dic[annot][part]['apv'] > max_apv: continue string = _get_string(self.gsea_dic[annot][part], \ self.annot[annot]) out.write ('\t'.join ([annot] + [str(part)] +string) + '\n') else: for annot in filter (lambda x: x!= 'thresh', self.gsea_dic): part = min (self.gsea_dic[annot], key=lambda x: x['apv']) if max_apv < part['apv']: continue string = _get_string(part, self.annot[annot]) part = self.gsea_dic[annot].index(part) out.write ('\t'.join ([annot] + [str(part)] + string) + '\n') out.close() def main (): ''' for direct command line call ''' opts = get_options() gene_set = Gene_set(opts.infile, opts.annot) gene_set.run_gsea(partitions = opts.partitions) if opts.pickle: from cPickle import dump dump (open (outfile, 'w'), self) else: gene_set.write_gsea(opts.outfile, all_parts=opts.all_parts, \ max_apv=float(opts.max_apv)) def get_options(): ''' parse option from call ''' parser = OptionParser( version=__title__, usage="%prog [options] file [options [file ...]]", description="""\ Gene set enrichment analysis . . ******************************************** """ ) parser.add_option('-i', dest='infile', metavar="PATH", \ help='''path to input file with a ranked list, in format: geneID_1 <tab> value1 geneID_2 <tab> value2 ... ''') parser.add_option('-a', dest='annot', metavar="PATH", \ help='''path to annotations file in format: annotationsA <tab> geneID_1 annotationsA <tab> geneID_2 ... ''') parser.add_option('-o', dest='outfile', metavar="PATH", \ help='path to output file tab separated file') parser.add_option('-R', '--use_rank', action='store_true', \ dest='use_order', default=False, \ help=\ '''[%default] Use rank of genes in stead of provided value to determine thresh value for delimiting different partitions.''') parser.add_option('-p', metavar="INT", dest='partitions', default=30, \ help='''[%default] Number of partitions.''') parser.add_option('--max_apv', metavar="FLOAT", dest='max_apv', default=1, \ help='''[%default] Only write to outfile results with adjusted pvalue higher than specified value.''') parser.add_option('--long', dest='all_parts', action='store_true', default=False, \ help='''[%default] Write results for all partitions.''') parser.add_option('--pickle', action='store_true', \ dest='pickle', default=False, \ help='[%default] Store results in python dict (cPickle) format.') parser.add_option('--verbose', action='store_true', \ dest='verb', default=False, \ help=\ '[%default] Talk a bit... ') opts = parser.parse_args()[0] if not opts.infile or not opts.annot or not opts.outfile: exit(parser.print_help()) return opts if __name__ == "__main__": exit(main())<|fim▁end|>
try: genes1 = set (self.genes[:order_index (dico['thresh'].next())]) except ValueError:
<|file_name|>switch.py<|end_file_name|><|fim▁begin|>"""Support for Firmata switch output.""" import logging from homeassistant.components.switch import SwitchEntity from homeassistant.config_entries import ConfigEntry from homeassistant.const import CONF_NAME from homeassistant.core import HomeAssistant from .const import ( CONF_INITIAL_STATE, CONF_NEGATE_STATE, CONF_PIN, CONF_PIN_MODE, DOMAIN,<|fim▁hole|> _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities ) -> None: """Set up the Firmata switches.""" new_entities = [] board = hass.data[DOMAIN][config_entry.entry_id] for switch in board.switches: pin = switch[CONF_PIN] pin_mode = switch[CONF_PIN_MODE] initial = switch[CONF_INITIAL_STATE] negate = switch[CONF_NEGATE_STATE] api = FirmataBinaryDigitalOutput(board, pin, pin_mode, initial, negate) try: api.setup() except FirmataPinUsedException: _LOGGER.error( "Could not setup switch on pin %s since pin already in use.", switch[CONF_PIN], ) continue name = switch[CONF_NAME] switch_entity = FirmataSwitch(api, config_entry, name, pin) new_entities.append(switch_entity) if new_entities: async_add_entities(new_entities) class FirmataSwitch(FirmataPinEntity, SwitchEntity): """Representation of a switch on a Firmata board.""" async def async_added_to_hass(self) -> None: """Set up a switch.""" await self._api.start_pin() self.async_write_ha_state() @property def is_on(self) -> bool: """Return true if switch is on.""" return self._api.is_on async def async_turn_on(self, **kwargs) -> None: """Turn on switch.""" _LOGGER.debug("Turning switch %s on", self._name) await self._api.turn_on() self.async_write_ha_state() async def async_turn_off(self, **kwargs) -> None: """Turn off switch.""" _LOGGER.debug("Turning switch %s off", self._name) await self._api.turn_off() self.async_write_ha_state()<|fim▁end|>
) from .entity import FirmataPinEntity from .pin import FirmataBinaryDigitalOutput, FirmataPinUsedException
<|file_name|>stop.go<|end_file_name|><|fim▁begin|>// Copyright © 2017 NAME HERE <EMAIL ADDRESS> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "fmt" "github.com/spf13/cobra" ) // stopCmd represents the stop command var stopCmd = &cobra.Command{ Use: "stop", Short: "A brief description of your command", Long: `A longer description that spans multiple lines and likely contains examples and usage of using your command. For example: Cobra is a CLI library for Go that empowers applications. This application is a tool to generate the needed files to quickly create a Cobra application.`, Run: func(cmd *cobra.Command, args []string) { fmt.Println("stop called") }, } func init() { RootCmd.AddCommand(stopCmd) // Here you will define your flags and configuration settings. <|fim▁hole|> // Cobra supports Persistent Flags which will work for this command // and all subcommands, e.g.: // stopCmd.PersistentFlags().String("foo", "", "A help for foo") // Cobra supports local flags which will only run when this command // is called directly, e.g.: // stopCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") }<|fim▁end|>
<|file_name|>hir_id_validator.rs<|end_file_name|><|fim▁begin|>use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::sync::{par_iter, Lock, ParallelIterator}; use rustc_hir as hir; use rustc_hir::def_id::{LocalDefId, CRATE_DEF_INDEX}; use rustc_hir::intravisit; use rustc_hir::itemlikevisit::ItemLikeVisitor; use rustc_hir::{HirId, ItemLocalId}; use rustc_middle::hir::map::Map; use rustc_middle::ty::TyCtxt; pub fn check_crate(tcx: TyCtxt<'_>) { tcx.dep_graph.assert_ignored(); if tcx.sess.opts.debugging_opts.hir_stats { crate::hir_stats::print_hir_stats(tcx); } let errors = Lock::new(Vec::new()); let hir_map = tcx.hir(); par_iter(&hir_map.krate().modules).for_each(|(&module_id, _)| { hir_map .visit_item_likes_in_module(module_id, &mut OuterVisitor { hir_map, errors: &errors }); }); let errors = errors.into_inner(); if !errors.is_empty() { let message = errors.iter().fold(String::new(), |s1, s2| s1 + "\n" + s2); tcx.sess.delay_span_bug(rustc_span::DUMMY_SP, &message); } } struct HirIdValidator<'a, 'hir> { hir_map: Map<'hir>, owner: Option<LocalDefId>, hir_ids_seen: FxHashSet<ItemLocalId>, errors: &'a Lock<Vec<String>>, } struct OuterVisitor<'a, 'hir> { hir_map: Map<'hir>, errors: &'a Lock<Vec<String>>, } impl<'a, 'hir> OuterVisitor<'a, 'hir> { fn new_inner_visitor(&self, hir_map: Map<'hir>) -> HirIdValidator<'a, 'hir> { HirIdValidator { hir_map, owner: None, hir_ids_seen: Default::default(), errors: self.errors, } } } impl<'a, 'hir> ItemLikeVisitor<'hir> for OuterVisitor<'a, 'hir> { fn visit_item(&mut self, i: &'hir hir::Item<'hir>) { let mut inner_visitor = self.new_inner_visitor(self.hir_map); inner_visitor.check(i.hir_id(), |this| intravisit::walk_item(this, i)); } fn visit_trait_item(&mut self, i: &'hir hir::TraitItem<'hir>) { let mut inner_visitor = self.new_inner_visitor(self.hir_map); inner_visitor.check(i.hir_id(), |this| intravisit::walk_trait_item(this, i)); } fn visit_impl_item(&mut self, i: &'hir hir::ImplItem<'hir>) { let mut inner_visitor = self.new_inner_visitor(self.hir_map); inner_visitor.check(i.hir_id(), |this| intravisit::walk_impl_item(this, i)); } fn visit_foreign_item(&mut self, i: &'hir hir::ForeignItem<'hir>) { let mut inner_visitor = self.new_inner_visitor(self.hir_map); inner_visitor.check(i.hir_id(), |this| intravisit::walk_foreign_item(this, i)); } } impl<'a, 'hir> HirIdValidator<'a, 'hir> { #[cold] #[inline(never)] fn error(&self, f: impl FnOnce() -> String) { self.errors.lock().push(f()); } fn check<F: FnOnce(&mut HirIdValidator<'a, 'hir>)>(&mut self, hir_id: HirId, walk: F) { assert!(self.owner.is_none()); let owner = self.hir_map.local_def_id(hir_id); self.owner = Some(owner); walk(self); if owner.local_def_index == CRATE_DEF_INDEX { return; } // There's always at least one entry for the owning item itself let max = self .hir_ids_seen .iter() .map(|local_id| local_id.as_usize()) .max() .expect("owning item has no entry"); if max != self.hir_ids_seen.len() - 1 { // Collect the missing ItemLocalIds let missing: Vec<_> = (0..=max as u32) .filter(|&i| !self.hir_ids_seen.contains(&ItemLocalId::from_u32(i))) .collect(); // Try to map those to something more useful let mut missing_items = Vec::with_capacity(missing.len()); for local_id in missing { let hir_id = HirId { owner, local_id: ItemLocalId::from_u32(local_id) }; trace!("missing hir id {:#?}", hir_id); missing_items.push(format!( "[local_id: {}, owner: {}]", local_id, self.hir_map.def_path(owner).to_string_no_crate_verbose() )); } self.error(|| { format!( "ItemLocalIds not assigned densely in {}. \ Max ItemLocalId = {}, missing IDs = {:?}; seens IDs = {:?}", self.hir_map.def_path(owner).to_string_no_crate_verbose(), max, missing_items, self.hir_ids_seen .iter() .map(|&local_id| HirId { owner, local_id }) .map(|h| format!("({:?} {})", h, self.hir_map.node_to_string(h))) .collect::<Vec<_>>() ) }); } } } impl<'a, 'hir> intravisit::Visitor<'hir> for HirIdValidator<'a, 'hir> { type Map = Map<'hir>; fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> { intravisit::NestedVisitorMap::OnlyBodies(self.hir_map)<|fim▁hole|> fn visit_id(&mut self, hir_id: HirId) { let owner = self.owner.expect("no owner"); if owner != hir_id.owner { self.error(|| { format!( "HirIdValidator: The recorded owner of {} is {} instead of {}", self.hir_map.node_to_string(hir_id), self.hir_map.def_path(hir_id.owner).to_string_no_crate_verbose(), self.hir_map.def_path(owner).to_string_no_crate_verbose() ) }); } self.hir_ids_seen.insert(hir_id.local_id); } fn visit_impl_item_ref(&mut self, _: &'hir hir::ImplItemRef<'hir>) { // Explicitly do nothing here. ImplItemRefs contain hir::Visibility // values that actually belong to an ImplItem instead of the ItemKind::Impl // we are currently in. So for those it's correct that they have a // different owner. } fn visit_foreign_item_ref(&mut self, _: &'hir hir::ForeignItemRef<'hir>) { // Explicitly do nothing here. ForeignItemRefs contain hir::Visibility // values that actually belong to an ForeignItem instead of the ItemKind::ForeignMod // we are currently in. So for those it's correct that they have a // different owner. } }<|fim▁end|>
}
<|file_name|>test_pep8.py<|end_file_name|><|fim▁begin|>from __future__ import with_statement import unittest import re import os import sys import cStringIO as StringIO from distutils.version import LooseVersion import pep8 PEP8_VERSION = LooseVersion(pep8.__version__) PEP8_MAX_OLD_VERSION = LooseVersion('1.0.1') PEP8_MIN_NEW_VERSION = LooseVersion('1.3.3') # Check for supported version of the pep8 library, # which is anything <= 1.0.1, or >= 1.3.3. (yes, there is a gap) if (PEP8_VERSION > PEP8_MAX_OLD_VERSION and PEP8_VERSION < PEP8_MIN_NEW_VERSION): raise ImportError('Bad pep8 version, must be >= %s or <= %s.' % (PEP8_MIN_NEW_VERSION, PEP8_MAX_OLD_VERSION)) # Skip these pep8 errors/warnings PEP8_IGNORE = ( 'E123', 'E126', 'E127', 'E128', 'E501', ) # Any file or directory(including subdirectories) matching regex will be skipped. NAMES_TO_SKIP = ( '.svn', '.git', 'docs', 'dist', 'build', ) NAMES_TO_SKIP = [re.compile('^%s' % n) for n in NAMES_TO_SKIP] class RedirectIO(object): '''Contextmanager to redirect stdout/stderr.''' def __init__(self, stdout=None, stderr=None): self._stdout = stdout or sys.stdout self._stderr = stderr or sys.stderr def __enter__(self): sys.stdout.flush() sys.stderr.flush() self.old_stdout, self.old_stderr = sys.stdout, sys.stderr sys.stdout, sys.stderr = self._stdout, self._stderr def __exit__(self, exc_type, exc_value, traceback): self._stdout.flush() self._stderr.flush() sys.stdout, sys.stderr = self.old_stdout, self.old_stderr class Pep8TestCase(unittest.TestCase): @classmethod def setUpClass(self): # set up newer pep8 options if PEP8_VERSION >= PEP8_MIN_NEW_VERSION: self.options = pep8.StyleGuide().options self.options.ignore = self.options.ignore + PEP8_IGNORE else: self.options = None # Populate pep8 test methods, one per non-skipped .py file found. ROOT = os.getcwd() for (dirpath, dirnames, filenames) in os.walk(ROOT, followlinks=True): for regex in NAMES_TO_SKIP: paths = dirnames[:] # lame list copy for path in paths: if regex.match(path): dirnames.remove(path) files = filenames[:] # lame list copy for filename in [f for f in filenames if not regex.match(f)]: if not filename.endswith('.py'): continue fullpath = os.path.join(dirpath, filename) if PEP8_VERSION < PEP8_MIN_NEW_VERSION: def closure(self, fullpath=fullpath): pep8.process_options([ '--first', fullpath, '--ignore', ','.join(PEP8_IGNORE)], ) pep8.input_file(fullpath) if len(pep8.get_statistics()): self.fail('PEP8 issue in "%s"' % fullpath) else: def closure(self, fullpath=fullpath): checker = pep8.Checker(fullpath, options=self.options) capture = StringIO.StringIO() with RedirectIO(capture): errors = checker.check_all() if errors > 0: capture.seek(0) errors = list() for error in capture.readlines(): errors.append('./%s' % error[len(ROOT) + 1:].strip()) self.fail('PEP8 issue in "%s"\n%s' % (fullpath, '\n'.join(errors)))<|fim▁hole|> del closure # Necessary so nosetests doesn't make testcase out of it.<|fim▁end|>
relativepath = fullpath[len(ROOT) + 1:] func_name = 'test_pep8./%s' % relativepath # Surprised invalid identifiers work. closure.__name__ = func_name setattr(Pep8TestCase, func_name, closure)
<|file_name|>test_data.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Copyright (C) Duncan Macleod (2013) # # This file is part of GWSumm. # # GWSumm is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # GWSumm is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GWSumm. If not, see <http://www.gnu.org/licenses/>. """Tests for `gwsumm.data` """ import os.path import operator import tempfile import shutil from collections import OrderedDict from urllib.request import urlopen import pytest from numpy import (arange, testing as nptest) from lal.utils import CacheEntry from glue.lal import Cache from gwpy.timeseries import TimeSeries from gwpy.detector import Channel from gwpy.segments import (Segment, SegmentList) from gwsumm import (data, globalv) from gwsumm.data import (utils, mathutils) from .common import empty_globalv_CHANNELS __author__ = 'Duncan Macleod <[email protected]>' LOSC_DATA = { 'H1:LOSC-STRAIN': ['https://losc.ligo.org/s/events/GW150914/' 'H-H1_LOSC_4_V1-1126259446-32.gwf'], 'L1:LOSC-STRAIN': ['https://losc.ligo.org/s/events/GW150914/' 'L-L1_LOSC_4_V1-1126259446-32.gwf'], } LOSC_SEGMENTS = SegmentList([Segment(1126259446, 1126259478)]) def download(remote, target=None): """Download a file """ if target is None: suffix = os.path.splitext(remote)[1] _, target = tempfile.mkstemp(suffix=suffix, prefix='gwsumm-tests-') response = urlopen(remote) with open(target, 'wb') as f: f.write(response.read()) return target class TestData(object): """Tests for :mod:`gwsumm.data`: """ @classmethod def setup_class(cls): cls.FRAMES = {} cls._tempdir = tempfile.mkdtemp(prefix='gwsumm-test-data-') # get data for channel in LOSC_DATA: cls.FRAMES[channel] = Cache() for gwf in LOSC_DATA[channel]: target = os.path.join(cls._tempdir, os.path.basename(gwf)) download(gwf, target) cls.FRAMES[channel].append(CacheEntry.from_T050017(target)) @classmethod def teardown_class(cls): # remove the temporary data shutil.rmtree(cls._tempdir) # -- test utilities ------------------------- def test_find_frame_type(self): channel = Channel('L1:TEST-CHANNEL') assert data.find_frame_type(channel) == 'L1_R' channel = Channel('C1:TEST-CHANNEL') assert data.find_frame_type(channel) == 'R' channel = Channel('H1:TEST-CHANNEL.rms,s-trend') assert data.find_frame_type(channel) == 'H1_T' channel = Channel('H1:TEST-CHANNEL.rms,m-trend') assert data.find_frame_type(channel) == 'H1_M' channel = Channel('H1:TEST-CHANNEL.rms,reduced') assert data.find_frame_type(channel) == 'H1_LDAS_C02_L2' channel = Channel('H1:TEST-CHANNEL.rms,online') assert data.find_frame_type(channel) == 'H1_lldetchar' def test_get_channel_type(self): assert data.get_channel_type('L1:TEST-CHANNEL') == 'adc' assert data.get_channel_type('G1:DER_DATA_HL') == 'proc' assert data.get_channel_type('H1:GDS-CALIB_STRAIN') == 'proc' assert data.get_channel_type('V1:GDS-CALIB_STRAIN') == 'adc' <|fim▁hole|> stride=123.456, window='test-window', method='scipy-welch', ) key = utils.make_globalv_key('L1:TEST-CHANNEL', fftparams) assert key == ';'.join([ 'L1:TEST-CHANNEL', # channel 'scipy-welch', # method '', # fftlength '', # overlap 'test-window', # window '123.456', # stride '', # FFT scheme ]) def test_get_fftparams(self): fftparams = utils.get_fftparams('L1:TEST-CHANNEL') assert isinstance(fftparams, utils.FftParams) for key in utils.FFT_PARAMS: assert (getattr(fftparams, key) is utils.DEFAULT_FFT_PARAMS.get(key, None)) fftparams = utils.get_fftparams('L1:TEST-CHANNEL', window='hanning', overlap=0) assert fftparams.window == 'hanning' assert fftparams.overlap == 0 with pytest.raises(ZeroDivisionError): utils.get_fftparams(None, stride=0) @pytest.mark.parametrize('definition, math', [ ( 'L1:TEST + L1:TEST2', ([('L1:TEST', 'L1:TEST2'), ([], [])], [operator.add]), ), ( 'L1:TEST + L1:TEST2 * 2', ([('L1:TEST', 'L1:TEST2'), ([], [(operator.mul, 2)])], [operator.add]), ), ( 'L1:TEST * 2 + L1:TEST2 ^ 5', ([('L1:TEST', 'L1:TEST2'), ([(operator.mul, 2)], [(operator.pow, 5)])], [operator.add]), ), ]) def test_parse_math_definition(self, definition, math): chans, operators = mathutils.parse_math_definition(definition) assert chans == OrderedDict(list(zip(*math[0]))) assert operators == math[1] # -- test add/get methods ------------------- def test_add_timeseries(self): a = TimeSeries([1, 2, 3, 4, 5], name='test name', epoch=0, sample_rate=1) # test simple add using 'name' data.add_timeseries(a) assert 'test name' in globalv.DATA assert len(globalv.DATA['test name']) == 1 assert globalv.DATA['test name'][0] is a # test add using key kwarg data.add_timeseries(a, key='test key') assert globalv.DATA['test key'][0] is a # test add to existing key with coalesce b = TimeSeries([6, 7, 8, 9, 10], name='test name 2', epoch=5, sample_rate=1) data.add_timeseries(b, key='test key', coalesce=True) assert len(globalv.DATA['test key']) == 1 nptest.assert_array_equal(globalv.DATA['test key'][0].value, arange(1, 11)) def test_get_timeseries(self): # empty globalv.DATA globalv.DATA = type(globalv.DATA)() # test simple get after add a = TimeSeries([1, 2, 3, 4, 5], name='test name', epoch=0, sample_rate=1) data.add_timeseries(a) b, = data.get_timeseries('test name', [(0, 5)], nproc=1) nptest.assert_array_equal(a.value, b.value) assert a.sample_rate.value == b.sample_rate.value # test more complicated add with a cache a, = data.get_timeseries('H1:LOSC-STRAIN', LOSC_SEGMENTS, cache=self.FRAMES['H1:LOSC-STRAIN'], nproc=1) b, = data.get_timeseries('H1:LOSC-STRAIN', LOSC_SEGMENTS, nproc=1) nptest.assert_array_equal(a.value, b.value) @empty_globalv_CHANNELS def test_get_spectrogram(self): with pytest.raises(TypeError): data.get_spectrogram('H1:LOSC-STRAIN', LOSC_SEGMENTS, cache=self.FRAMES['H1:LOSC-STRAIN'], nproc=1) data.get_spectrogram('H1:LOSC-STRAIN', LOSC_SEGMENTS, cache=self.FRAMES['H1:LOSC-STRAIN'], stride=4, fftlength=2, overlap=1, nproc=1) def test_get_spectrum(self): a, _, _ = data.get_spectrum('H1:LOSC-STRAIN', LOSC_SEGMENTS, cache=self.FRAMES['H1:LOSC-STRAIN'], nproc=1) b, _, _ = data.get_spectrum('H1:LOSC-STRAIN', LOSC_SEGMENTS, format='asd', cache=self.FRAMES['H1:LOSC-STRAIN'], nproc=1) nptest.assert_array_equal(a.value ** (1/2.), b.value) def test_get_coherence_spectrogram(self): cache = Cache([e for c in self.FRAMES for e in self.FRAMES[c]]) data.get_coherence_spectrogram( ('H1:LOSC-STRAIN', 'L1:LOSC-STRAIN'), LOSC_SEGMENTS, cache=cache, stride=4, fftlength=2, overlap=1, nproc=1, ) def test_get_coherence_spectrum(self): cache = Cache([e for c in self.FRAMES for e in self.FRAMES[c]]) data.get_coherence_spectrogram( ('H1:LOSC-STRAIN', 'L1:LOSC-STRAIN'), LOSC_SEGMENTS, cache=cache, stride=4, fftlength=2, overlap=1, nproc=1, )<|fim▁end|>
@empty_globalv_CHANNELS def test_make_globalv_key(self): fftparams = utils.get_fftparams( 'L1:TEST-CHANNEL',
<|file_name|>matchesversiontest.py<|end_file_name|><|fim▁begin|>## begin license ## # # "Meresco Distributed" has components for group management based on "Meresco Components." # # Copyright (C) 2018, 2021 Seecr (Seek You Too B.V.) https://seecr.nl # Copyright (C) 2021 Data Archiving and Network Services https://dans.knaw.nl # Copyright (C) 2021 SURF https://www.surf.nl # Copyright (C) 2021 Stichting Kennisnet https://www.kennisnet.nl # Copyright (C) 2021 The Netherlands Institute for Sound and Vision https://beeldengeluid.nl # # This file is part of "Meresco Distributed" # # "Meresco Distributed" is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # "Meresco Distributed" is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with "Meresco Distributed"; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #<|fim▁hole|> from os.path import join, isfile from uuid import uuid4 from seecr.test import SeecrTestCase, CallTrace from weightless.core import be, asString, consume, NoneOfTheObserversRespond, retval from meresco.core import Observable from meresco.distributed.constants import WRITABLE, READABLE from meresco.distributed.utils import usrSharePath from meresco.distributed.failover import MatchesVersion, Proxy, ServiceConfig from meresco.distributed.failover._matchesversion import betweenVersionCondition class MatchesVersionTest(SeecrTestCase): def setUp(self): SeecrTestCase.setUp(self) self.matchesVersion = MatchesVersion(minVersion='1', untilVersion='3') self.observer = CallTrace('observer', methods=dict(somemessage=lambda: (x for x in ['result'])), emptyGeneratorMethods=['updateConfig']) self.top = be((Observable(), (self.matchesVersion, (self.observer,) ) )) def testDoesNotMatchNoConfig(self): self.assertEqual('', asString(self.top.all.somemessage())) self.assertEqual([], self.observer.calledMethodNames()) def testDoesNotMatchNoVersion(self): consume(self.matchesVersion.updateConfig(config={'foo': 'bar'})) self.assertEqual('', asString(self.top.all.somemessage())) self.assertEqual(['updateConfig'], self.observer.calledMethodNames()) def testDoesNotMatch(self): consume(self.matchesVersion.updateConfig(**{'software_version': '0.1', 'config':{'foo': 'bar'}})) self.assertEqual('', asString(self.top.all.somemessage())) self.assertEqual(['updateConfig'], self.observer.calledMethodNames()) def testDoesMatch(self): consume(self.matchesVersion.updateConfig(software_version='2')) self.assertEqual('result', asString(self.top.all.somemessage())) self.assertEqual(['updateConfig', 'somemessage'], self.observer.calledMethodNames()) def testDeterminesConfig(self): newId = lambda: str(uuid4()) services = { newId(): {'type': 'service1', 'ipAddress': '10.0.0.2', 'infoport': 1234, 'active': True, 'readable': True, 'writable': True, 'data': {'VERSION': '1.5'}}, newId(): {'type': 'service2', 'ipAddress': '10.0.0.3', 'infoport': 1235, 'active': True, 'readable': True, 'writable': True, 'data': {'VERSION': '1.8'}}, } config = { 'service1.frontend': { 'fqdn': 'service1.front.example.org', 'ipAddress': '1.2.3.4', }, 'service2.frontend': { 'fqdn': 'service2.front.example.org', 'ipAddress': '1.2.3.5', }, } configFile = join(self.tempdir, 'server.conf') top = be( (Proxy(nginxConfigFile=configFile), (MatchesVersion( minVersion='1.4', untilVersion='2.0'), (ServiceConfig( type='service1', minVersion='1.4', untilVersion='2.0', flag=WRITABLE), ), ), (MatchesVersion( minVersion='1.4', untilVersion='4.0'), (ServiceConfig( type='service2', minVersion='1.4', untilVersion='2.0', flag=READABLE), ) ) ) ) mustUpdate, sleeptime = top.update(software_version='3.0', config=config, services=services, verbose=False) self.assertTrue(mustUpdate) self.assertEqual(30, sleeptime) self.assertTrue(isfile(configFile)) with open(configFile) as fp: self.assertEqualText("""## Generated by meresco.distributed.failover.Proxy upstream __var_3ff29304e7437997bf4171776e1fe282_service2 { server 10.0.0.3:1235; } server { listen 1.2.3.5:80; server_name service2.front.example.org; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; location / { proxy_pass http://__var_3ff29304e7437997bf4171776e1fe282_service2; } error_page 500 502 503 504 =503 /unavailable.html; location /unavailable.html { root %s/failover; } client_max_body_size 0; } """ % usrSharePath, fp.read()) # MatchesVersion is expected to be invoked with 'all', but testing for 'do', 'call' and 'any' invocation just in case def testDoesNotMatchDo(self): consume(self.matchesVersion.updateConfig(**{'software_version': '0.1'})) self.top.do.somemessage() self.assertEqual(['updateConfig'], self.observer.calledMethodNames()) def testDoesMatchDo(self): consume(self.matchesVersion.updateConfig(**{'software_version': '2'})) self.top.do.anothermessage() self.assertEqual(['updateConfig', 'anothermessage'], self.observer.calledMethodNames()) def testDoesNotMatchCall(self): consume(self.matchesVersion.updateConfig(**{'software_version': '0.1'})) try: _ = self.top.call.somemessage() self.fail() except NoneOfTheObserversRespond: pass self.assertEqual(['updateConfig'], self.observer.calledMethodNames()) def testDoesMatchCall(self): consume(self.matchesVersion.updateConfig(**{'software_version': '2'})) _ = self.top.call.somemessage() self.assertEqual(['updateConfig', 'somemessage'], self.observer.calledMethodNames()) def testDoesNotMatchAny(self): consume(self.matchesVersion.updateConfig(**{'software_version': '0.1'})) try: _ = retval(self.top.any.somemessage()) self.fail() except NoneOfTheObserversRespond: pass self.assertEqual(['updateConfig'], self.observer.calledMethodNames()) def testDoesMatchAny(self): consume(self.matchesVersion.updateConfig(**{'software_version': '2'})) _ = retval(self.top.any.somemessage()) self.assertEqual(['updateConfig', 'somemessage'], self.observer.calledMethodNames()) def testBetweenVersionCondition(self): inbetween = betweenVersionCondition('1.3', '8') self.assertTrue(inbetween('1.3')) self.assertTrue(inbetween('1.3.x')) self.assertTrue(inbetween('7.9')) self.assertFalse(inbetween('8.0')) self.assertFalse(inbetween('8')) self.assertFalse(inbetween('77')) self.assertFalse(inbetween('1.2.x'))<|fim▁end|>
## end license ##
<|file_name|>config.js<|end_file_name|><|fim▁begin|>// -------------------------------------------------------------------------------------------------------------------- // // cloudfront-config.js - config for AWS CloudFront // // Copyright (c) 2011 AppsAttic Ltd - http://www.appsattic.com/ // Written by Andrew Chilton <[email protected]> // // License: http://opensource.org/licenses/MIT // // -------------------------------------------------------------------------------------------------------------------- var data2xml = require('data2xml')({ attrProp : '@', valProp : '#', }); // -------------------------------------------------------------------------------------------------------------------- function pathDistribution(options, args) { return '/' + this.version() + '/distribution'; } function pathDistributionId(options, args) { return '/' + this.version() + '/distribution/' + args.DistributionId; } function pathDistributionIdConfig(options, args) { return '/' + this.version() + '/distribution/' + args.DistributionId + '/config'; } function pathDistributionInvalidation(options, args) { return '/' + this.version() + '/distribution/' + args.DistributionId + '/invalidation'; } function pathDistributionInvalidationId(options, args) { return '/' + this.version() + '/distribution/' + args.DistributionId + '/invalidation/' + args.InvalidationId; } function pathStreamingDistribution(options, args) { return '/' + this.version() + '/streaming-distribution'; } function pathStreamingDistributionId(options, args) { return '/' + this.version() + '/streaming-distribution/' + args.DistributionId; } function pathStreamingDistributionIdConfig(options, args) { return '/' + this.version() + '/streaming-distribution/' + args.DistributionId + '/config'; } function pathOai(options, args) { return '/' + this.version() + '/origin-access-identity/cloudfront'; } function pathOaiId(options, args) { return '/' + this.version() + '/origin-access-identity/cloudfront/' + args.OriginAccessId; } function pathOaiIdConfig(options, args) { return '/' + this.version() + '/origin-access-identity/cloudfront/' + args.OriginAccessId + '/config'; } function bodyDistributionConfig(options, args) { // create the XML var data = { '@' : { 'xmlns' : 'http://cloudfront.amazonaws.com/doc/2010-11-01/' }, }; if ( args.S3OriginDnsName ) { data.S3Origin = {}; data.S3Origin.DNSName = args.S3OriginDnsName; if ( args.S3OriginOriginAccessIdentity ) { data.S3Origin.OriginAccessIdentity = args.S3OriginOriginAccessIdentity; } } if ( args.CustomOriginDnsName || args.CustomOriginOriginProtocolPolicy ) { data.CustomOrigin = {}; if ( args.CustomOriginDnsName ) { data.CustomOrigin.DNSName = args.CustomOriginDnsName; } if ( args.CustomOriginHttpPort ) { data.CustomOrigin.HTTPPort = args.CustomOriginHttpPort; } if ( args.CustomOriginHttpsPort ) { data.CustomOrigin.HTTPSPort = args.CustomOriginHttpsPort; } if ( args.CustomOriginOriginProtocolPolicy ) { data.CustomOrigin.OriginProtocolPolicy = args.CustomOriginOriginProtocolPolicy; } } data.CallerReference = args.CallerReference; if ( args.Cname ) { data.CNAME = args.Cname; } if ( args.Comment ) { data.Comment = args.Comment; } if ( args.DefaultRootObject ) { data.DefaultRootObject = args.DefaultRootObject; } data.Enabled = args.Enabled; if ( args.PriceClass ) { data.PriceClass = args.PriceClass; } if ( args.LoggingBucket ) { data.Logging = {}; data.Logging.Bucket = args.LoggingBucket; if ( args.LoggingPrefix ) { data.Logging.Prefix = args.LoggingPrefix; } } if ( args.TrustedSignersSelf || args.TrustedSignersAwsAccountNumber ) { data.TrustedSigners = {}; if ( args.TrustedSignersSelf ) { data.TrustedSigners.Self = ''; } if ( args.TrustedSignersAwsAccountNumber ) { data.TrustedSigners.AwsAccountNumber = args.TrustedSignersAwsAccountNumber; } } if ( args.RequiredProtocolsProtocol ) { data.RequiredProtocols = {}; data.RequiredProtocols.Protocol = args.RequiredProtocolsProtocol; } return data2xml('DistributionConfig', data); } function bodyStreamingDistributionConfig(options, args) { // create the XML var data = { '@' : { 'xmlns' : 'http://cloudfront.amazonaws.com/doc/2010-11-01/' }, }; if ( args.S3OriginDnsName ) { data.S3Origin = {}; data.S3Origin.DNSName = args.S3OriginDnsName; if ( args.S3OriginOriginAccessIdentity ) { data.S3Origin.OriginAccessIdentity = args.S3OriginOriginAccessIdentity; } } data.CallerReference = args.CallerReference; if ( args.Cname ) { data.CNAME = args.Cname; } if ( args.Comment ) { data.Comment = args.Comment; } data.Enabled = args.Enabled; if ( args.PriceClass ) { data.PriceClass = args.PriceClass; } if ( args.LoggingBucket ) { data.Logging = {}; data.Logging.Bucket = args.LoggingBucket; if ( args.LoggingPrefix ) { data.Logging.Prefix = args.LoggingPrefix; } } if ( args.TrustedSignersSelf || args.TrustedSignersAwsAccountNumber ) { data.TrustedSigners = {}; if ( args.TrustedSignersSelf ) { data.TrustedSigners.Self = ''; } if ( args.TrustedSignersAwsAccountNumber ) { data.TrustedSigners.AwsAccountNumber = args.TrustedSignersAwsAccountNumber; } } return data2xml('StreamingDistributionConfig', data); } function bodyOaiConfig(options, args) { var self = this; var data = { '@' : { xmlns : 'http://cloudfront.amazonaws.com/doc/2010-11-01/', }, CallerReference : args.CallerReference, }; if ( args.Comments ) { data.Comments = args.Comments; } return data2xml('CloudFrontOriginAccessIdentityConfig', data); } // -------------------------------------------------------------------------------------------------------------------- module.exports = { // Operations on Distributions CreateDistribution : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/CreateDistribution.html', method : 'POST', path : pathDistribution, args : { // S3Origin Elements DnsName : { type : 'special', required : false, }, OriginAccessIdentity : { type : 'special', required : false, }, // CustomOrigin elements CustomOriginDnsName : { type : 'special', required : false, }, CustomOriginHttpPort : { type : 'special', required : false, }, CustomOriginHttpsPort : { type : 'special', required : false, }, CustomOriginOriginProtocolPolicy : { type : 'special', required : false, }, // other top level elements CallerReference : { type : 'special', required : true, }, Cname : { type : 'special', required : false, }, Comment : { type : 'special', required : false, }, Enabled : { type : 'special', required : true, }, DefaultRootObject : { type : 'special', required : true, }, // Logging Elements LoggingBucket : { type : 'special', required : false, }, LoggingPrefix : { type : 'special', required : false, }, // TrustedSigners Elements TrustedSignersSelf : { type : 'special', required : false, }, TrustedSignersAwsAccountNumber : { type : 'special', required : false, }, RequiredProtocols : { type : 'special', required : false, }, }, body : bodyDistributionConfig, statusCode: 201, }, ListDistributions : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/ListDistributions.html', path : pathDistribution, args : { Marker : { required : false, type : 'param', }, MaxItems : { required : false, type : 'param', }, }, }, GetDistribution : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/GetDistribution.html', path : pathDistributionId, args : { DistributionId : { required : true, type : 'special', }, }, }, GetDistributionConfig : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/GetConfig.html', path : pathDistributionIdConfig, args : { DistributionId : { required : true, type : 'special', }, }, }, PutDistributionConfig : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/PutConfig.html', method : 'PUT', path : pathDistributionIdConfig, args : { DistributionId : { required : true, type : 'special', }, IfMatch : { name : 'If-Match', required : true, type : 'header' }, // S3Origin Elements DnsName : { type : 'special', required : false, }, OriginAccessIdentity : { type : 'special', required : false, }, // CustomOrigin elements CustomOriginDnsName : { type : 'special', required : false, }, CustomOriginHttpPort : { type : 'special', required : false, }, CustomOriginHttpsPort : { type : 'special', required : false, }, CustomOriginOriginProtocolPolicy : { type : 'special', required : false, }, // other top level elements CallerReference : { type : 'special', required : true, }, Cname : { type : 'special', required : false, }, Comment : { type : 'special', required : false, }, Enabled : { type : 'special', required : true, }, PriceClass : { type : 'special', required : false, }, DefaultRootObject : { type : 'special', required : true, }, // Logging Elements LoggingBucket : { type : 'special', required : false, }, LoggingPrefix : { type : 'special', required : false, }, // TrustedSigners Elements TrustedSignersSelf : { type : 'special', required : false, }, TrustedSignersAwsAccountNumber : { type : 'special', required : false, }, RequiredProtocols : { type : 'special', required : false, }, }, body : bodyDistributionConfig, }, DeleteDistribution : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/DeleteDistribution.html', method : 'DELETE', path : pathDistributionId, args : { DistributionId : { required : true, type : 'special', }, IfMatch : { name : 'If-Match', required : true, type : 'header' }, }, statusCode : 204, }, // Operations on Streaming Distributions CreateStreamingDistribution : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/CreateStreamingDistribution.html', method : 'POST', path : pathStreamingDistribution, args : { // S3Origin Elements S3OriginDnsName : { type : 'special', required : false, }, S3OriginOriginAccessIdentity : { type : 'special', required : false, }, // other top level elements CallerReference : { type : 'special', required : true, }, Cname : { type : 'special', required : false, }, Comment : { type : 'special', required : false, }, Enabled : { type : 'special', required : true, }, PriceClass : { type : 'special', required : false, }, // Logging Elements LoggingBucket : { type : 'special', required : false, }, LoggingPrefix : { type : 'special', required : false, }, // TrustedSigners Elements TrustedSignersSelf : { type : 'special', required : false, }, TrustedSignersAwsAccountNumber : { type : 'special', required : false, }, }, body : bodyStreamingDistributionConfig, }, ListStreamingDistributions : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/ListStreamingDistributions.html', path : pathStreamingDistribution, args : { Marker : { required : false, type : 'param', }, MaxItems : { required : false, type : 'param', }, }, }, GetStreamingDistribution : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/GetStreamingDistribution.html', path : pathStreamingDistributionId, args : { DistributionId : { required : true, type : 'special', }, }, }, GetStreamingDistributionConfig : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/GetStreamingDistConfig.html', path : pathStreamingDistributionIdConfig, args : { DistributionId : { required : true, type : 'special', }, }, }, PutStreamingDistributionConfig : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/PutStreamingDistConfig.html', method : 'PUT', path : pathStreamingDistributionIdConfig, args : { DistributionId : { required : true, type : 'special', }, IfMatch : { name : 'If-Match', required : true, type : 'header' }, // S3Origin Elements DnsName : { type : 'special', required : false, }, OriginAccessIdentity : { type : 'special', required : false, }, // other top level elements CallerReference : { type : 'special', required : true, }, Cname : { type : 'special', required : false, }, Comment : { type : 'special', required : false, }, Enabled : { type : 'special', required : true, }, // Logging Elements LoggingBucket : { type : 'special', required : false, }, LoggingPrefix : { type : 'special', required : false, }, // TrustedSigners Elements TrustedSignersSelf : { type : 'special', required : false, }, TrustedSignersAwsAccountNumber : { type : 'special', required : false, }, }, body : bodyStreamingDistributionConfig, }, DeleteStreamingDistribution : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/DeleteStreamingDistribution.html', method : 'DELETE', path : pathStreamingDistributionId, args : { DistributionId : { required : true, type : 'special', }, IfMatch : { name : 'If-Match', required : true, type : 'header' }, }, statusCode : 204, }, // Operations on Origin Access Identities CreateOai : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/CreateOAI.html', method : 'POST', path : pathOai, args : { CallerReference : { required : true, type : 'special', }, Comment : { required : false, type : 'special', }, }, body : bodyOaiConfig, statusCode: 201, }, ListOais : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/ListOAIs.html', path : pathOai, args : { Marker : { required : false, type : 'param', }, MaxItems : { required : false, type : 'param', }, }, }, GetOai : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/GetOAI.html', path : pathOaiId, args : { OriginAccessId : { required : true, type : 'special', }, }, }, GetOaiConfig : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/GetOAIConfig.html', path : pathOaiIdConfig, args : { OriginAccessId : { required : true, type : 'special', }, }, }, PutOaiConfig : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/PutOAIConfig.html', method : 'PUT', path : pathOai, args : { OriginAccessId : { required : true, type : 'special', }, CallerReference : { required : true, type : 'special', }, Comment : { required : false, type : 'special', }, }, body : bodyOaiConfig, }, DeleteOai : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/DeleteOAI.html', method : 'DELETE', path : pathOaiId, args : { OriginAccessId : { required : true, type : 'special', }, IfMatch : { name : 'If-Match', required : true, type : 'header' }, }, statusCode : 204, }, // Operations on Invalidations CreateInvalidation : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/CreateInvalidation.html', method : 'POST', path : pathDistributionInvalidation, args : { DistributionId : { required : true, type : 'special', }, Path : { required : true, type : 'special', }, CallerReference : { required : false, type : 'special', }, }, body : function(options, args) { var self = this;<|fim▁hole|> data.CallerReference = args.CallerReference; } return data2xml('InvalidationBatch', data); }, }, ListInvalidations : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/ListInvalidation.html', path : pathDistributionInvalidation, args : { DistributionId : { required : true, type : 'special', }, Marker : { required : false, type : 'param', }, MaxItems : { required : false, type : 'param', }, }, }, GetInvalidation : { url : 'http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/GetInvalidation.html', path : pathDistributionInvalidationId, args : { DistributionId : { required : true, type : 'special', }, Marker : { required : false, type : 'param', }, MaxItems : { required : false, type : 'param', }, }, }, }; // --------------------------------------------------------------------------------------------------------------------<|fim▁end|>
var data = { Path : args.Path, }; if ( args.CallerReference ) {
<|file_name|>base.ts<|end_file_name|><|fim▁begin|>export default class BaseTitle { constructor(public element: HTMLElement) { } public normalizeWhitespace(title: string = ""): string { return (title || "").trim().replace(/\s/g, " "); } get title(): string { return this.element.title;<|fim▁hole|><|fim▁end|>
} }
<|file_name|>containers.rs<|end_file_name|><|fim▁begin|>// Copyright 2017 MaidSafe.net limited. // // This SAFE Network Software is licensed to you under (1) the MaidSafe.net Commercial License, // version 1.0 or later, or (2) The General Public License (GPL), version 3, depending on which // licence you accepted on initial access to the Software (the "Licences"). // // By contributing code to the SAFE Network Software, or to this project generally, you agree to be // bound by the terms of the MaidSafe Contributor Agreement. This, along with the Licenses can be // found in the root directory of this project at LICENSE, COPYING and CONTRIBUTOR. // // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. // // Please review the Licences for the specific language governing permissions and limitations // relating to use of the SAFE Network Software. use super::{AppExchangeInfo, ContainerPermissions, containers_from_repr_c, containers_into_vec}; use ffi::ipc::req as ffi; use ffi_utils::{ReprC, StringError, vec_into_raw_parts}; use ipc::errors::IpcError; use std::collections::HashMap; /// Containers request #[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] pub struct ContainersReq { /// Exchange info pub app: AppExchangeInfo, /// Requested containers pub containers: HashMap<String, ContainerPermissions>, } impl ContainersReq { /// Consumes the object and returns the FFI counterpart. /// /// You're now responsible for freeing the subobjects memory once you're /// done. pub fn into_repr_c(self) -> Result<ffi::ContainersReq, IpcError> { let ContainersReq { app, containers } = self; let containers = containers_into_vec(containers).map_err(StringError::from)?; let (containers_ptr, containers_len, containers_cap) = vec_into_raw_parts(containers); Ok(ffi::ContainersReq { app: app.into_repr_c()?, containers: containers_ptr, containers_len, containers_cap, }) } } impl ReprC for ContainersReq { type C = *const ffi::ContainersReq; type Error = IpcError; /// Constructs the object from the FFI counterpart. /// /// After calling this functions, the subobjects memory is owned by the /// resulting object. unsafe fn clone_from_repr_c(repr_c: *const ffi::ContainersReq) -> Result<Self, IpcError> {<|fim▁hole|> }) } }<|fim▁end|>
Ok(ContainersReq { app: AppExchangeInfo::clone_from_repr_c(&(*repr_c).app)?, containers: containers_from_repr_c((*repr_c).containers, (*repr_c).containers_len)?,
<|file_name|>here-now.py<|end_file_name|><|fim▁begin|>## www.pubnub.com - PubNub Real-time push service in the cloud. # coding=utf8 ## PubNub Real-time Push APIs and Notifications Framework ## Copyright (c) 2010 Stephen Blum ## http://www.pubnub.com/ import sys from pubnub import PubnubTornado as Pubnub publish_key = len(sys.argv) > 1 and sys.argv[1] or 'demo' subscribe_key = len(sys.argv) > 2 and sys.argv[2] or 'demo' secret_key = len(sys.argv) > 3 and sys.argv[3] or 'demo' cipher_key = len(sys.argv) > 4 and sys.argv[4] or '' ssl_on = len(sys.argv) > 5 and bool(sys.argv[5]) or False ## ----------------------------------------------------------------------- ## Initiate Pubnub State ## ----------------------------------------------------------------------- pubnub = Pubnub(publish_key=publish_key, subscribe_key=subscribe_key, secret_key=secret_key, cipher_key=cipher_key, ssl_on=ssl_on) channel = 'hello_world' # Asynchronous usage def callback(message):<|fim▁hole|> print(message) pubnub.here_now(channel, callback=callback, error=callback) pubnub.start()<|fim▁end|>
<|file_name|>wrapper_traits.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #![allow(unsafe_code)] use HTMLCanvasData; use LayoutNodeType; use OpaqueStyleAndLayoutData; use SVGSVGData; use atomic_refcell::AtomicRefCell; use gfx_traits::{ByteIndex, FragmentType, ScrollRootId}; use html5ever_atoms::{Namespace, LocalName}; use msg::constellation_msg::PipelineId; use range::Range; use servo_url::ServoUrl; use std::fmt::Debug; use std::sync::Arc; use style::computed_values::display; use style::context::SharedStyleContext; use style::data::ElementData; use style::dom::{LayoutIterator, NodeInfo, PresentationalHintsSynthetizer, TNode}; use style::dom::OpaqueNode; use style::properties::{CascadeFlags, ServoComputedValues}; use style::selector_parser::{PseudoElement, PseudoElementCascadeType, SelectorImpl}; #[derive(Copy, PartialEq, Clone, Debug)] pub enum PseudoElementType<T> { Normal, Before(T), After(T), DetailsSummary(T), DetailsContent(T), } impl<T> PseudoElementType<T> { pub fn is_before(&self) -> bool { match *self { PseudoElementType::Before(_) => true, _ => false, } } pub fn is_replaced_content(&self) -> bool { match *self { PseudoElementType::Before(_) | PseudoElementType::After(_) => true, _ => false, } } pub fn strip(&self) -> PseudoElementType<()> { match *self { PseudoElementType::Normal => PseudoElementType::Normal, PseudoElementType::Before(_) => PseudoElementType::Before(()), PseudoElementType::After(_) => PseudoElementType::After(()), PseudoElementType::DetailsSummary(_) => PseudoElementType::DetailsSummary(()), PseudoElementType::DetailsContent(_) => PseudoElementType::DetailsContent(()), } } pub fn style_pseudo_element(&self) -> PseudoElement { match *self { PseudoElementType::Normal => unreachable!("style_pseudo_element called with PseudoElementType::Normal"), PseudoElementType::Before(_) => PseudoElement::Before, PseudoElementType::After(_) => PseudoElement::After, PseudoElementType::DetailsSummary(_) => PseudoElement::DetailsSummary, PseudoElementType::DetailsContent(_) => PseudoElement::DetailsContent, } } } /// Trait to abstract access to layout data across various data structures. pub trait GetLayoutData { fn get_style_and_layout_data(&self) -> Option<OpaqueStyleAndLayoutData>; } /// A wrapper so that layout can access only the methods that it should have access to. Layout must /// only ever see these and must never see instances of `LayoutJS`. pub trait LayoutNode: Debug + GetLayoutData + TNode { type ConcreteThreadSafeLayoutNode: ThreadSafeLayoutNode; fn to_threadsafe(&self) -> Self::ConcreteThreadSafeLayoutNode; /// Returns the type ID of this node. fn type_id(&self) -> LayoutNodeType; unsafe fn init_style_and_layout_data(&self, data: OpaqueStyleAndLayoutData); unsafe fn take_style_and_layout_data(&self) -> OpaqueStyleAndLayoutData; fn rev_children(self) -> LayoutIterator<ReverseChildrenIterator<Self>> { LayoutIterator(ReverseChildrenIterator { current: self.last_child(), }) } fn traverse_preorder(self) -> TreeIterator<Self> { TreeIterator::new(self) } fn first_child(&self) -> Option<Self>; fn last_child(&self) -> Option<Self>; fn prev_sibling(&self) -> Option<Self>; fn next_sibling(&self) -> Option<Self>; } pub struct ReverseChildrenIterator<ConcreteNode> where ConcreteNode: LayoutNode { current: Option<ConcreteNode>, } impl<ConcreteNode> Iterator for ReverseChildrenIterator<ConcreteNode> where ConcreteNode: LayoutNode { type Item = ConcreteNode; fn next(&mut self) -> Option<ConcreteNode> { let node = self.current; self.current = node.and_then(|node| node.prev_sibling()); node } } pub struct TreeIterator<ConcreteNode> where ConcreteNode: LayoutNode { stack: Vec<ConcreteNode>, } impl<ConcreteNode> TreeIterator<ConcreteNode> where ConcreteNode: LayoutNode { fn new(root: ConcreteNode) -> TreeIterator<ConcreteNode> { let mut stack = vec![]; stack.push(root); TreeIterator { stack: stack, } } pub fn next_skipping_children(&mut self) -> Option<ConcreteNode> { self.stack.pop() } } impl<ConcreteNode> Iterator for TreeIterator<ConcreteNode> where ConcreteNode: LayoutNode { type Item = ConcreteNode; fn next(&mut self) -> Option<ConcreteNode> { let ret = self.stack.pop(); ret.map(|node| self.stack.extend(node.rev_children())); ret } } /// A thread-safe version of `LayoutNode`, used during flow construction. This type of layout /// node does not allow any parents or siblings of nodes to be accessed, to avoid races. pub trait ThreadSafeLayoutNode: Clone + Copy + Debug + GetLayoutData + NodeInfo + PartialEq + Sized { type ConcreteNode: LayoutNode<ConcreteThreadSafeLayoutNode = Self>; type ConcreteThreadSafeLayoutElement: ThreadSafeLayoutElement<ConcreteThreadSafeLayoutNode = Self> + ::selectors::Element<Impl=SelectorImpl>; type ChildrenIterator: Iterator<Item = Self> + Sized; /// Converts self into an `OpaqueNode`. fn opaque(&self) -> OpaqueNode; /// Returns the type ID of this node. /// Returns `None` if this is a pseudo-element; otherwise, returns `Some`. fn type_id(&self) -> Option<LayoutNodeType>; /// Returns the type ID of this node, without discarding pseudo-elements as /// `type_id` does. fn type_id_without_excluding_pseudo_elements(&self) -> LayoutNodeType; /// Returns the style for a text node. This is computed on the fly from the /// parent style to avoid traversing text nodes in the style system. /// /// Note that this does require accessing the parent, which this interface /// technically forbids. But accessing the parent is only unsafe insofar as /// it can be used to reach siblings and cousins. A simple immutable borrow /// of the parent data is fine, since the bottom-up traversal will not process /// the parent until all the children have been processed. fn style_for_text_node(&self) -> Arc<ServoComputedValues>; #[inline] fn is_element_or_elements_pseudo(&self) -> bool { match self.type_id_without_excluding_pseudo_elements() { LayoutNodeType::Element(..) => true, _ => false, } } fn get_before_pseudo(&self) -> Option<Self> { self.as_element().and_then(|el| el.get_before_pseudo()).map(|el| el.as_node()) } fn get_after_pseudo(&self) -> Option<Self> { self.as_element().and_then(|el| el.get_after_pseudo()).map(|el| el.as_node()) } fn get_details_summary_pseudo(&self) -> Option<Self> { self.as_element().and_then(|el| el.get_details_summary_pseudo()).map(|el| el.as_node()) } fn get_details_content_pseudo(&self) -> Option<Self> { self.as_element().and_then(|el| el.get_details_content_pseudo()).map(|el| el.as_node()) } fn debug_id(self) -> usize; /// Returns an iterator over this node's children. fn children(&self) -> LayoutIterator<Self::ChildrenIterator>; /// Returns a ThreadSafeLayoutElement if this is an element, None otherwise. #[inline] fn as_element(&self) -> Option<Self::ConcreteThreadSafeLayoutElement>; #[inline] fn get_pseudo_element_type(&self) -> PseudoElementType<Option<display::T>> { self.as_element().map_or(PseudoElementType::Normal, |el| el.get_pseudo_element_type()) } fn get_style_and_layout_data(&self) -> Option<OpaqueStyleAndLayoutData>; fn style(&self, context: &SharedStyleContext) -> Arc<ServoComputedValues> { if let Some(el) = self.as_element() { el.style(context) } else { debug_assert!(self.is_text_node()); self.style_for_text_node() } } fn selected_style(&self) -> Arc<ServoComputedValues> { if let Some(el) = self.as_element() { el.selected_style() } else { debug_assert!(self.is_text_node()); self.style_for_text_node() } } fn is_ignorable_whitespace(&self, context: &SharedStyleContext) -> bool; /// Returns true if this node contributes content. This is used in the implementation of /// `empty_cells` per CSS 2.1 § 17.6.1.1. fn is_content(&self) -> bool { self.type_id().is_some() } /// Returns access to the underlying LayoutNode. This is breaks the abstraction /// barrier of ThreadSafeLayout wrapper layer, and can lead to races if not used /// carefully. /// /// We need this because the implementation of some methods need to access the layout /// data flags, and we have this annoying trait separation between script and layout :-( unsafe fn unsafe_get(self) -> Self::ConcreteNode; fn can_be_fragmented(&self) -> bool; fn node_text_content(&self) -> String; /// If the insertion point is within this node, returns it. Otherwise, returns `None`. fn selection(&self) -> Option<Range<ByteIndex>>; /// If this is an image element, returns its URL. If this is not an image element, fails. fn image_url(&self) -> Option<ServoUrl>; fn canvas_data(&self) -> Option<HTMLCanvasData>; fn svg_data(&self) -> Option<SVGSVGData>; /// If this node is an iframe element, returns its pipeline ID. If this node is /// not an iframe element, fails. fn iframe_pipeline_id(&self) -> PipelineId; fn get_colspan(&self) -> u32; fn get_rowspan(&self) -> u32; fn fragment_type(&self) -> FragmentType { match self.get_pseudo_element_type() { PseudoElementType::Normal => FragmentType::FragmentBody, PseudoElementType::Before(_) => FragmentType::BeforePseudoContent, PseudoElementType::After(_) => FragmentType::AfterPseudoContent, PseudoElementType::DetailsSummary(_) => FragmentType::FragmentBody, PseudoElementType::DetailsContent(_) => FragmentType::FragmentBody,<|fim▁hole|> } fn scroll_root_id(&self) -> ScrollRootId { ScrollRootId::new_of_type(self.opaque().id() as usize, self.fragment_type()) } } // This trait is only public so that it can be implemented by the gecko wrapper. // It can be used to violate thread-safety, so don't use it elsewhere in layout! #[allow(unsafe_code)] pub trait DangerousThreadSafeLayoutNode: ThreadSafeLayoutNode { unsafe fn dangerous_first_child(&self) -> Option<Self>; unsafe fn dangerous_next_sibling(&self) -> Option<Self>; } pub trait ThreadSafeLayoutElement: Clone + Copy + Sized + Debug + ::selectors::Element<Impl=SelectorImpl> + GetLayoutData + PresentationalHintsSynthetizer { type ConcreteThreadSafeLayoutNode: ThreadSafeLayoutNode<ConcreteThreadSafeLayoutElement = Self>; fn as_node(&self) -> Self::ConcreteThreadSafeLayoutNode; /// Creates a new `ThreadSafeLayoutElement` for the same `LayoutElement` /// with a different pseudo-element type. fn with_pseudo(&self, pseudo: PseudoElementType<Option<display::T>>) -> Self; /// Returns the type ID of this node. /// Returns `None` if this is a pseudo-element; otherwise, returns `Some`. fn type_id(&self) -> Option<LayoutNodeType>; /// Returns access to the underlying TElement. This is breaks the abstraction /// barrier of ThreadSafeLayout wrapper layer, and can lead to races if not used /// carefully. /// /// We need this so that the functions defined on this trait can call /// lazily_compute_pseudo_element_style, which operates on TElement. unsafe fn unsafe_get(self) -> <<Self::ConcreteThreadSafeLayoutNode as ThreadSafeLayoutNode>::ConcreteNode as TNode>::ConcreteElement; #[inline] fn get_attr(&self, namespace: &Namespace, name: &LocalName) -> Option<&str>; fn get_style_data(&self) -> Option<&AtomicRefCell<ElementData>>; #[inline] fn get_pseudo_element_type(&self) -> PseudoElementType<Option<display::T>>; #[inline] fn get_before_pseudo(&self) -> Option<Self> { if self.get_style_data() .unwrap() .borrow() .styles().pseudos .contains_key(&PseudoElement::Before) { Some(self.with_pseudo(PseudoElementType::Before(None))) } else { None } } #[inline] fn get_after_pseudo(&self) -> Option<Self> { if self.get_style_data() .unwrap() .borrow() .styles().pseudos .contains_key(&PseudoElement::After) { Some(self.with_pseudo(PseudoElementType::After(None))) } else { None } } #[inline] fn get_details_summary_pseudo(&self) -> Option<Self> { if self.get_local_name() == &local_name!("details") && self.get_namespace() == &ns!(html) { Some(self.with_pseudo(PseudoElementType::DetailsSummary(None))) } else { None } } #[inline] fn get_details_content_pseudo(&self) -> Option<Self> { if self.get_local_name() == &local_name!("details") && self.get_namespace() == &ns!(html) { let display = if self.get_attr(&ns!(), &local_name!("open")).is_some() { None // Specified by the stylesheet } else { Some(display::T::none) }; Some(self.with_pseudo(PseudoElementType::DetailsContent(display))) } else { None } } /// Returns the style results for the given node. If CSS selector matching /// has not yet been performed, fails. /// /// Unlike the version on TNode, this handles pseudo-elements. #[inline] fn style(&self, context: &SharedStyleContext) -> Arc<ServoComputedValues> { match self.get_pseudo_element_type() { PseudoElementType::Normal => self.get_style_data().unwrap().borrow() .styles().primary.values().clone(), other => { // Precompute non-eagerly-cascaded pseudo-element styles if not // cached before. let style_pseudo = other.style_pseudo_element(); match style_pseudo.cascade_type() { // Already computed during the cascade. PseudoElementCascadeType::Eager => {}, PseudoElementCascadeType::Precomputed => { if !self.get_style_data() .unwrap() .borrow() .styles().pseudos.contains_key(&style_pseudo) { let mut data = self.get_style_data().unwrap().borrow_mut(); let new_style = context.stylist.precomputed_values_for_pseudo( &style_pseudo, Some(data.styles().primary.values()), CascadeFlags::empty()); data.styles_mut().pseudos .insert(style_pseudo.clone(), new_style); } } PseudoElementCascadeType::Lazy => { if !self.get_style_data() .unwrap() .borrow() .styles().pseudos.contains_key(&style_pseudo) { let mut data = self.get_style_data().unwrap().borrow_mut(); let new_style = context.stylist .lazily_compute_pseudo_element_style( unsafe { &self.unsafe_get() }, &style_pseudo, data.styles().primary.values()); data.styles_mut().pseudos .insert(style_pseudo.clone(), new_style.unwrap()); } } } self.get_style_data().unwrap().borrow() .styles().pseudos.get(&style_pseudo) .unwrap().values().clone() } } } #[inline] fn selected_style(&self) -> Arc<ServoComputedValues> { let data = self.get_style_data().unwrap().borrow(); data.styles().pseudos .get(&PseudoElement::Selection).map(|s| s) .unwrap_or(&data.styles().primary) .values().clone() } /// Returns the already resolved style of the node. /// /// This differs from `style(ctx)` in that if the pseudo-element has not yet /// been computed it would panic. /// /// This should be used just for querying layout, or when we know the /// element style is precomputed, not from general layout itself. #[inline] fn resolved_style(&self) -> Arc<ServoComputedValues> { let data = self.get_style_data().unwrap().borrow(); match self.get_pseudo_element_type() { PseudoElementType::Normal => data.styles().primary.values().clone(), other => data.styles().pseudos .get(&other.style_pseudo_element()).unwrap().values().clone(), } } }<|fim▁end|>
}
<|file_name|>LoginController.java<|end_file_name|><|fim▁begin|>package fr.pizzeria.admin.web; import java.io.IOException; import javax.servlet.RequestDispatcher; import javax.servlet.ServletException; import javax.servlet.annotation.WebServlet; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpSession; import org.apache.commons.lang3.StringUtils; @WebServlet("/login") public class LoginController extends HttpServlet { @Override protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { RequestDispatcher rd = this.getServletContext().getRequestDispatcher("/WEB-INF/views/login/login.jsp"); rd.forward(req, resp); } @Override protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { String email = req.getParameter("email"); String password = req.getParameter("password"); if (StringUtils.isBlank(email) || StringUtils.isBlank(password)) { resp.sendError(400, "Non non non ! Zone interdite"); } else if ( StringUtils.equals(email, "[email protected]") && StringUtils.equals(password, "admin")) { HttpSession session = req.getSession(); session.setAttribute("email", email); resp.sendRedirect(req.getContextPath() + "/pizzas/list"); } else { resp.setStatus(403); req.setAttribute("msgerr", "Ooppps noooo"); RequestDispatcher rd = this.getServletContext().getRequestDispatcher("/WEB-INF/views/login/login.jsp"); rd.forward(req, resp); } } <|fim▁hole|><|fim▁end|>
}
<|file_name|>backup_main_commands_test.py<|end_file_name|><|fim▁begin|>import unittest import config_test from backupcmd.commands import backupCommands class BackupCommandsTestCase(unittest.TestCase):<|fim▁hole|> self.assertEqual(1,1)<|fim▁end|>
"""Test commands passed to main script""" def test_hyphen_r_option(self): print 'Pending BackupCommandsTestCase'
<|file_name|>th_logger.py<|end_file_name|><|fim▁begin|>#!C:\Python27\ """th_logger.py holds logging handler and config for the Regression test""" import logging from testProperty import TEST_OUTPUT_PATH<|fim▁hole|>test_logger = logging.getLogger('TEST_HARNESS') handler = logging.FileHandler(TEST_OUTPUT_PATH + 'runTest.log') formatter = logging.Formatter('%(asctime)s %(name)-10s %(levelname)-6s %(message)s') handler.setFormatter(formatter) test_logger.addHandler(handler) test_logger.setLevel(logging.DEBUG)<|fim▁end|>
<|file_name|>variant-struct.rs<|end_file_name|><|fim▁begin|>pub enum Foo {<|fim▁hole|><|fim▁end|>
Bar { qux: (), } }
<|file_name|>TileGenericMachine.java<|end_file_name|><|fim▁begin|>/* * This file is part of TechReborn, licensed under the MIT License (MIT). * * Copyright (c) 2018 TechReborn * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ package techreborn.tiles; import net.minecraft.block.Block; import net.minecraft.entity.player.EntityPlayer; import net.minecraft.item.ItemStack; import net.minecraft.util.EnumFacing; import reborncore.api.IToolDrop; import reborncore.api.recipe.IRecipeCrafterProvider; import reborncore.api.tile.IInventoryProvider; import reborncore.common.powerSystem.TilePowerAcceptor; import reborncore.common.recipes.RecipeCrafter; import reborncore.common.util.Inventory; /** * @author drcrazy * */ public abstract class TileGenericMachine extends TilePowerAcceptor implements IToolDrop, IInventoryProvider, IRecipeCrafterProvider{ public String name; public int maxInput; public int maxEnergy; public Block toolDrop; public int energySlot; public Inventory inventory; public RecipeCrafter crafter; /** * @param name String Name for a tile. Do we need it at all? * @param maxInput int Maximum energy input, value in EU * @param maxEnergy int Maximum energy buffer, value in EU * @param toolDrop Block Block to drop with wrench * @param energySlot int Energy slot to use to charge machine from battery */ public TileGenericMachine(String name, int maxInput, int maxEnergy, Block toolDrop, int energySlot) { this.name = "Tile" + name; this.maxInput = maxInput; this.maxEnergy = maxEnergy; this.toolDrop = toolDrop; this.energySlot = energySlot; checkTeir(); } public int getProgressScaled(final int scale) { if (crafter != null && crafter.currentTickTime != 0) { return crafter.currentTickTime * scale / crafter.currentNeededTicks; } return 0; } // TilePowerAcceptor<|fim▁hole|> super.update(); if (!world.isRemote) { charge(energySlot); } } @Override public double getBaseMaxPower() { return maxEnergy; } @Override public boolean canAcceptEnergy(final EnumFacing direction) { return true; } @Override public boolean canProvideEnergy(final EnumFacing direction) { return false; } @Override public double getBaseMaxOutput() { return 0; } @Override public double getBaseMaxInput() { return maxInput; } // IToolDrop @Override public ItemStack getToolDrop(EntityPlayer p0) { return new ItemStack(toolDrop, 1); } // IInventoryProvider @Override public Inventory getInventory() { return inventory; } // IRecipeCrafterProvider @Override public RecipeCrafter getRecipeCrafter() { return crafter; } }<|fim▁end|>
@Override public void update() {
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>mod fasta { include!("../../fuzz_targets/_fasta.rs"); } mod fastq { include!("../../fuzz_targets/_fastq.rs");<|fim▁hole|>use std::env::args; use std::fs::File; use std::io::Read; fn main() { let mut data = vec![]; let filename = args().skip(1).next().unwrap().as_str().to_string(); File::open(&filename) .unwrap() .read_to_end(&mut data) .expect("could not open file"); let data = data.as_slice(); println!( "data: {:?}\n{:?}'", data, String::from_utf8(data.to_owned()) ); fasta::evaluate(data); fastq::evaluate(data); }<|fim▁end|>
}
<|file_name|>ret_dv.cpp<|end_file_name|><|fim▁begin|># include "stdafx.h" /*************************************************************************** ** ** INVOCATION NAME: RET123DV ** ** PURPOSE: TO RETURN DELIMITED DATA VALUES STORED IN STRUCTURE ** ** INVOCATION METHOD: RET123DV(B_PTR,PRIM_DMS) ** ** ARGUMENT LIST: ** NAME TYPE USE DESCRIPTION ** B_PTR[] PTR I CHARACTER POINTER TO BUFFER ** PRIM_DMS INT I PRIMARY DIMENSION LENGTH ** RET123DV() LOGICAL O SUCCESS FLAG ** ** EXTERNAL FUNCTION REFERENCES: ** NAME DESCRIPTION ** STR123TOK() RETURNS A POINTER TO A STRING TOKEN ** ** INTERNAL VARIABLES: ** NAME TYPE DESCRIPTION ** LEN INT CHARACTER STRING LENGTH ** NEW_DV PTR POINTER TO DATA VALUE STRUCTURE ** ST_TMPBUF[] PTR START OF TEMPORARY BUFFER ** TMP_BUF[] PTR TEMPORARY BUFFER ** TMP_STR[] PTR TEMPORARY CHARACTER STRING ** TOK_LEN LONG LENGTH OF TOKEN RETURNED FROM STR123TOK ** VAL_CT INT NUMBER OF CURRENT VALUES IN SET ** VAL_STR[] PTR VALUE CHARACTER STRING ** ** GLOBAL REFERENCES: ** ** DATA RECORD STRUCTURE (DR) ** NAME TYPE USE DESCRIPTION ** TAG[10] CHAR N/A INTERNAL NAME OF AN ASSOCIATED FIELD ** FD_LEN INT N/A LENGTH OF DISCRIPTIVE AREA DATA RECORD ** FD_POS INT N/A POSITION OF DESCRIPTIVE AREA DATA ** RECORD ** NUM_DIM INT N/A NUMBER OF DIMENSIONS (NO LABELS) ** DIM_LPTR PTR N/A HEAD POINTER TO DIMENSION LENGTHS ** (NO LABELS) ** VALUES PTR O HEAD POINTER TO DATA VALUE SUBFIELD ** RECORDS ** NEXT PTR N/A POINTER TO NEXT DATA RECORD ** ** DR DATA VALUE SUBFIELD STRUCTURE (DV) ** NAME TYPE USE DESCRIPTION ** VALUE CHAR O DATA VALUE ** NXT_VSET PTR O POINTER TO NEXT SET OF DATA VALUES ** NXT_VAL PTR O POINTER TO NEXT DATA VALUE SUBFIELD ** RECORD ** ** FILE MANAGEMENT STRUCTURE (FM) ** NAME TYPE USE DESCRIPTION ** FP PTR N/A FILE POINTER ** F_NAME[] PTR N/A EXTERNAL FILE NAME ** OPEN_MODE CHAR N/A OPEN MODE OF FILE ** CR_HD PTR N/A HEAD POINTER TO DATA DESCRIPTIVE FILE ** CONTROL RECORD STRUCTURE ** DD_HD PTR N/A HEAD POINTER TO DATA DESCRIPTIVE ** RECORD STRUCTURE ** DL_HD PTR N/A HEAD POINTER TO DATA DESCRIPTIVE LEADER ** STRUCTURE ** DR_HD PTR N/A HEAD POINTER TO DATA RECORD STRUCTURE ** LP_HD PTR N/A HEAD POINTER TO LABELS POINTER STRUCTURE ** RL_HD PTR N/A HEAD POINTER TO DATA RECORD LEADER ** STRUCTURE ** RS_HD PTR N/A HEAD POINTER TO FORMAT REPETITION STACK ** STRUCTURE ** REC_LOC_HD PTR N/A HEAD POINTER TO RECORD POINTER STRUCTURE ** CUR_DD PTR N/A CURRENT POINTER TO DATA DESCRIPTIVE ** RECORD STRUCTURE ENTRY ** CUR_DM PTR N/A CURRENT POINTER TO DIMENSION LENGTHS ** STRUCTURE ENTRY ** CUR_DR PTR I CURRENT POINTER TO DATA RECORD STRUCTURE ** ENTRY ** CUR_DV PTR I/O CURRENT POINTER TO DR DATA VALUE SUBFIELD ** STRUCTURE ENTRY ** ROW_DVH PTR N/A CURRENT POINTER TO NEXT SET OF VALUES ** IN DR DATA VALUE SUBFIELD STRUCTURE ENTRY ** CUR_FC PTR N/A CURRENT POINTER TO FORMAT CONTROLS ** STRUCTURE ENTRY ** CUR_LP PTR N/A CURRENT POINTER TO LABELS POINTER ** STRUCTURE ENTRY ** CUR_SL PTR N/A CURRENT POINTER TO DD-LABEL SUBFIELD ** STRUCTURE ENTRY ** CUR_FCR PTR N/A CURRENT POINTER TO ROOT OF FORMAT ** CONTROLS STRUCTURE ** CUR_RP PTR N/A CURRENT POINTER TO RECORD POINTER ** STRUCTURE ** NLD_RP PTR N/A POINTER TO RECORD POINTER STRUCTURE ** WHERE CORRESPONDING DR HAS AN 'R' ** LEADER IDENTIFIER ** SF_FMT CHAR N/A FORMAT CORRESPONDING TO THE CURRENT ** DATA VALUE ** NLD INT N/A NO LEADER FLAG ** TRUE - NO LEADER ** FALSE - LEADER EXISTS ** BIT_CNT INT N/A COUNT OF BITS STORED IN MEMORY FOR ** A SUBSEQUENT READ FUNCTION ** BIT_BIN CHAR N/A BITS STORED IN MEMORY FOR SUBSEQUENT ** READ FUNCTION ** COMPRESS INT N/A FLAG TO SPECIFY COMPRESSED OR ** UNCOMPRESSED ADJACENT FIXED-LENGTH ** BIT FIELD I/O ** 0 - UNCOMPRESSED ** 1 - COMPRESSED ** SF_STATE_DD INT N/A SUBFIELD STATE (DD) ** 1 - FIELD CONTROL SUBFIELD ** 2 - NAME SUBFIELD ** 3 - LABELS SUBFIELD ** 4 - FORMATS SUBFIELD ** 5 - FINISHED ** SF_STATE_DR INT N/A SUBFIELD STATE (DR) ** 1 - NUMBER OF DIMENSIONS SUBFIELD ** 2 - LENGTH OF A DIMENSION SUBFIELD ** 3 - DATA VALUE STRING SUBFIELD ** 4 - FINISHED ** NEXT PTR N/A POINTER TO NEXT FILE MANAGEMENT ** STRUCTURE ** BACK PTR N/A POINTER TO PREVIOUS FILE MANAGEMENT ** STRUCTURE ** ** GLOBAL VARIABLES: ** NAME TYPE USE DESCRIPTION ** CUR_FM PTR I CURRENT POINTER TO FILE MANAGEMENT ** STRUCTURE ENTRY ** ** GLOBAL CONSTANTS: ** NAME TYPE DESCRIPTION ** DEL_STR[3] CHAR CHARACTER STRING CONTAINING THE FIELD AND ** UNIT TERMINATORS ** FT CHAR FIELD TERMINATOR (RS) 1/14 ** NC CHAR NULL CHARACTER ** UT_STR[2] CHAR CHARACTER STRING CONTAINING THE UNIT ** TERMINATOR ** ** CHANGE HISTORY: ** AUTHOR CHANGE_ID DATE CHANGE SUMMARY ** A. DEWITT 04/23/90 INITIAL PROLOG ** A. DEWITT 04/23/90 INITIAL PDL ** P. HODGES 06/06/90 INITIAL CODE ** A. DEWITT 06/25/90 INCLUDED DIMENSION ROW_DVH LOGIC ** L. MCMILLION 10/16/90 REPLACED CALLS TO LIBRARY FUNCTION ** STRTOK() WITH STR123TOK() DUE TO ** NESTING PROBLEMS ** J. TAYLOR 92DR005 05/20/92 CHANGED CALLING SEQUENCE TO ** STR123TOK TO RETURN TOKEN LENGTH ** J. TAYLOR 92DR012 10/30/92 MODIFIED TO REMOVE FT FROM VAL_STR ** BEFORE RETURNING ** L. MCMILLION 93DR027 04/02/93 CHANGED DELIMITER USED BY FIRST ** STR123TOK() CALL FROM UT_STR TO ** DEL_STR FOR VECTOR DATA ** L. MCMILLION 93DR033 07/23/93 UPDATED DR REFERENCE IN PROLOG ** ** PDL: ** ** SET STRING LENGTH TO LENGTH OF BUFFER ** ALLOCATE TEMPORARY BUFFER ** INITIALIZE TEMPORARY BUFFER TO NULL CHARACTER ** COPY BUFFER CONTENTS TO TEMPORARY BUFFER ** SET START OF TEMPORARY BUFFER TO BEGINNING OF TEMPORARY BUFFER ** ALLOCATE NEW_DV { SET UP DUMMY HEADER } ** SET NXT_VAL FIELD OF NEW_DV TO NULL ** SET NXT_VSET FIELD OF NEW_DV TO NULL ** SET VALUES IN CUR_DR TO NEW_DV ** SET ROW_DVH TO NEW_DV ** CALL STR123TOK() TO SEPERATE THE DELIMITED DATA AND RETURN VAL_STR ** WHILE THERE ARE MORE DATA VALUES DO ** ALLOCATE NEW_DV ** SET NXT_VAL TO NULL ** SET NXT_VSET TO NULL ** SET LENGTH TO LENGTH OF VALUE STRING PLUS 1 ** ALLOCATE TEMPORARY STRING SPACE ** SET TEMPORARY STRING TO VALUE STRING ** SET NEW_DV VALUE TO TEMPORARY STRING ** SET NXT_VSET OF ROW_DVH TO NEW_DV ** SET CUR_DV TO NEW_DV ** SET ROW_DVH TO NEW_DV ** SET VAL_CT TO 1 ** CALL STR123TOK() TO RETURN NEXT VAL_STR ** WHILE THERE ARE MORE DATA VALUES AND VAL_CT IS LESS ** THAN PRIM_DMS DO ** ALLOCATE NEW_DV ** SET NXT_VAL TO NULL ** SET NXT_VSET TO NULL ** SET LENGTH TO LENGTH OF VALUE STRING PLUS 1 ** ALLOCATE TEMPORARY STRING SPACE ** SET TEMPORARY STRING TO VALUE STRING ** SET NEW_DV VALUE TO TEMPORARY STRING ** SET CUR_DV TO NEW_DV ** INCREMENT VAL_CT ** CALL STR123TOK() TO RETURN NEXT VAL_STR ** IF VAL_STR IS TERMINATED BY FT, REMOVE IT ** END WHILE ** END WHILE ** FREE SPACE AT START OF TEMPORARY BUFFER ** ** RETURN SUCCESS ** ****************************************************************************** ** CODE SECTION ** ******************************************************************************/ #include "stc123.h" int ret123dv(char *b_ptr,long prim_dms) { /* INTERNAL VARIABLES */ struct dv *new_dv ; long val_ct ; long tok_len ; size_t len ; char *st_tmpbuf, *tmp_buf , *tmp_str , *val_str ; /* SET STRING LENGTH TO LENGTH OF BUFFER */ len = _tcslen(b_ptr) + 1; /* ALLOCATE TEMPORARY BUFFER */ if ((tmp_buf = (char *) malloc((size_t) (len * sizeof(char)))) == NULL) return(0); /* INITIALIZE TEMPORARY BUFFER TO NULL CHARACTER */ *tmp_buf = NC; /* COPY BUFFER CONTENTS TO TEMPORARY BUFFER */ strcpy(tmp_buf,b_ptr); /* SET START OF TEMPORARY BUFFER TO BEGINNING OF TEMPORARY BUFFER */ st_tmpbuf = tmp_buf; /* ALLOCATE NEW_DV { SET UP DUMMY HEADER } */ if((new_dv = (struct dv *) malloc(sizeof(struct dv))) == NULL) return(0); /* SET VALUE FIELD TO NULL */ new_dv->value = NULL; /* SET NXT_VAL FIELD OF NEW_DV TO NULL */ new_dv->nxt_val = NULL; /* SET NXT_VSET FIELD OF NEW_DV TO NULL */ new_dv->nxt_vset = NULL; /* SET VALUES IN CUR_DR TO NEW_DV */ cur_fm->cur_dr->values = new_dv; /* SET ROW_DVH TO NEW_DV */ cur_fm->row_dvh = new_dv; /* CALL STR123TOK() TO SEPARATE THE DELIMITED DATA AND RETURN VAL_STR */ val_str = str123tok(&tmp_buf,DEL_STR,&tok_len); /* WHILE THERE ARE MORE DATA VALUES DO */ while(val_str != NULL) { /* ALLOCATE NEW_DV */ if ((new_dv = (struct dv *) malloc(sizeof(struct dv))) == NULL) return(0); /* SET VALUE POINTER TO NULL */ new_dv->value = NULL; /* SET NXT_VAL TO NULL */ new_dv->nxt_val = NULL; /* SET NXT_VSET TO NULL */ new_dv->nxt_vset = NULL; /* SET LENGTH TO LENGTH OF VALUE STRING PLUS 1 */ len = (size_t) _tcslen(val_str) + 1; /* ALLOCATE TEMPORARY STRING SPACE */ if ((tmp_str = (char *) malloc(len * sizeof(char))) == NULL) return(0); /* INITIALIZE STRING */ tmp_str[0] = NC; /* SET TEMPORARY STRING TO VALUE STRING */ strcpy(tmp_str, val_str); /* SET NEW_DV VALUE TO TEMPORARY STRING */ new_dv->value = tmp_str; <|fim▁hole|> cur_fm->row_dvh->nxt_vset = new_dv; /* SET CUR_DV TO NEW_DV */ cur_fm->cur_dv = new_dv; /* SET ROW_DVH TO NEW_DV */ cur_fm->row_dvh = new_dv; /* SET VAL_CT TO 1 */ val_ct = 1; /* CALL STR123TOK() TO RETURN NEXT VAL_STR */ val_str = str123tok(&tmp_buf,DEL_STR,&tok_len); /* WHILE THERE ARE MORE DATA VALUES AND VAL_CT IS LESS THAN PRIM_DMS DO */ while(val_str != NULL && val_ct < prim_dms) { /* ALLOCATE NEW_DV */ if ((new_dv = (struct dv *) malloc(sizeof(struct dv))) == NULL) return(0); /* SET VALUE FIELD TO NULL */ new_dv->value = NULL; /* SET NXT_VAL TO NULL */ new_dv->nxt_val = NULL; /* SET NXT_VSET TO NULL */ new_dv->nxt_vset = NULL; /* SET LENGTH TO LENGTH OF VALUE STRING PLUS 1 */ len = (size_t) _tcslen(val_str) + 1; /* ALLOCATE TEMPORARY STRING SPACE */ if ((tmp_str = (char *) malloc(len * sizeof(char))) == NULL) return(0); /* INITIALIZE STRING */ tmp_str[0] = NC; /* SET TEMPORARY STRING TO VALUE STRING */ strcpy(tmp_str, val_str); /* SET NEW_DV VALUE TO TEMPORARY STRING */ new_dv->value = tmp_str; /* SET NXT_VAL OF CUR_DV TO NEW_DV */ cur_fm->cur_dv->nxt_val = new_dv; /* SET CUR_DV TO NEW_DV */ cur_fm->cur_dv = new_dv; /* INCREMENT VAL_CT */ val_ct++; /* CALL STR123TOK() TO RETURN NEXT VAL_STR */ val_str = str123tok(&tmp_buf,UT_STR,&tok_len); /* IF VAL_STR IS TERMINATED BY FT, REMOVE IT */ if (val_str != NULL) { /* SET LENGTH TO LENGTH OF VALUE STRING */ len = (size_t) _tcslen(val_str); if (val_str[len-1] == FT) { val_str[len-1] = NC; len--; } } } } /* FREE SPACE AT START OF TEMPORARY BUFFER */ free(st_tmpbuf); /* RETURN SUCCESS */ return(1); }<|fim▁end|>
/* SET NXT_VSET OF ROW_DVH TO NEW_DV */
<|file_name|>simpleweapon.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Copyright Tom SF Haines # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import posixpath import random import math from bin.shared import ray_cast from bin.shared import csp from direct.actor.Actor import Actor from direct.interval.IntervalGlobal import * from direct.interval.ActorInterval import ActorInterval from panda3d.core import * from panda3d.ode import * class SimpleWeapon: """Provides a simple weapon system - not very sophisticaed, but good enough to test shooting things.""" def __init__(self,manager,xml): self.gunView = render.attachNewNode('gun-view') self.ray = None self.reload(manager,xml) def destroy(self): self.gunView.removeNode() if self.ray!=None: self.ray.destroy() def reload(self,manager,xml): # Get the path to load weapons from... basePath = manager.get('paths').getConfig().find('weapons').get('path') # Variables to manage the firing state (Used G36 as reference for defaults.)... bullet = xml.find('bullet') if bullet!=None: self.bulletRate = float(bullet.get('rate',1.0/12.5)) self.bulletSpeed = float(bullet.get('speed',920.0)) self.bulletWeight = float(bullet.get('mass',0.004)) else: self.bulletRate = 1.0/12.5 self.bulletSpeed = 920.0 self.bulletWeight = 0.004 # Determine the weapon meshes path... self.meshPath = posixpath.join(basePath, xml.find('egg').get('file')) # Get the camera interface, so we can zoom in when the player aims... self.camera = manager.get(xml.find('camera').get('plugin')) <|fim▁hole|> self.gunView.reparentTo(manager.get(parent.get('plugin')).getNode(parent.get('node'))) # Create a ray cast to detect what the player is looking at... and what will be shot... self.space = manager.get('ode').getSpace() if self.ray!=None: self.ray.destroy() self.ray = OdeRayGeom(100.0) self.ray.setCategoryBits(BitMask32(0xfffffffe)) self.ray.setCollideBits(BitMask32(0xfffffffe)) # Get all the stuff we need to do the muzzle flash particle effect... flash = xml.find('muzzle_flash') self.flashManager = manager.get(flash.get('plugin')) self.flashEffect = flash.get('effect') self.flashBone = flash.get('bone') # Will be swapped out for the actual node latter. self.flashPos = csp.getPos(flash.get('pos')) # Get all the stuff we need to do the bullet hit sparks effect... sparks = xml.find('sparks') self.sparksManager = manager.get(sparks.get('plugin')) self.sparksEffect = sparks.get('effect') # Create a quaternion that rotates +ve z to +ve y - used to point it in the weapon direction rather than up... self.zToY = Quat() self.zToY.setFromAxisAngle(-90.0,Vec3(1.0,0.0,0.0)) # State for the animation... self.state = False # False==casual, True==aim. self.nextState = False # Firing state... self.firing = False # True if the trigger is being held. self.triggerTime = 0.0 # How long the trigger has been held for, so we know when to eject ammo. # For bullet holes bh = xml.find('bullet_holes') if bh != None: self.bulletHoles = manager.get(bh.get('plugin')) else: self.bulletHoles = None def postInit(self): for i in self.postReload(): yield i def postReload(self): # Load the actor... self.mesh = Actor(self.meshPath) yield # Shader generator makes it shiny, plus we need it in the right places in the render graph... self.mesh.setShaderAuto() self.mesh.reparentTo(self.gunView) self.mesh.hide() yield # Set its animation going... except we pause it until needed... self.nextAni() self.interval.pause() # Gun flash requires an exposed bone... self.flashBone = self.mesh.exposeJoint(None,"modelRoot",self.flashBone) yield def gunControl(self,task): # Update the gun direction ray to follow the players view... self.ray.setPosition(self.gunView.getPos(render)) self.ray.setQuaternion(self.zToY.multiply(self.gunView.getQuat(render))) # If the gun is firing update the trigger time, if a bullet is ejected do the maths... if self.firing: dt = globalClock.getDt() self.triggerTime += dt while self.triggerTime>self.bulletRate: self.triggerTime -= self.bulletRate hit,pos,norm = ray_cast.nearestHit(self.space,self.ray) # Create a muzzle flash effect... self.flashManager.doEffect(self.flashEffect, self.flashBone, True, self.flashPos) if hit: # Create an impact sparks effect... # Calculate the reflection direction... rd = self.ray.getDirection() sparkDir = (norm * (2.0*norm.dot(rd))) - rd # Convert the reflection direction into a quaternion that will rotate +ve z to the required direction... try: ang = -math.acos(sparkDir[2]) except: print 'Angle problem', sparkDir ang = 0.0 axis = Vec3(0.0,0.0,1.0).cross(sparkDir) axis.normalize() sparkQuat = Quat() sparkQuat.setFromAxisAngleRad(ang,axis) # Set it going... self.sparksManager.doEffect(self.sparksEffect, render, False, pos, sparkQuat) # Make a bullet hole if hit.hasBody() and isinstance(hit.getBody().getData(), NodePath): self.bulletHoles.makeNew(pos, norm, hit.getBody().getData()) else: self.bulletHoles.makeNew(pos, norm, None) # Impart some energy on the object... if hit and hit.hasBody(): body = hit.getBody() # Calculate the force required to supply the energy the bullet contains to the body... force = self.bulletWeight*self.bulletSpeed/0.05 # Get the direction of travel of the bullet, multiply by force... d = self.ray.getDirection() d *= force # If the object is asleep awaken it... if not body.isEnabled(): body.enable() # Add the force to the object... body.addForceAtPos(d,pos) return task.cont def start(self): # Make the gun visible... self.mesh.show() # Set the gun animation going... self.interval.finish() # Weapon task - this primarily makes it shoot... self.task = taskMgr.add(self.gunControl,'GunControl') def stop(self): self.interval.pause() self.mesh.hide() taskMgr.remove(self.task) def nextAni(self): self.state = self.nextState if self.state: ani = random.choice(('aim_wiggle_a','aim_wiggle_b','aim_wiggle_c')) else: ani = random.choice(('casual_wiggle_a','casual_wiggle_b','casual_wiggle_c')) self.mesh.pose(ani,0) self.interval = Sequence(self.mesh.actorInterval(ani),Func(self.nextAni)) self.interval.start() def setAiming(self,s): if self.nextState!=s: self.interval.pause() self.nextState = s self.camera.setZoomed(s) def wib(): self.interval.finish() if s: ani = 'casual_aim' else: ani = 'aim_casual' transition = Sequence(self.mesh.actorInterval(ani),Func(wib)) transition.start() def setFiring(self,s): self.firing = s if self.firing: self.triggerTime = 0.0<|fim▁end|>
# Create our gun node - both the gun and the ray used for shooting track this - allows for gun jitter, kick back etc... parent = xml.find('parent')
<|file_name|>multitrait.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or<|fim▁hole|>// option. This file may not be copied, modified, or distributed // except according to those terms. struct S { y: isize } impl Cmp, ToString for S { //~ ERROR: expected one of `(`, `+`, `::`, or `{`, found `,` fn eq(&&other: S) { false } fn to_string(&self) -> String { "hi".to_string() } }<|fim▁end|>
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
<|file_name|>issue-31702.rs<|end_file_name|><|fim▁begin|>// run-pass // aux-build:issue-31702-1.rs // aux-build:issue-31702-2.rs // this test is actually entirely in the linked library crates extern crate issue_31702_1;<|fim▁hole|><|fim▁end|>
extern crate issue_31702_2; fn main() {}
<|file_name|>threejs_coordinator.ts<|end_file_name|><|fim▁begin|>/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ import * as THREE from 'three'; import {Coordinator} from './coordinator'; import {Rect} from './internal_types'; /** * Unlike Coordinator, ThreeCoordinator uses internal coordinate system. * * Three.js has a notion of camera and it can efficiently update the canvas when the * canvas dimension changes; it does not have to re-transform coordinates of each * DataSeries, but, instead, only have to update the camera. * * In this coordinator, the output coordinate system is static from [0, 1000]. */ export class ThreeCoordinator extends Coordinator { override isYAxisPointedDown() { return false; } private readonly camera = new THREE.OrthographicCamera( 0, 1000, 1000, 0, 0, 100 ); <|fim▁hole|> // We set the camera extent based on the dom container size so the dimensions in // camera coordinate corresponds to dimensions in pixels. This way, in order to draw, // for example a circle, we don't have to map a pixel size to camera dimensions // (which may have different aspect ratio and can draw a circle as an oval). super.setDomContainerRect(rect); this.camera.left = rect.x; this.camera.right = rect.x + rect.width; this.camera.top = rect.y + rect.height; this.camera.bottom = rect.y; this.camera.updateProjectionMatrix(); } getCamera() { return this.camera; } }<|fim▁end|>
override setDomContainerRect(rect: Rect) {
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>''' test.backend.svrtools.crypto.__init__ ''' import unittest from my.backend import Backend # from my.backend.crypto import is_this_the_correct_dollhouse_password from my.globals.exceptions import WrongDollhousePasswordError from my.miscellany import random_alphanum_string class Test_is_this_the_correct_dollhouse_password(unittest.TestCase): ''' Test the supplied password. If it's the right one to decrypt the dollhouse key, return True; else, False. Inputs: pw string; if we are using DMCRYPT, use 'pw' to try to decrypt the key and check to see if the password is the right onw. If we are using ECRYPTFS, this function is not used because ECRYPTFS (in our usage) does not use an external keyfile but uses a password instead. Outputs: True/False is password valid? ''' def setUp(self): Backend.dollhouse.close() def tearDown(self): Backend.dollhouse.close() # def test_is_this_the_correct_dollhouse_password(self): # '''<|fim▁hole|># Backend.dollhouse.open(pw=pw, wipe=True) # self.assertTrue(Backend.dollhouse.is_open) # Backend.dollhouse.close() # self.assertTrue(is_this_the_correct_dollhouse_password(pw)) # self.assertFalse(Backend.dollhouse.is_open) # self.assertFalse(is_this_the_correct_dollhouse_password('nahh' + pw + 'unhunh')) def test_data_persistence(self): ''' Test the mounted dollhouse's ability to retain information in between dismounts/mounts. ''' for pw in (random_alphanum_string(20), '12345678', random_alphanum_string(10), '12345678'): Backend.dollhouse.open(pw=pw, wipe=True) teststring = random_alphanum_string(30) with open('/.home_rw/hi.txt', 'w') as f: f.write(teststring) Backend.dollhouse.close() Backend.dollhouse.open(pw=pw) with open('/.home_rw/hi.txt', 'r') as f: whatwassaved = f.read() self.assertEqual(teststring, whatwassaved) Backend.dollhouse.close() try: Backend.dollhouse.open(teststring) raise Exception('We should not be able to unlock with wrong password') except WrongDollhousePasswordError: pass self.assertTrue(Backend.dollhouse.is_open) Backend.dollhouse.close() self.assertFalse(Backend.dollhouse.is_open) if __name__ == "__main__": # import sys;sys.argv = ['', 'Test.testName'] unittest.main() if __name__ == "__main__": unittest.main()<|fim▁end|>
# Test is_this_the_correct_dollhouse_password() with a known-good password # and then with a known-bad password # ''' # for pw in (random_alphanum_string(20), '12345678', random_alphanum_string(10), '12345678'):
<|file_name|>testSceneList.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import sys sys.path.append("../../") #import pyRay as ra import pyRay.scene as scn # TODO : how to pass arguments from function header?<|fim▁hole|>object2 = ("obj1",("f","f3"),[( "U","sdBox" ,"%s",("2",) ),( "S","sdSphere","%s",("1",) )]) object3 = ("obj2",("f","f2"),[( "U","sdBox" ,"%s",(("2",1.0),) ),( "S","sdSphere","%s",("1",) )]) scene = [ ( "U","sdBox" ,"%s",((1.0,1.0,1.0),) ), ( "S","sdSphere","%s",(1.2,) ), ] scene_src = scn.parseSceneList(scene) print scene_src<|fim▁end|>
object1 = ("obj1",(), [( "U","sdBox" ,"%s",((1.0,1.0,1.0),) ),( "S","sdSphere","%s",(1.2,) )])
<|file_name|>p052.rs<|end_file_name|><|fim▁begin|>//! [Problem 52](https://projecteuler.net/problem=52) solver. #![warn(bad_style, unused, unused_extern_crates, unused_import_braces,<|fim▁hole|>#[macro_use(problem)] extern crate common; extern crate integer; use integer::Integer; fn compute() -> u32 { let radix = 10; let repeat = 6; let mut n = 0; let mut order = 1; let mut limit = (order - 1) / repeat; loop { n += 1; if n > limit { // skip if the num of digits of n * repeat is not the same with n. n = order; order *= radix; limit = (order - 1) / repeat; } let ds = n.into_digit_histogram(); // n * 5 must contains 0 or 5. if ds[0] == 0 && ds[5] == 0 { continue } // n * 2, n * 4 must contains some evens. if ds[0] == 0 && ds[2] == 0 && ds[4] == 0 && ds[6] == 0 && ds[8] == 0 { continue } if ds != (n * 2).into_digit_histogram() { continue } if ds != (n * 3).into_digit_histogram() { continue } if ds != (n * 4).into_digit_histogram() { continue } if ds != (n * 5).into_digit_histogram() { continue } if ds != (n * 6).into_digit_histogram() { continue } return n } } fn solve() -> String { compute().to_string() } problem!("142857", solve);<|fim▁end|>
unused_qualifications, unused_results)]
<|file_name|>names.rs<|end_file_name|><|fim▁begin|>//! This example of boxcars extracts all the player names found in the "PlayerStats" property of the //! header. This property may be absent in some replays or lack players that drop or join mid-game. //! A more foolproof approach is to scour the network data for a specific property: //! "Engine.PlayerReplicationInfo:PlayerName". This example shows both methods. The error handling //! demonstrated is minimal, relying on stringly typed errors. In practice, prefer a richer error //! type. use boxcars::{HeaderProp, ParserBuilder};<|fim▁hole|>use std::error; use std::io::{self, Read}; /// Given an array of objects (represented as a slice of key-value pairs), find all the instances /// of the "Name" key and extract the string value fn names_in_header(stats: &[Vec<(String, HeaderProp)>]) -> impl Iterator<Item = &str> { stats .iter() .flat_map(|v| v.iter()) .filter(|(prop_name, _)| *prop_name == "Name") .filter_map(|(_, prop_val)| prop_val.as_string()) } /// Given network frames and the object id to "Engine.PlayerReplicationInfo:PlayerName", comb /// through all the attributes looking for attributes that have our object id. fn names_in_network(frames: &[boxcars::Frame], name_attribute_id: boxcars::ObjectId) -> Vec<&str> { let mut names = frames .iter() .flat_map(|x| x.updated_actors.iter()) .filter(|attr| attr.object_id == name_attribute_id) .filter_map(|attr| { // PlayerName will be a string attribute if let boxcars::Attribute::String(ref s) = attr.attribute { Some(s.as_str()) } else { None } }) .collect::<Vec<_>>(); // This list will contain many duplicates so we dedup it before returning. names.sort(); names.dedup(); names } /// This function looks up the object id for "Engine.PlayerReplicationInfo:PlayerName". The object /// id is the same as the index of that value in the `replay.objects` array. fn player_name_object_id( replay: &boxcars::Replay, ) -> Result<boxcars::ObjectId, Box<dyn error::Error>> { let id = replay .objects .iter() .position(|val| val == "Engine.PlayerReplicationInfo:PlayerName") .map(|index| boxcars::ObjectId(index as i32)) .ok_or("Expected Engine.PlayerReplicationInfo:PlayerName to be present in replay")?; Ok(id) } fn main() -> Result<(), Box<dyn error::Error>> { let mut data = Vec::new(); io::stdin().read_to_end(&mut data)?; let replay = ParserBuilder::new(&data[..]) .on_error_check_crc() .ignore_network_data_on_error() .parse()?; let stats_prop = replay .properties .iter() .find(|(prop, _)| *prop == "PlayerStats"); if let Some((_, stats)) = stats_prop { println!("Names in the header data:"); let stats = match stats { HeaderProp::Array(arr) => arr, _ => return Err("expected player stats to be an array".into()), }; let header_names = names_in_header(&stats); for name in header_names { println!("{}", name); } } else { println!("No player names found in the header"); } if let Some(network) = replay.network_frames.as_ref() { println!("Names in the network data:"); let name_attribute_id = player_name_object_id(&replay)?; let names = names_in_network(&network.frames, name_attribute_id); for name in names { println!("{}", name); } } else { println!("No player names found in the header as network data couldn't be decoded") } Ok(()) }<|fim▁end|>
<|file_name|>stack_sq.go<|end_file_name|><|fim▁begin|>package stack /* #include<stdio.h> */ import ( "bufio" "errors" "fmt" "os" ) const ( MAXSIZE int = 100 ) // var ( // stackArray [MAXSIZE]int // ) type SqStack struct { base int top int StackSize int stackArray [MAXSIZE]int } //Init 初始化一个栈 func (s *SqStack) Init() { s.base = 0 s.top = 0 s.StackSize = MAXSIZE } //Push 推入栈中一个元素 func (s *SqStack) Push(e int) error { if s.top-s.base >= MAXSIZE { return errors.New("栈满") } s.stackArray[s.top] = e s.top++ return nil } //Pop 出栈一个元素 func (s *SqStack) Pop() (int, error) { if s.top <= s.base { return 0, errors.New("栈空") } e := s.stackArray[s.top-1] s.top-- return e, nil } //Peek 查看一下栈顶元素的值 func (s *SqStack) Peek() (int, error) { if s.top <= s.base { return 0, errors.New("栈空") } return s.stackArray[s.top-1], nil } //Conversion 数制转换 func Conversion(num int) { stack := &SqStack{} stack.Init() n := num for n != 0 { stack.Push(n % 8) n /= 8 } for { e, err := stack.Pop() if err != nil { break } fmt.Printf("%d ", e) } fmt.Println() } //getParenthesis 获取配对的字符 func getParenthesis(c int) int { switch c { case int('('): return int(')') case int('['): return int(']') case int('{'): return int('}') } return 0 } //ParenthesisMatching 括号匹配 func ParenthesisMatching(data string) bool { if len(data)%2 != 0 || len(data) <= 0 { return false } stack := &SqStack{} stack.Init() for i := 0; i < len(data); i++ { switch data[i] { case '(': fallthrough case '[': fallthrough case '{': stack.Push(int(data[i]))<|fim▁hole|> default: e, err := stack.Pop() if err != nil { return false } if getParenthesis(e) != int(data[i]) { return false } } } return true } //LineEdit 行编辑程序 func LineEdit() { reader := bufio.NewReader(os.Stdin) stack := &SqStack{} stack.Init() for { c, _ := reader.ReadByte() if c == '\r' || c == '\n' { break } switch c { case '#': stack.Pop() case '@': for { _, err := stack.Pop() if err != nil { return } } default: stack.Push(int(c)) } } stack1 := &SqStack{} stack1.Init() for { e, err := stack.Pop() if err != nil { break } stack1.Push(e) } for { e, err := stack1.Pop() if err != nil { break } fmt.Printf("%c", e) } }<|fim▁end|>
<|file_name|>unity_impl.cpp<|end_file_name|><|fim▁begin|>// Copyright (c) 2020 The Gulden developers // Authored by: Malcolm MacLeod ([email protected]) // Distributed under the GULDEN software license, see the accompanying // file COPYING //Workaround braindamaged 'hack' in libtool.m4 that defines DLL_EXPORT when building a dll via libtool (this in turn imports unwanted symbols from e.g. pthread that breaks static pthread linkage) #ifdef DLL_EXPORT #undef DLL_EXPORT #endif // Unity specific includes #include "unity_impl.h" #include "libinit.h" // Standard gulden headers #include "appname.h" #include "clientversion.h" #include "util.h" #include "witnessutil.h" #include "ui_interface.h" #include "unity/appmanager.h" #include "utilmoneystr.h" #include <chain.h> #include "consensus/validation.h" #include "net.h" #include "wallet/mnemonic.h" #include "net_processing.h" #include "wallet/spvscanner.h" #include "sync.h" #include "init.h" // Djinni generated files #include "i_library_controller.hpp" #include "i_library_listener.hpp" #include "qr_code_record.hpp" #include "balance_record.hpp" #include "uri_record.hpp" #include "uri_recipient.hpp" #include "mutation_record.hpp" #include "input_record.hpp" #include "output_record.hpp" #include "address_record.hpp" #include "peer_record.hpp" #include "block_info_record.hpp" #include "monitor_record.hpp" #include "monitor_listener.hpp" #include "payment_result_status.hpp" #include "mnemonic_record.hpp" #ifdef __ANDROID__ #include "djinni_support.hpp" #endif // External libraries #include <boost/algorithm/string.hpp> #include <boost/program_options/parsers.hpp> #include <qrencode.h> #include <memory> #include "pow/pow.h" #include <crypto/hash/sigma/sigma.h> #include <algorithm> std::shared_ptr<ILibraryListener> signalHandler; CCriticalSection cs_monitoringListeners; std::set<std::shared_ptr<MonitorListener> > monitoringListeners; boost::asio::io_context ioctx; boost::asio::executor_work_guard<boost::asio::io_context::executor_type> work = boost::asio::make_work_guard(ioctx); boost::thread run_thread(boost::bind(&boost::asio::io_context::run, boost::ref(ioctx))); static const int64_t nClientStartupTime = GetTime(); std::vector<CAccount*> GetAccountsForAccount(CAccount* forAccount) { std::vector<CAccount*> forAccounts; forAccounts.push_back(forAccount); for (const auto& [accountUUID, account] : pactiveWallet->mapAccounts) { (unused) accountUUID; if (account->getParentUUID() == forAccount->getUUID()) { forAccounts.push_back(account); } } return forAccounts; } TransactionStatus getStatusForTransaction(const CWalletTx* wtx) { TransactionStatus status; int depth = wtx->GetDepthInMainChain(); if (depth < 0) status = TransactionStatus::CONFLICTED; else if (depth == 0) { if (wtx->isAbandoned()) status = TransactionStatus::ABANDONED; else status = TransactionStatus::UNCONFIRMED; } else if (depth < RECOMMENDED_CONFIRMATIONS) { status = TransactionStatus::CONFIRMING; } else { status = TransactionStatus::CONFIRMED; } return status; } std::string getRecipientAddressesForWalletTransaction(CAccount* forAccount, CWallet* pWallet, const CWalletTx* wtx, bool isSentByUs) { std::string address = ""; for (const CTxOut& txout: wtx->tx->vout) { bool isMine = false; if (forAccount && IsMine(*forAccount, txout)) { isMine = true; } if (!forAccount && pWallet && IsMine(*pWallet, txout)) { isMine = true; } if ((isSentByUs && !isMine) || (!isSentByUs && isMine)) { CNativeAddress addr; CTxDestination dest; if (!ExtractDestination(txout, dest) && !txout.IsUnspendable()) { dest = CNoDestination(); } if (addr.Set(dest)) { if (!address.empty()) address += ", "; address += addr.ToString(); } } } return address; } void addMutationsForTransaction(const CWalletTx* wtx, std::vector<MutationRecord>& mutations, CAccount* forAccount) { // exclude generated that are orphaned if (wtx->IsCoinBase() && wtx->GetDepthInMainChain() < 1) return; int64_t subtracted = wtx->GetDebit(ISMINE_SPENDABLE, forAccount, true); int64_t added = wtx->GetCredit(ISMINE_SPENDABLE, forAccount, true) + wtx->GetImmatureCredit(false, forAccount, true); uint64_t time = wtx->nTimeSmart; std::string hash = wtx->GetHash().ToString(); TransactionStatus status = getStatusForTransaction(wtx); int depth = wtx->GetDepthInMainChain(); // if any funds were subtracted the transaction was sent by us if (subtracted > 0) { int64_t fee = subtracted - wtx->tx->GetValueOut(); int64_t change = wtx->GetChange(); std::string recipientAddresses = getRecipientAddressesForWalletTransaction(forAccount, pactiveWallet, wtx, true); // detect internal transfer and split it if (subtracted - fee == added) { // amount received mutations.push_back(MutationRecord(added - change, time, hash, recipientAddresses, status, depth)); // amount send including fee mutations.push_back(MutationRecord(change - subtracted, time, hash, recipientAddresses, status, depth)); } else { mutations.push_back(MutationRecord(added - subtracted, time, hash, recipientAddresses, status, depth)); } } else if (added != 0) // nothing subtracted so we received funds { std::string recipientAddresses = getRecipientAddressesForWalletTransaction(forAccount, pactiveWallet, wtx, false); mutations.push_back(MutationRecord(added, time, hash, recipientAddresses, status, depth)); } } TransactionRecord calculateTransactionRecordForWalletTransaction(const CWalletTx& wtx, std::vector<CAccount*>& forAccounts, bool& anyInputsOrOutputsAreMine) { CWallet* pwallet = pactiveWallet; std::vector<InputRecord> inputs; std::vector<OutputRecord> outputs; int64_t subtracted=0; int64_t added=0; for (const auto& account : forAccounts) { subtracted += wtx.GetDebit(ISMINE_SPENDABLE, account, true); added += wtx.GetCredit(ISMINE_SPENDABLE, account, true); } CAmount fee = 0; // if any funds were subtracted the transaction was sent by us if (subtracted > 0) fee = subtracted - wtx.tx->GetValueOut(); const CTransaction& tx = *wtx.tx; for (const CTxIn& txin: tx.vin) { std::string address; CNativeAddress addr; CTxDestination dest = CNoDestination(); // Try to extract destination, this is not possible in general. Only if the previous // ouput of our input happens to be in our wallet. Which will usually only be the case for // our own transactions. uint256 txHash; if (txin.GetPrevOut().isHash) { txHash = txin.GetPrevOut().getTransactionHash(); } else { if (!pwallet->GetTxHash(txin.GetPrevOut(), txHash)) { LogPrintf("Transaction with no corresponding hash found, txid [%d] [%d]\n", txin.GetPrevOut().getTransactionBlockNumber(), txin.GetPrevOut().getTransactionIndex()); continue; } } std::map<uint256, CWalletTx>::const_iterator mi = pwallet->mapWallet.find(txHash); if (mi != pwallet->mapWallet.end()) { const CWalletTx& prev = (*mi).second; if (txin.GetPrevOut().n < prev.tx->vout.size()) { const auto& prevOut = prev.tx->vout[txin.GetPrevOut().n]; if (!ExtractDestination(prevOut, dest) && !prevOut.IsUnspendable()) { LogPrintf("Unknown transaction type found, txid %s\n", wtx.GetHash().ToString()); dest = CNoDestination(); } } } if (addr.Set(dest)) { address = addr.ToString(); } std::string label; std::string description; if (pwallet->mapAddressBook.count(address)) { const auto& data = pwallet->mapAddressBook[address]; label = data.name; description = data.description; } bool isMine = false; for (const auto& account : forAccounts) { if (static_cast<const CExtWallet*>(pwallet)->IsMine(*account, txin)) { isMine = true; anyInputsOrOutputsAreMine = true; } } inputs.push_back(InputRecord(address, label, description, isMine)); } for (const CTxOut& txout: tx.vout) { std::string address; CNativeAddress addr; CTxDestination dest; if (!ExtractDestination(txout, dest) && !txout.IsUnspendable()) { LogPrintf("Unknown transaction type found, txid %s\n", tx.GetHash().ToString()); dest = CNoDestination(); } if (addr.Set(dest)) { address = addr.ToString(); } std::string label; std::string description; if (pwallet->mapAddressBook.count(address)) { const auto& data = pwallet->mapAddressBook[address]; label = data.name; description = data.description; } bool isMine = false; for (const auto& account : forAccounts) { if (IsMine(*account, txout)) { isMine = true; anyInputsOrOutputsAreMine = true; } } outputs.push_back(OutputRecord(txout.nValue, address, label, description, isMine)); } TransactionStatus status = getStatusForTransaction(&wtx); return TransactionRecord(wtx.GetHash().ToString(), wtx.nTimeSmart, added - subtracted, fee, status, wtx.nHeight, wtx.nBlockTime, wtx.GetDepthInMainChain(), inputs, outputs); } // rate limited balance change notifier static CRateLimit<int>* balanceChangeNotifier=nullptr; // rate limited new mutations notifier static CRateLimit<std::pair<uint256, bool>>* newMutationsNotifier=nullptr; void terminateUnityFrontend() { if (signalHandler) { signalHandler->notifyShutdown(); } // Allow frontend time to clean up and free any references to objects before unloading the library // Otherwise we get a free after close (on macOS at least) while (signalHandler.use_count() > 1) { MilliSleep(50); } signalHandler=nullptr; } #include <boost/chrono/thread_clock.hpp> static float lastProgress=0; void handlePostInitMain() { //fixme: (SIGMA) (PHASE4) Remove this once we have witness-header-sync // Select appropriate verification factor based on devices performance. std::thread([=] { // When available measure thread relative cpu time to avoid effects of thread suspension // which occur when observing system time. #if false && defined(BOOST_CHRONO_HAS_THREAD_CLOCK) && BOOST_CHRONO_THREAD_CLOCK_IS_STEADY boost::chrono::time_point tpStart = boost::chrono::thread_clock::now(); #else uint64_t nStart = GetTimeMicros(); #endif // note that measurement is on single thread, which makes the measurement more stable // actual verification might use more threads which helps overall app performance sigma_verify_context verify(defaultSigmaSettings, 1); CBlockHeader header; verify.verifyHeader<1>(header); // We want at least 1000 blocks per second #if false && defined(BOOST_CHRONO_HAS_THREAD_CLOCK) && BOOST_CHRONO_THREAD_CLOCK_IS_STEADY boost::chrono::microseconds ms = boost::chrono::duration_cast<boost::chrono::microseconds>(boost::chrono::thread_clock::now() - tpStart); uint64_t nTotal = ms.count(); #else uint64_t nTotal = GetTimeMicros() - nStart; #endif uint64_t nPerSec = 1000000/nTotal; if (nPerSec > 1000) // Fast enough to do most the blocks { verifyFactor = 5; } else if(nPerSec > 0) // Slower so reduce the number of blocks { // 2 in verifyFactor chance of verifying. // We verify 2 in verifyFactor blocks - or target_speed/(num_per_sec/2) verifyFactor = 1000/(nPerSec/2.0); verifyFactor = std::max((uint64_t)5, verifyFactor); verifyFactor = std::min((uint64_t)200, verifyFactor); } LogPrintf("unity: selected verification factor %d", verifyFactor); }).detach(); if (signalHandler) { signalHandler->notifyCoreReady(); } // unified progress notification if (!GetBoolArg("-spv", DEFAULT_SPV)) { static bool haveFinishedHeaderSync=false; static int totalHeaderCount=0; static int startHeight = chainActive.Tip() ? chainActive.Tip()->nHeight : 0; // If tip is relatively recent set progress to "completed" to begin with if (chainActive.Tip() && ((GetTime() - chainActive.Tip()->nTime) < 3600)) { lastProgress = 1.0; } // Weight a full header sync as 20%, blocks as rest uiInterface.NotifyHeaderProgress.connect([=](int currentCount, int probableHeight, int headerTipHeight, int64_t headerTipTime) { totalHeaderCount = currentCount; if (currentCount == probableHeight) { haveFinishedHeaderSync = true; } if (!haveFinishedHeaderSync && signalHandler && IsInitialBlockDownload()) { float progress = ((((float)currentCount-startHeight)/((float)probableHeight-startHeight))*0.20); if (lastProgress != 1 && (progress-lastProgress > 0.02 || progress == 1)) { lastProgress = progress; signalHandler->notifyUnifiedProgress(progress); } } }); uiInterface.NotifyBlockTip.connect([=](bool isInitialBlockDownload, const CBlockIndex* pNewTip) { if (haveFinishedHeaderSync && signalHandler) { float progress = pNewTip->nHeight==totalHeaderCount?1:((0.20+((((float)pNewTip->nHeight-startHeight)/((float)totalHeaderCount-startHeight))*0.80))); if (lastProgress != 1 && (progress-lastProgress > 0.02 || progress == 1)) { lastProgress = progress; signalHandler->notifyUnifiedProgress(progress); } } }); } else { uiInterface.NotifyUnifiedProgress.connect([=](float progress) { if (signalHandler) { signalHandler->notifyUnifiedProgress(progress); } }); // monitoring listeners notifications uiInterface.NotifyHeaderProgress.connect([=](int, int, int, int64_t) { int32_t height, probable_height, offset; { LOCK(cs_main); height = partialChain.Height(); probable_height = GetProbableHeight(); offset = partialChain.HeightOffset(); } LOCK(cs_monitoringListeners); for (const auto &listener: monitoringListeners) { listener->onPartialChain(height, probable_height, offset); } }); uiInterface.NotifySPVPrune.connect([=](int height) { LOCK(cs_monitoringListeners); for (const auto &listener: monitoringListeners) { listener->onPruned(height); } }); uiInterface.NotifySPVProgress.connect([=](int /*start_height*/, int processed_height, int /*probable_height*/) { LOCK(cs_monitoringListeners); for (const auto &listener: monitoringListeners) { listener->onProcessedSPVBlocks(processed_height); } }); } // Update transaction/balance changes if (pactiveWallet) { // Fire events for transaction depth changes (up to depth 10 only) pactiveWallet->NotifyTransactionDepthChanged.connect( [&](CWallet* pwallet, const uint256& hash) { LOCK2(cs_main, pwallet->cs_wallet); if (pwallet->mapWallet.find(hash) != pwallet->mapWallet.end()) { const CWalletTx& wtx = pwallet->mapWallet[hash]; LogPrintf("unity: notify transaction depth changed %s",hash.ToString().c_str()); if (signalHandler) { std::vector<CAccount*> forAccounts = GetAccountsForAccount(pactiveWallet->activeAccount); bool anyInputsOrOutputsAreMine = false; TransactionRecord walletTransaction = calculateTransactionRecordForWalletTransaction(wtx, forAccounts, anyInputsOrOutputsAreMine); if (anyInputsOrOutputsAreMine) { signalHandler->notifyUpdatedTransaction(walletTransaction); } } } } ); // Fire events for transaction status changes, or new transactions (this won't fire for simple depth changes) pactiveWallet->NotifyTransactionChanged.connect( [&](CWallet* pwallet, const uint256& hash, ChangeType status, bool fSelfComitted) { LOCK2(cs_main, pwallet->cs_wallet); if (pwallet->mapWallet.find(hash) != pwallet->mapWallet.end()) { if (status == CT_NEW) { newMutationsNotifier->trigger(std::make_pair(hash, fSelfComitted)); } else if (status == CT_UPDATED && signalHandler) { LogPrintf("unity: notify tx updated %s",hash.ToString().c_str()); const CWalletTx& wtx = pwallet->mapWallet[hash]; std::vector<CAccount*> forAccounts = GetAccountsForAccount(pactiveWallet->activeAccount); bool anyInputsOrOutputsAreMine = false; TransactionRecord walletTransaction = calculateTransactionRecordForWalletTransaction(wtx, forAccounts, anyInputsOrOutputsAreMine); if (anyInputsOrOutputsAreMine) { signalHandler->notifyUpdatedTransaction(walletTransaction); } } //fixme: (UNITY) - Consider implementing f.e.x if a 0 conf transaction gets deleted... // else if (status == CT_DELETED) } balanceChangeNotifier->trigger(0); }); // Fire once immediately to update with latest on load. balanceChangeNotifier->trigger(0); } } void handleInitWithExistingWallet() { if (signalHandler) { signalHandler->notifyInitWithExistingWallet(); } AppLifecycleManager::gApp->initialize(); } void handleInitWithoutExistingWallet() { signalHandler->notifyInitWithoutExistingWallet(); } std::string ILibraryController::BuildInfo() { std::string info = FormatThreeDigitVersion(); #if defined(__aarch64__) info += " aarch64"; #elif defined(__arm__) info += " arm (32bit)"; #elif defined(__x86_64__) info += " x86_64"; #elif defined(__i386__) info += " x86"; #endif return info; } bool ILibraryController::InitWalletFromRecoveryPhrase(const std::string& phrase, const std::string& password) { // Refuse to acknowledge an empty recovery phrase, or one that doesn't pass even the most obvious requirement if (phrase.length() < 16) { return false; } //fixme: (UNITY) (SPV) - Handle all the various birth date (or lack of birthdate) cases here instead of just the one. SecureString phraseOnly; int phraseBirthNumber = 0; AppLifecycleManager::gApp->splitRecoveryPhraseAndBirth(phrase.c_str(), phraseOnly, phraseBirthNumber); if (!checkMnemonic(phraseOnly)) { return false; } // ensure that wallet is initialized with a starting time (else it will start from now and old tx will not be scanned) // Use the hardcoded timestamp 1441212522 of block 250000, we didn't have any recovery phrase style wallets (using current phrase system) before that. if (phraseBirthNumber == 0) phraseBirthNumber = timeToBirthNumber(1441212522L); //fixme: (UNITY) (SPV) - Handle all the various birth date (or lack of birthdate) cases here instead of just the one. AppLifecycleManager::gApp->setRecoveryPhrase(phraseOnly); AppLifecycleManager::gApp->setRecoveryBirthNumber(phraseBirthNumber); AppLifecycleManager::gApp->setRecoveryPassword(password.c_str()); AppLifecycleManager::gApp->isRecovery = true; AppLifecycleManager::gApp->initialize(); return true; } void DoRescanInternal() { if (pactiveWallet) { ResetSPVStartRescanThread(); } } bool ValidateAndSplitRecoveryPhrase(const std::string & phrase, SecureString& mnemonic, int& birthNumber) { if (phrase.length() < 16) return false; AppLifecycleManager::gApp->splitRecoveryPhraseAndBirth(phrase.c_str(), mnemonic, birthNumber); return checkMnemonic(mnemonic) && (birthNumber == 0 || Base10ChecksumDecode(birthNumber, nullptr)); } bool ILibraryController::ContinueWalletFromRecoveryPhrase(const std::string& phrase, const std::string& password) { SecureString phraseOnly; int phraseBirthNumber; if (!ValidateAndSplitRecoveryPhrase(phrase, phraseOnly, phraseBirthNumber)) return false; // ensure that wallet is initialized with a starting time (else it will start from now and old tx will not be scanned) // Use the hardcoded timestamp 1441212522 of block 250000, we didn't have any recovery phrase style wallets (using current phrase system) before that. if (phraseBirthNumber == 0) phraseBirthNumber = timeToBirthNumber(1441212522L); if (!pactiveWallet) { LogPrintf("ContineWalletFromRecoveryPhrase: No active wallet"); return false; } LOCK2(cs_main, pactiveWallet->cs_wallet); AppLifecycleManager::gApp->setRecoveryPhrase(phraseOnly); AppLifecycleManager::gApp->setRecoveryBirthNumber(phraseBirthNumber); AppLifecycleManager::gApp->setRecoveryPassword(password.c_str()); AppLifecycleManager::gApp->isRecovery = true; CWallet::CreateSeedAndAccountFromPhrase(pactiveWallet); // Allow update of balance for deleted accounts/transactions LogPrintf("%s: Update balance and rescan", __func__); balanceChangeNotifier->trigger(0); // Rescan for transactions on the linked account DoRescanInternal(); return true; } bool ILibraryController::IsValidRecoveryPhrase(const std::string & phrase) { SecureString dummyMnemonic; int dummyNumber; return ValidateAndSplitRecoveryPhrase(phrase, dummyMnemonic, dummyNumber); } #include "base58.h" std::string ILibraryController::GenerateGenesisKeys() { std::string address = GetReceiveAddress(); CNativeAddress addr(address); CTxDestination dest = addr.Get(); CPubKey vchPubKeyDevSubsidy; pactiveWallet->GetPubKey(boost::get<CKeyID>(dest), vchPubKeyDevSubsidy); std::string devSubsidyPubKey = HexStr(vchPubKeyDevSubsidy); std::string devSubsidyPubKeyID = boost::get<CKeyID>(dest).GetHex(); CKey key; key.MakeNewKey(true); CPrivKey vchPrivKey = key.GetPrivKey(); CPubKey vchPubKey = key.GetPubKey(); std::string privkey = HexStr<CPrivKey::iterator>(vchPrivKey.begin(), vchPrivKey.end()).c_str(); std::string pubKeyID = vchPubKey.GetID().GetHex(); std::string witnessKeys = GLOBAL_APP_URIPREFIX"://witnesskeys?keys=" + CEncodedSecretKey(key).ToString() + strprintf("#%s", GetAdjustedTime()); return "privkey: "+privkey+"\n"+"pubkeyID: "+pubKeyID+"\n"+"witness: "+witnessKeys+"\n"+"dev subsidy addr: "+address+"\n"+"dev subsidy pubkey: "+devSubsidyPubKey+"\n"+"dev subsidy pubkey ID: "+devSubsidyPubKeyID+"\n"; } MnemonicRecord ILibraryController::GenerateRecoveryMnemonic() { std::vector<unsigned char> entropy(16); GetStrongRandBytes(&entropy[0], 16); int64_t birthTime = GetAdjustedTime(); SecureString phraseOnly = mnemonicFromEntropy(entropy, entropy.size()*8); return ComposeRecoveryPhrase(phraseOnly.c_str(), birthTime); } MnemonicRecord ILibraryController::ComposeRecoveryPhrase(const std::string & mnemonic, int64_t birthTime) { const auto& result = AppLifecycleManager::composeRecoveryPhrase(SecureString(mnemonic), birthTime); return MnemonicRecord(result.first.c_str(), mnemonic.c_str(), result.second); } bool ILibraryController::InitWalletLinkedFromURI(const std::string& linked_uri, const std::string& password) { CEncodedSecretKeyExt<CExtKey> linkedKey; if (!linkedKey.fromURIString(linked_uri)) { return false; } AppLifecycleManager::gApp->setLinkKey(linkedKey); AppLifecycleManager::gApp->isLink = true; AppLifecycleManager::gApp->setRecoveryPassword(password.c_str()); AppLifecycleManager::gApp->initialize(); return true; } bool ILibraryController::ContinueWalletLinkedFromURI(const std::string & linked_uri, const std::string& password) { if (!pactiveWallet) { LogPrintf("%s: No active wallet", __func__); return false; } LOCK2(cs_main, pactiveWallet->cs_wallet); CEncodedSecretKeyExt<CExtKey> linkedKey; if (!linkedKey.fromURIString(linked_uri)) { LogPrintf("%s: Failed to parse link URI", __func__); return false; } AppLifecycleManager::gApp->setLinkKey(linkedKey); AppLifecycleManager::gApp->setRecoveryPassword(password.c_str()); AppLifecycleManager::gApp->isLink = true; CWallet::CreateSeedAndAccountFromLink(pactiveWallet); // Allow update of balance for deleted accounts/transactions LogPrintf("%s: Update balance and rescan", __func__); balanceChangeNotifier->trigger(0); // Rescan for transactions on the linked account DoRescanInternal(); return true; } bool ILibraryController::ReplaceWalletLinkedFromURI(const std::string& linked_uri, const std::string& password) { LOCK2(cs_main, pactiveWallet->cs_wallet); if (!pactiveWallet || !pactiveWallet->activeAccount) { LogPrintf("ReplaceWalletLinkedFromURI: No active wallet"); return false; } // Create ext key for new linked account from parsed data CEncodedSecretKeyExt<CExtKey> linkedKey; if (!linkedKey.fromURIString(linked_uri)) { LogPrintf("ReplaceWalletLinkedFromURI: Failed to parse link URI"); return false; } // Ensure we have a valid location to send all the funds CNativeAddress address(linkedKey.getPayAccount()); if (!address.IsValid()) { LogPrintf("ReplaceWalletLinkedFromURI: invalid address %s", linkedKey.getPayAccount().c_str()); return false; } // Empty wallet to target address LogPrintf("ReplaceWalletLinkedFromURI: Empty accounts into linked address"); bool fSubtractFeeFromAmount = true; std::vector<std::tuple<CWalletTx*, CReserveKeyOrScript*>> transactionsToCommit; for (const auto& [accountUUID, pAccount] : pactiveWallet->mapAccounts) { CAmount nBalance = pactiveWallet->GetBalance(pAccount, false, true, true); if (nBalance > 0) { LogPrintf("ReplaceWalletLinkedFromURI: Empty account into linked address [%s]", getUUIDAsString(accountUUID).c_str()); std::vector<CRecipient> vecSend; CRecipient recipient = GetRecipientForDestination(address.Get(), nBalance, fSubtractFeeFromAmount, GetPoW2Phase(chainTip())); vecSend.push_back(recipient); CWalletTx* pWallettx = new CWalletTx(); CAmount nFeeRequired; int nChangePosRet = -1; std::string strError; CReserveKeyOrScript* pReserveKey = new CReserveKeyOrScript(pactiveWallet, pAccount, KEYCHAIN_CHANGE); std::vector<CKeyStore*> accountsToTry; for ( const auto& accountPair : pactiveWallet->mapAccounts ) { if(accountPair.second->getParentUUID() == pAccount->getUUID()) { accountsToTry.push_back(accountPair.second); } accountsToTry.push_back(pAccount); } if (!pactiveWallet->CreateTransaction(accountsToTry, vecSend, *pWallettx, *pReserveKey, nFeeRequired, nChangePosRet, strError)) { LogPrintf("ReplaceWalletLinkedFromURI: Failed to create transaction %s [%d]",strError.c_str(), nBalance); return false; } transactionsToCommit.push_back(std::tuple(pWallettx, pReserveKey)); } else { LogPrintf("ReplaceWalletLinkedFromURI: Account already empty [%s]", getUUIDAsString(accountUUID).c_str()); } } if (!EraseWalletSeedsAndAccounts()) { LogPrintf("ReplaceWalletLinkedFromURI: Failed to erase seed and accounts"); return false; } AppLifecycleManager::gApp->setLinkKey(linkedKey); AppLifecycleManager::gApp->setRecoveryPassword(password.c_str()); AppLifecycleManager::gApp->isLink = true; CWallet::CreateSeedAndAccountFromLink(pactiveWallet); for (auto& [pWalletTx, pReserveKey] : transactionsToCommit) { CValidationState state; //NB! We delibritely pass nullptr for connman here to prevent transaction from relaying //We allow the relaying to occur inside DoRescan instead if (!pactiveWallet->CommitTransaction(*pWalletTx, *pReserveKey, nullptr, state)) { LogPrintf("ReplaceWalletLinkedFromURI: Failed to commit transaction"); return false; } delete pWalletTx; delete pReserveKey; } // Allow update of balance for deleted accounts/transactions LogPrintf("ReplaceWalletLinkedFromURI: Update balance and rescan"); balanceChangeNotifier->trigger(0); // Rescan for transactions on the linked account DoRescanInternal(); return true; } bool ILibraryController::EraseWalletSeedsAndAccounts() { pactiveWallet->EraseWalletSeedsAndAccounts(); return true; } bool ILibraryController::IsValidLinkURI(const std::string& linked_uri) { CEncodedSecretKeyExt<CExtKey> linkedKey; if (!linkedKey.fromURIString(linked_uri)) return false; return true; } bool testnet_; bool spvMode_; std::string extraArgs_; std::string staticFilterPath_; int64_t staticFilterOffset_; int64_t staticFilterLength_; int32_t ILibraryController::InitUnityLib(const std::string& dataDir, const std::string& staticFilterPath, int64_t staticFilterOffset, int64_t staticFilterLength, bool testnet, bool spvMode, const std::shared_ptr<ILibraryListener>& signalHandler_, const std::string& extraArgs) { balanceChangeNotifier = new CRateLimit<int>([](int) { if (pactiveWallet && signalHandler) { WalletBalances balances; pactiveWallet->GetBalances(balances, pactiveWallet->activeAccount, true); signalHandler->notifyBalanceChange(BalanceRecord(balances.availableIncludingLocked, balances.availableExcludingLocked, balances.availableLocked, balances.unconfirmedIncludingLocked, balances.unconfirmedExcludingLocked, balances.unconfirmedLocked, balances.immatureIncludingLocked, balances.immatureExcludingLocked, balances.immatureLocked, balances.totalLocked)); } }, std::chrono::milliseconds(BALANCE_NOTIFY_THRESHOLD_MS)); newMutationsNotifier = new CRateLimit<std::pair<uint256, bool>>([](const std::pair<uint256, bool>& txInfo) { if (pactiveWallet && signalHandler) { const uint256& txHash = txInfo.first; const bool fSelfComitted = txInfo.second; LOCK2(cs_main, pactiveWallet->cs_wallet); if (pactiveWallet->mapWallet.find(txHash) != pactiveWallet->mapWallet.end()) { const CWalletTx& wtx = pactiveWallet->mapWallet[txHash]; std::vector<MutationRecord> mutations; addMutationsForTransaction(&wtx, mutations, pactiveWallet->activeAccount); for (auto& m: mutations) { LogPrintf("unity: notify new mutation for tx %s", txHash.ToString().c_str()); signalHandler->notifyNewMutation(m, fSelfComitted); } } } }, std::chrono::milliseconds(NEW_MUTATIONS_NOTIFY_THRESHOLD_MS)); // Force the datadir to specific place on e.g. android devices defaultDataDirOverride = dataDir; signalHandler = signalHandler_; testnet_ = testnet; spvMode_ = spvMode; extraArgs_ = extraArgs; staticFilterPath_ = staticFilterPath; staticFilterOffset_ = staticFilterOffset; staticFilterLength_ = staticFilterLength; return InitUnity(); } void InitAppSpecificConfigParamaters() { if (spvMode_) { // SPV wallets definitely shouldn't be listening for incoming connections at all SoftSetArg("-listen", "0"); // Minimise logging for performance reasons SoftSetArg("-debug", "0"); // Turn SPV mode on SoftSetArg("-fullsync", "0"); SoftSetArg("-spv", "1"); #ifdef DJINNI_NODEJS #ifdef SPV_MULTI_ACCOUNT SoftSetArg("-accountpool", "3"); SoftSetArg("-accountpoolmobi", "1"); SoftSetArg("-accountpoolwitness", "1"); SoftSetArg("-accountpoolmining", "1"); #else SoftSetArg("-accountpool", "0"); SoftSetArg("-accountpoolmobi", "0"); SoftSetArg("-accountpoolwitness", "0"); SoftSetArg("-accountpoolmining", "0"); #endif SoftSetArg("-keypool", "10"); #else // Minimise lookahead size for performance reasons SoftSetArg("-accountpool", "1"); // Minimise background threads and memory consumption SoftSetArg("-par", "-100"); SoftSetArg("-maxsigcachesize", "0"); SoftSetArg("-dbcache", "4"); SoftSetArg("-maxmempool", "5"); SoftSetArg("-maxconnections", "8"); //fixme: (FUT) (UNITY) Reverse headers // Temporarily disable reverse headers for mobile until memory requirements can be reduced. SoftSetArg("-reverseheaders", "false"); #endif } SoftSetArg("-spvstaticfilterfile", staticFilterPath_); SoftSetArg("-spvstaticfilterfileoffset", i64tostr(staticFilterOffset_)); SoftSetArg("-spvstaticfilterfilelength", i64tostr(staticFilterLength_)); // Change client name #if defined(__APPLE__) && TARGET_OS_IPHONE == 1 SoftSetArg("-clientname", GLOBAL_APPNAME" ios"); #elif defined(__ANDROID__) SoftSetArg("-clientname", GLOBAL_APPNAME" android"); #else SoftSetArg("-clientname", GLOBAL_APPNAME" desktop"); #endif // Testnet if (testnet_) { SoftSetArg("-testnet", "S1595347850:60"); SoftSetArg("-addnode", "178.62.195.19"); } else { SoftSetArg("-addnode", "178.62.195.19"); SoftSetArg("-addnode", "149.210.165.218"); } if (!extraArgs_.empty()) { std::vector<const char*> args; auto splitted = boost::program_options::split_unix(extraArgs_); for(const auto& part: splitted) args.push_back(part.c_str()); gArgs.ParseExtraParameters(int(args.size()), args.data()); } } void ILibraryController::InitUnityLibThreaded(const std::string& dataDir, const std::string& staticFilterPath, int64_t staticFilterOffset, int64_t staticFilterLength, bool testnet, bool spvMode, const std::shared_ptr<ILibraryListener>& signalHandler_, const std::string& extraArgs) { std::thread([=] { InitUnityLib(dataDir, staticFilterPath, staticFilterOffset, staticFilterLength, testnet, spvMode, signalHandler_, extraArgs); }).detach(); } void ILibraryController::TerminateUnityLib() { // Terminate in thread so we don't block interprocess communication std::thread([=] { work.reset(); ioctx.stop(); AppLifecycleManager::gApp->shutdown(); AppLifecycleManager::gApp->waitForShutDown(); run_thread.join(); }).detach(); } QrCodeRecord ILibraryController::QRImageFromString(const std::string& qr_string, int32_t width_hint) { QRcode* code = QRcode_encodeString(qr_string.c_str(), 0, QR_ECLEVEL_L, QR_MODE_8, 1); if (!code) { return QrCodeRecord(0, std::vector<uint8_t>()); } else { const int32_t generatedWidth = code->width; const int32_t finalWidth = (width_hint / generatedWidth) * generatedWidth; const int32_t scaleMultiplier = finalWidth / generatedWidth; std::vector<uint8_t> dataVector; dataVector.reserve(finalWidth*finalWidth); int nIndex=0; for (int nRow=0; nRow<generatedWidth; ++nRow) { for (int nCol=0; nCol<generatedWidth; ++nCol) { dataVector.insert(dataVector.end(), scaleMultiplier, (code->data[nIndex++] & 1) * 255); } for (int i=1; i<scaleMultiplier; ++i) { dataVector.insert(dataVector.end(), dataVector.end()-finalWidth, dataVector.end()); } } QRcode_free(code); return QrCodeRecord(finalWidth, dataVector); } } std::string ILibraryController::GetReceiveAddress() { LOCK2(cs_main, pactiveWallet->cs_wallet); if (!pactiveWallet || !pactiveWallet->activeAccount) return ""; CReserveKeyOrScript* receiveAddress = new CReserveKeyOrScript(pactiveWallet, pactiveWallet->activeAccount, KEYCHAIN_EXTERNAL); CPubKey pubKey; if (receiveAddress->GetReservedKey(pubKey)) { CKeyID keyID = pubKey.GetID(); receiveAddress->ReturnKey(); delete receiveAddress; return CNativeAddress(keyID).ToString(); } else { return ""; } } //fixme: (UNITY) - find a way to use char[] here as well as on the java side. MnemonicRecord ILibraryController::GetRecoveryPhrase() { if (pactiveWallet && pactiveWallet->activeAccount) { LOCK2(cs_main, pactiveWallet->cs_wallet); //WalletModel::UnlockContext ctx(walletModel->requestUnlock()); //if (ctx.isValid()) { int64_t birthTime = pactiveWallet->birthTime(); std::set<SecureString> allPhrases; for (const auto& seedIter : pactiveWallet->mapSeeds) { SecureString phrase = seedIter.second->getMnemonic(); return ComposeRecoveryPhrase(phrase.c_str(), birthTime); } } } return MnemonicRecord("", "", 0); } bool ILibraryController::IsMnemonicWallet() { if (!pactiveWallet || !pactiveWallet->activeAccount) throw std::runtime_error(_("No active internal wallet.")); LOCK2(cs_main, pactiveWallet->cs_wallet); return pactiveWallet->activeSeed != nullptr; } bool ILibraryController::IsMnemonicCorrect(const std::string & phrase) { if (!pactiveWallet || !pactiveWallet->activeAccount) throw std::runtime_error(_("No active internal wallet.")); SecureString mnemonicPhrase; int birthNumber; AppLifecycleManager::splitRecoveryPhraseAndBirth(SecureString(phrase), mnemonicPhrase, birthNumber); LOCK2(cs_main, pactiveWallet->cs_wallet); for (const auto& seedIter : pactiveWallet->mapSeeds) { if (mnemonicPhrase == seedIter.second->getMnemonic()) return true; } return false; } std::vector<std::string> ILibraryController::GetMnemonicDictionary() { return getMnemonicDictionary(); } //fixme: (UNITY) HIGH - take a timeout value and always lock again after timeout bool ILibraryController::UnlockWallet(const std::string& password) { if (!pactiveWallet) { LogPrintf("UnlockWallet: No active wallet"); return false; } if (!dynamic_cast<CExtWallet*>(pactiveWallet)->IsCrypted()) { LogPrintf("UnlockWallet: Wallet not encrypted"); return false; } return pactiveWallet->Unlock(password.c_str()); } bool ILibraryController::LockWallet() { if (!pactiveWallet) { LogPrintf("LockWallet: No active wallet"); return false; } if (dynamic_cast<CExtWallet*>(pactiveWallet)->IsLocked()) return true; return dynamic_cast<CExtWallet*>(pactiveWallet)->Lock(); } bool ILibraryController::ChangePassword(const std::string& oldPassword, const std::string& newPassword) { if (!pactiveWallet) { LogPrintf("ChangePassword: No active wallet"); return false; } if (newPassword.length() == 0) { LogPrintf("ChangePassword: Refusing invalid password of length 0"); return false; } return pactiveWallet->ChangeWalletPassphrase(oldPassword.c_str(), newPassword.c_str()); } bool ILibraryController::HaveUnconfirmedFunds() { if (!pactiveWallet) return true; WalletBalances balances; pactiveWallet->GetBalances(balances, pactiveWallet->activeAccount, true); if (balances.unconfirmedIncludingLocked > 0 || balances.immatureIncludingLocked > 0) { return true; } return false; } int64_t ILibraryController::GetBalance() { if (!pactiveWallet) return 0; WalletBalances balances; pactiveWallet->GetBalances(balances, pactiveWallet->activeAccount, true); return balances.availableIncludingLocked + balances.unconfirmedIncludingLocked + balances.immatureIncludingLocked; } void ILibraryController::DoRescan() { if (!pactiveWallet) return; // Allocate some extra keys //fixme: Persist this across runs in some way static int32_t extraKeys=0; extraKeys += 5; int nKeyPoolTargetDepth = GetArg("-keypool", DEFAULT_ACCOUNT_KEYPOOL_SIZE)+extraKeys; pactiveWallet->TopUpKeyPool(nKeyPoolTargetDepth, 0, nullptr, 1); // Do the rescan DoRescanInternal(); } UriRecipient ILibraryController::IsValidRecipient(const UriRecord & request) { // return if URI is not valid or is no Gulden: URI std::string lowerCaseScheme = boost::algorithm::to_lower_copy(request.scheme); if (lowerCaseScheme != "gulden") return UriRecipient(false, "", "", "", 0); if (!CNativeAddress(request.path).IsValid()) return UriRecipient(false, "", "", "", 0); std::string address = request.path; std::string label = ""; std::string description = ""; CAmount amount = 0; if (request.items.find("amount") != request.items.end()) { ParseMoney(request.items.find("amount")->second, amount); } if (pactiveWallet) { LOCK2(cs_main, pactiveWallet->cs_wallet); if (pactiveWallet->mapAddressBook.find(address) != pactiveWallet->mapAddressBook.end()) { const auto& data = pactiveWallet->mapAddressBook[address]; label = data.name; description = data.description; } } return UriRecipient(true, address, label, description, amount); } bool ILibraryController::IsValidNativeAddress(const std::string& address) { CNativeAddress addr(address); return addr.IsValid(); } bool ILibraryController::IsValidBitcoinAddress(const std::string& address) { CNativeAddress addr(address); return addr.IsValidBitcoin(); } int64_t ILibraryController::feeForRecipient(const UriRecipient & request) { if (!pactiveWallet) throw std::runtime_error(_("No active internal wallet.")); LOCK2(cs_main, pactiveWallet->cs_wallet); CNativeAddress address(request.address); if (!address.IsValid()) { LogPrintf("feeForRecipient: invalid address %s", request.address.c_str()); throw std::runtime_error(_("Invalid address")); } CRecipient recipient = GetRecipientForDestination(address.Get(), std::min(GetBalance(), request.amount), true, GetPoW2Phase(chainTip())); std::vector<CRecipient> vecSend; vecSend.push_back(recipient); CWalletTx wtx; CAmount nFeeRequired; int nChangePosRet = -1; std::string strError; CReserveKeyOrScript reservekey(pactiveWallet, pactiveWallet->activeAccount, KEYCHAIN_CHANGE); std::vector<CKeyStore*> accountsToTry; for ( const auto& accountPair : pactiveWallet->mapAccounts ) { if(accountPair.second->getParentUUID() == pactiveWallet->activeAccount->getUUID()) { accountsToTry.push_back(accountPair.second); } accountsToTry.push_back(pactiveWallet->activeAccount); } if (!pactiveWallet->CreateTransaction(accountsToTry, vecSend, wtx, reservekey, nFeeRequired, nChangePosRet, strError, NULL, false)) { LogPrintf("feeForRecipient: failed to create transaction %s",strError.c_str()); throw std::runtime_error(strprintf(_("Failed to calculate fee\n%s"), strError)); } return nFeeRequired; } PaymentResultStatus ILibraryController::performPaymentToRecipient(const UriRecipient & request, bool substract_fee) { if (!pactiveWallet) throw std::runtime_error(_("No active internal wallet.")); LOCK2(cs_main, pactiveWallet->cs_wallet); CNativeAddress address(request.address); if (!address.IsValid()) { LogPrintf("performPaymentToRecipient: invalid address %s", request.address.c_str()); throw std::runtime_error(_("Invalid address")); } CRecipient recipient = GetRecipientForDestination(address.Get(), request.amount, substract_fee, GetPoW2Phase(chainTip())); std::vector<CRecipient> vecSend; vecSend.push_back(recipient); CWalletTx wtx; CAmount nFeeRequired; int nChangePosRet = -1; std::string strError; CReserveKeyOrScript reservekey(pactiveWallet, pactiveWallet->activeAccount, KEYCHAIN_CHANGE); std::vector<CKeyStore*> accountsToTry; for ( const auto& accountPair : pactiveWallet->mapAccounts ) { if(accountPair.second->getParentUUID() == pactiveWallet->activeAccount->getUUID()) { accountsToTry.push_back(accountPair.second); } accountsToTry.push_back(pactiveWallet->activeAccount); } if (!pactiveWallet->CreateTransaction(accountsToTry, vecSend, wtx, reservekey, nFeeRequired, nChangePosRet, strError)) { if (!substract_fee && request.amount + nFeeRequired > GetBalance()) { return PaymentResultStatus::INSUFFICIENT_FUNDS; } LogPrintf("performPaymentToRecipient: failed to create transaction %s",strError.c_str()); throw std::runtime_error(strprintf(_("Failed to create transaction\n%s"), strError)); } CValidationState state; if (!pactiveWallet->CommitTransaction(wtx, reservekey, g_connman.get(), state)) { strError = strprintf("Error: The transaction was rejected! Reason given: %s", state.GetRejectReason()); LogPrintf("performPaymentToRecipient: failed to commit transaction %s",strError.c_str()); throw std::runtime_error(strprintf(_("Transaction rejected, reason: %s"), state.GetRejectReason())); } // Prevent accidental double spends for (const auto &txin : wtx.tx->vin) { pactiveWallet->LockCoin(txin.GetPrevOut()); } return PaymentResultStatus::SUCCESS; } std::vector<TransactionRecord> getTransactionHistoryForAccount(CAccount* forAccount) { std::vector<TransactionRecord> ret; LOCK2(cs_main, pactiveWallet->cs_wallet); std::vector<CAccount*> forAccounts = GetAccountsForAccount(forAccount); for (const auto& [hash, wtx] : pactiveWallet->mapWallet) { bool anyInputsOrOutputsAreMine = false; TransactionRecord tx = calculateTransactionRecordForWalletTransaction(wtx, forAccounts, anyInputsOrOutputsAreMine); if (anyInputsOrOutputsAreMine) { ret.push_back(tx); } } std::sort(ret.begin(), ret.end(), [&](TransactionRecord& x, TransactionRecord& y){ return (x.timeStamp > y.timeStamp); }); return ret; } std::vector<TransactionRecord> ILibraryController::getTransactionHistory() { if (!pactiveWallet) return std::vector<TransactionRecord>(); return getTransactionHistoryForAccount(pactiveWallet->activeAccount); } TransactionRecord ILibraryController::getTransaction(const std::string& txHash) { if (!pactiveWallet) throw std::runtime_error(strprintf("No active wallet to query tx hash [%s]", txHash)); uint256 hash = uint256S(txHash); LOCK2(cs_main, pactiveWallet->cs_wallet); if (pactiveWallet->mapWallet.find(hash) == pactiveWallet->mapWallet.end()) throw std::runtime_error(strprintf("No transaction found for hash [%s]", txHash)); std::vector<CAccount*> forAccounts = GetAccountsForAccount(pactiveWallet->activeAccount); const CWalletTx& wtx = pactiveWallet->mapWallet[hash]; bool anyInputsOrOutputsAreMine = false; return calculateTransactionRecordForWalletTransaction(wtx, forAccounts, anyInputsOrOutputsAreMine); } std::string ILibraryController::resendTransaction(const std::string& txHash) { if (!pactiveWallet) throw std::runtime_error(strprintf("No active wallet to query tx hash [%s]", txHash)); uint256 hash = uint256S(txHash); LOCK2(cs_main, pactiveWallet->cs_wallet); if (pactiveWallet->mapWallet.find(hash) == pactiveWallet->mapWallet.end()) return ""; const CWalletTx& wtx = pactiveWallet->mapWallet[hash]; CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); ssTx << *wtx.tx; std::string strHex = HexStr(ssTx.begin(), ssTx.end()); if(!g_connman) return ""; const uint256& hashTx = wtx.tx->GetHash(); CInv inv(MSG_TX, hashTx); g_connman->ForEachNode([&inv](CNode* pnode) { pnode->PushInventory(inv); }); return strHex; } std::vector<MutationRecord> getMutationHistoryForAccount(CAccount* forAccount) { std::vector<MutationRecord> ret; LOCK2(cs_main, pactiveWallet->cs_wallet); // wallet transactions in reverse chronological ordering std::vector<const CWalletTx*> vWtx; for (const auto& [hash, wtx] : pactiveWallet->mapWallet) { vWtx.push_back(&wtx); } std::sort(vWtx.begin(), vWtx.end(), [&](const CWalletTx* x, const CWalletTx* y){ return (x->nTimeSmart > y->nTimeSmart); }); // build mutation list based on transactions for (const CWalletTx* wtx : vWtx) { addMutationsForTransaction(wtx, ret, forAccount); } return ret; } std::vector<MutationRecord> ILibraryController::getMutationHistory() { if (!pactiveWallet) return std::vector<MutationRecord>(); return getMutationHistoryForAccount(pactiveWallet->activeAccount); } std::vector<AddressRecord> ILibraryController::getAddressBookRecords() { std::vector<AddressRecord> ret; if (pactiveWallet) { LOCK2(cs_main, pactiveWallet->cs_wallet); for(const auto& [address, addressData] : pactiveWallet->mapAddressBook) { ret.emplace_back(AddressRecord(address, addressData.name, addressData.description, addressData.purpose)); } } return ret; } void ILibraryController::addAddressBookRecord(const AddressRecord& address) { if (pactiveWallet) { pactiveWallet->SetAddressBook(address.address, address.name, address.desc, address.purpose); } } void ILibraryController::deleteAddressBookRecord(const AddressRecord& address) { if (pactiveWallet) { pactiveWallet->DelAddressBook(address.address); } } void ILibraryController::PersistAndPruneForSPV() { PersistAndPruneForPartialSync(); } void ILibraryController::ResetUnifiedProgress() { CWallet::ResetUnifiedSPVProgressNotification(); } float ILibraryController::getUnifiedProgress() { if (!GetBoolArg("-spv", DEFAULT_SPV)) { return lastProgress; } else { return CSPVScanner::lastProgressReported; } } std::vector<BlockInfoRecord> ILibraryController::getLastSPVBlockInfos() { std::vector<BlockInfoRecord> ret; LOCK(cs_main); int height = partialChain.Height(); while (ret.size() < 32 && height > partialChain.HeightOffset()) { const CBlockIndex* pindex = partialChain[height]; ret.push_back(BlockInfoRecord(pindex->nHeight, pindex->GetBlockTime(), pindex->GetBlockHashPoW2().ToString())); height--; } return ret; } MonitorRecord ILibraryController::getMonitoringStats() { LOCK(cs_main); int32_t partialHeight_ = partialChain.Height(); int32_t partialOffset_ = partialChain.HeightOffset(); int32_t prunedHeight_ = nPartialPruneHeightDone; int32_t processedSPVHeight_ = CSPVScanner::getProcessedHeight(); int32_t probableHeight_ = GetProbableHeight(); return MonitorRecord(partialHeight_, partialOffset_, prunedHeight_, processedSPVHeight_, probableHeight_); } void ILibraryController::RegisterMonitorListener(const std::shared_ptr<MonitorListener> & listener) { LOCK(cs_monitoringListeners); monitoringListeners.insert(listener); } void ILibraryController::UnregisterMonitorListener(const std::shared_ptr<MonitorListener> & listener) { LOCK(cs_monitoringListeners); monitoringListeners.erase(listener); } std::unordered_map<std::string, std::string> ILibraryController::getClientInfo() { std::unordered_map<std::string, std::string> ret; ret.insert(std::pair("client_version", FormatFullVersion())); ret.insert(std::pair("user_agent", strSubVersion)); ret.insert(std::pair("datadir_path", GetDataDir().string())); std::string logfilePath = (GetDataDir() / "debug.log").string(); ret.insert(std::pair("logfile_path", logfilePath)); ret.insert(std::pair("startup_timestamp", i64tostr(nClientStartupTime))); if (!g_connman->GetNetworkActive()) { ret.insert(std::pair("network_status", "disabled")); ret.insert(std::pair("num_connections_in", "0")); ret.insert(std::pair("num_connections_out", "0")); } else { ret.insert(std::pair("network_status", "enabled")); std::string connectionsIn = i64tostr(g_connman->GetNodeCount(CConnman::NumConnections::CONNECTIONS_IN)); std::string connectionsOut = i64tostr(g_connman->GetNodeCount(CConnman::NumConnections::CONNECTIONS_OUT)); ret.insert(std::pair("num_connections_in", connectionsIn)); ret.insert(std::pair("num_connections_out", connectionsOut)); } <|fim▁hole|> if (partialChain.Tip()) { ret.insert(std::pair("chain_tip_height", i64tostr(partialChain.Height()))); ret.insert(std::pair("chain_tip_time", i64tostr(partialChain.Tip()->GetBlockTime()))); ret.insert(std::pair("chain_tip_hash", partialChain.Tip()->GetBlockHashPoW2().ToString())); ret.insert(std::pair("chain_offset", i64tostr(partialChain.HeightOffset()))); ret.insert(std::pair("chain_pruned_height", i64tostr(nPartialPruneHeightDone))); ret.insert(std::pair("chain_processed_height", i64tostr(CSPVScanner::getProcessedHeight()))); ret.insert(std::pair("chain_probable_height", i64tostr(GetProbableHeight()))); } } else if (chainActive.Tip()) { ret.insert(std::pair("chain_tip_height", i64tostr(chainActive.Tip()->nHeight))); ret.insert(std::pair("chain_tip_time", i64tostr(chainActive.Tip()->GetBlockTime()))); ret.insert(std::pair("chain_tip_hash", chainActive.Tip()->GetBlockHashPoW2().ToString())); } ret.insert(std::pair("mempool_transaction_count", i64tostr(mempool.size()))); ret.insert(std::pair("mempool_memory_size", i64tostr(mempool.GetTotalTxSize()))); return ret; }<|fim▁end|>
if (fSPV) {
<|file_name|>0003_rover.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2016-09-19 07:46 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('photos', '0002_auto_20160919_0737'), ] operations = [ migrations.CreateModel( name='Rover', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),<|fim▁hole|> ('max_date', models.DateField()), ('max_sol', models.IntegerField()), ('total_photos', models.IntegerField()), ], ), ]<|fim▁end|>
('nasa_id', models.IntegerField(unique=True)), ('name', models.CharField(max_length=30)), ('landing_date', models.DateField()),
<|file_name|>fixtures.py<|end_file_name|><|fim▁begin|>import logging import os import os.path import shutil<|fim▁hole|>from six.moves import urllib import uuid from six.moves.urllib.parse import urlparse # pylint: disable=E0611,F0401 from test.service import ExternalService, SpawnedService from test.testutil import get_open_port log = logging.getLogger(__name__) class Fixture(object): kafka_version = os.environ.get('KAFKA_VERSION', '0.8.0') scala_version = os.environ.get("SCALA_VERSION", '2.8.0') project_root = os.environ.get('PROJECT_ROOT', os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) kafka_root = os.environ.get("KAFKA_ROOT", os.path.join(project_root, 'servers', kafka_version, "kafka-bin")) ivy_root = os.environ.get('IVY_ROOT', os.path.expanduser("~/.ivy2/cache")) @classmethod def download_official_distribution(cls, kafka_version=None, scala_version=None, output_dir=None): if not kafka_version: kafka_version = cls.kafka_version if not scala_version: scala_version = cls.scala_version if not output_dir: output_dir = os.path.join(cls.project_root, 'servers', 'dist') distfile = 'kafka_%s-%s' % (scala_version, kafka_version,) url_base = 'https://archive.apache.org/dist/kafka/%s/' % (kafka_version,) output_file = os.path.join(output_dir, distfile + '.tgz') if os.path.isfile(output_file): log.info("Found file already on disk: %s", output_file) return output_file # New tarballs are .tgz, older ones are sometimes .tar.gz try: url = url_base + distfile + '.tgz' log.info("Attempting to download %s", url) response = urllib.request.urlopen(url) except urllib.error.HTTPError: log.exception("HTTP Error") url = url_base + distfile + '.tar.gz' log.info("Attempting to download %s", url) response = urllib.request.urlopen(url) log.info("Saving distribution file to %s", output_file) with open(output_file, 'w') as output_file_fd: output_file_fd.write(response.read()) return output_file @classmethod def test_resource(cls, filename): return os.path.join(cls.project_root, "servers", cls.kafka_version, "resources", filename) @classmethod def kafka_run_class_args(cls, *args): result = [os.path.join(cls.kafka_root, 'bin', 'kafka-run-class.sh')] result.extend(args) return result @classmethod def kafka_run_class_env(cls): env = os.environ.copy() env['KAFKA_LOG4J_OPTS'] = "-Dlog4j.configuration=file:%s" % cls.test_resource("log4j.properties") return env @classmethod def render_template(cls, source_file, target_file, binding): with open(source_file, "r") as handle: template = handle.read() with open(target_file, "w") as handle: handle.write(template.format(**binding)) class ZookeeperFixture(Fixture): @classmethod def instance(cls): if "ZOOKEEPER_URI" in os.environ: parse = urlparse(os.environ["ZOOKEEPER_URI"]) (host, port) = (parse.hostname, parse.port) fixture = ExternalService(host, port) else: (host, port) = ("127.0.0.1", get_open_port()) fixture = cls(host, port) fixture.open() return fixture def __init__(self, host, port): self.host = host self.port = port self.tmp_dir = None self.child = None def out(self, message): log.info("*** Zookeeper [%s:%d]: %s", self.host, self.port, message) def open(self): self.tmp_dir = tempfile.mkdtemp() self.out("Running local instance...") log.info(" host = %s", self.host) log.info(" port = %s", self.port) log.info(" tmp_dir = %s", self.tmp_dir) # Generate configs template = self.test_resource("zookeeper.properties") properties = os.path.join(self.tmp_dir, "zookeeper.properties") self.render_template(template, properties, vars(self)) # Configure Zookeeper child process args = self.kafka_run_class_args("org.apache.zookeeper.server.quorum.QuorumPeerMain", properties) env = self.kafka_run_class_env() # Party! self.out("Starting...") timeout = 5 max_timeout = 30 backoff = 1 while True: self.child = SpawnedService(args, env) self.child.start() timeout = min(timeout, max_timeout) if self.child.wait_for(r"binding to port", timeout=timeout): break self.child.stop() timeout *= 2 time.sleep(backoff) self.out("Done!") def close(self): self.out("Stopping...") self.child.stop() self.child = None self.out("Done!") shutil.rmtree(self.tmp_dir) class KafkaFixture(Fixture): @classmethod def instance(cls, broker_id, zk_host, zk_port, zk_chroot=None, replicas=1, partitions=2): if zk_chroot is None: zk_chroot = "kafka-python_" + str(uuid.uuid4()).replace("-", "_") if "KAFKA_URI" in os.environ: parse = urlparse(os.environ["KAFKA_URI"]) (host, port) = (parse.hostname, parse.port) fixture = ExternalService(host, port) else: (host, port) = ("127.0.0.1", get_open_port()) fixture = KafkaFixture(host, port, broker_id, zk_host, zk_port, zk_chroot, replicas, partitions) fixture.open() return fixture def __init__(self, host, port, broker_id, zk_host, zk_port, zk_chroot, replicas=1, partitions=2): self.host = host self.port = port self.broker_id = broker_id self.zk_host = zk_host self.zk_port = zk_port self.zk_chroot = zk_chroot self.replicas = replicas self.partitions = partitions self.tmp_dir = None self.child = None self.running = False def out(self, message): log.info("*** Kafka [%s:%d]: %s", self.host, self.port, message) def open(self): if self.running: self.out("Instance already running") return self.tmp_dir = tempfile.mkdtemp() self.out("Running local instance...") log.info(" host = %s", self.host) log.info(" port = %s", self.port) log.info(" broker_id = %s", self.broker_id) log.info(" zk_host = %s", self.zk_host) log.info(" zk_port = %s", self.zk_port) log.info(" zk_chroot = %s", self.zk_chroot) log.info(" replicas = %s", self.replicas) log.info(" partitions = %s", self.partitions) log.info(" tmp_dir = %s", self.tmp_dir) # Create directories os.mkdir(os.path.join(self.tmp_dir, "logs")) os.mkdir(os.path.join(self.tmp_dir, "data")) # Generate configs template = self.test_resource("kafka.properties") properties = os.path.join(self.tmp_dir, "kafka.properties") self.render_template(template, properties, vars(self)) # Party! self.out("Creating Zookeeper chroot node...") args = self.kafka_run_class_args("org.apache.zookeeper.ZooKeeperMain", "-server", "%s:%d" % (self.zk_host, self.zk_port), "create", "/%s" % self.zk_chroot, "kafka-python") env = self.kafka_run_class_env() proc = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if proc.wait() != 0: self.out("Failed to create Zookeeper chroot node") self.out(proc.stdout.read()) self.out(proc.stderr.read()) raise RuntimeError("Failed to create Zookeeper chroot node") self.out("Done!") self.out("Starting...") # Configure Kafka child process args = self.kafka_run_class_args("kafka.Kafka", properties) env = self.kafka_run_class_env() timeout = 5 max_timeout = 30 backoff = 1 while True: self.child = SpawnedService(args, env) self.child.start() timeout = min(timeout, max_timeout) if self.child.wait_for(r"\[Kafka Server %d\], Started" % self.broker_id, timeout=timeout): break self.child.stop() timeout *= 2 time.sleep(backoff) self.out("Done!") self.running = True def close(self): if not self.running: self.out("Instance already stopped") return self.out("Stopping...") self.child.stop() self.child = None self.out("Done!") shutil.rmtree(self.tmp_dir) self.running = False<|fim▁end|>
import subprocess import tempfile import time
<|file_name|>BoardModule.js<|end_file_name|><|fim▁begin|>var BoardVo = require(_path.src + "/vo/BoardVo.js"); var BoardDao = require(_path.src + "/dao/BoardDao.js"); var RoleDao = require(_path.src + "/dao/RoleDao.js"); module.exports.board = function($, el, param, req, next) { var template = this.getTemplate($, el); BoardDao.getBoard(param.id, function(board) { $(el).html(template(board)); next();<|fim▁hole|>}; module.exports.boardList = function($, el, param, req, next) { var vo = new BoardVo(); if(req.session.user != null) { vo.signinUserId = req.session.user.id; vo.signinUserLevel = req.session.user.level; } var template = this.getTemplate($, el); BoardDao.getBoardList(vo, function(boardList) { RoleDao.getRoleList(function(roleList) { $(el).html(template({boardList : boardList, roleList : roleList})); next(); }); }); };<|fim▁end|>
});
<|file_name|>moves-based-on-type-no-recursive-stack-closure.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Tests correct kind-checking of the reason stack closures without the :Copy // bound must be noncopyable. For details see // http://smallcultfollowing.com/babysteps/blog/2013/04/30/the-case-of-the-recurring-closure/ extern crate debug; struct R<'a> { // This struct is needed to create the // otherwise infinite type of a fn that // accepts itself as argument: c: |&mut R, bool|: 'a } fn innocent_looking_victim() { let mut x = Some("hello".to_string()); conspirator(|f, writer| { if writer { x = None; } else { match x { Some(ref msg) => {<|fim▁hole|> println!("{:?}", msg); }, None => fail!("oops"), } } }) } fn conspirator(f: |&mut R, bool|) { let mut r = R {c: f}; f(&mut r, false) //~ ERROR use of moved value } fn main() { innocent_looking_victim() }<|fim▁end|>
(f.c)(f, true); //~^ ERROR: cannot borrow `*f` as mutable because
<|file_name|>autoinstall.py<|end_file_name|><|fim▁begin|>from __future__ import print_function import sys import subprocess class AutoInstall(object): _loaded = set() @classmethod def find_module(cls, name, path, target=None): if path is None and name not in cls._loaded: cls._loaded.add(name) print("Installing", name) try: out = subprocess.check_output(['sudo', sys.executable, '-m', 'pip', 'install', name]) print(out) except Exception as e: print("Failed" + e.message)<|fim▁hole|> sys.meta_path.append(AutoInstall)<|fim▁end|>
return None
<|file_name|>channels_base.py<|end_file_name|><|fim▁begin|>## See "d_bankfull" in update_flow_depth() ######## (2/21/13) ## See "(5/13/10)" for a temporary fix. #------------------------------------------------------------------------ # Copyright (c) 2001-2014, Scott D. Peckham # # Sep 2014. Wrote new update_diversions(). # New standard names and BMI updates and testing. # Nov 2013. Converted TopoFlow to a Python package. # Feb 2013. Adapted to use EMELI framework. # Jan 2013. Shared scalar doubles are now 0D numpy arrays. # This makes them mutable and allows components with # a reference to them to see them change. # So far: Q_outlet, Q_peak, Q_min... # Jan 2013. Revised handling of input/output names. # Oct 2012. CSDMS Standard Names and BMI. # May 2012. Commented out diversions.update() for now. ####### # May 2012. Shared scalar doubles are now 1-element 1D numpy arrays. # This makes them mutable and allows components with # a reference to them to see them change. # So far: Q_outlet, Q_peak, Q_min... # May 2010. Changes to initialize() and read_cfg_file() # Mar 2010. Changed codes to code, widths to width, # angles to angle, nvals to nval, z0vals to z0val, # slopes to slope (for GUI tools and consistency # across all process components) # Aug 2009. Updates. # Jul 2009. Updates. # May 2009. Updates. # Jan 2009. Converted from IDL. #----------------------------------------------------------------------- # NB! In the CFG file, change MANNING and LAW_OF_WALL flags to # a single string entry like "friction method". ######### #----------------------------------------------------------------------- # Notes: Set self.u in manning and law_of_wall functions ?? # Update friction factor in manning() and law_of_wall() ? # Double check how Rh is used in law_of_the_wall(). # d8_flow has "flow_grids", but this one has "codes". # Make sure values are not stored twice. #----------------------------------------------------------------------- #----------------------------------------------------------------------- # NOTES: This file defines a "base class" for channelized flow # components as well as functions used by most or # all channel flow methods. The methods of this class # (especially "update_velocity") should be over-ridden as # necessary for different methods of modeling channelized # flow. See channels_kinematic_wave.py, # channels_diffusive_wave.py and channels_dynamic_wave.py. #----------------------------------------------------------------------- # NOTES: update_free_surface_slope() is called by the # update_velocity() methods of channels_diffusive_wave.py # and channels_dynamic_wave.py. #----------------------------------------------------------------------- # # class channels_component # # ## get_attribute() # (defined in each channel component) # get_input_var_names() # (5/15/12) # get_output_var_names() # (5/15/12) # get_var_name() # (5/15/12) # get_var_units() # (5/15/12) #----------------------------- # set_constants() # initialize() # update() # finalize() # set_computed_input_vars() # (5/11/10) #---------------------------------- # initialize_d8_vars() ######## # initialize_computed_vars() # initialize_diversion_vars() # (9/22/14) # initialize_outlet_values() # initialize_peak_values() # initialize_min_and_max_values() # (2/3/13) #------------------------------------- # update_R() # update_R_integral() # update_discharge() # update_diversions() # (9/22/14) # update_flow_volume() # update_flow_depth() # update_free_surface_slope() # update_shear_stress() # (9/9/14, depth-slope product) # update_shear_speed() # (9/9/14) # update_trapezoid_Rh() # update_friction_factor() # (9/9/14) #---------------------------------- # update_velocity() # (override as needed) # update_velocity_on_edges() # update_froude_number() # (9/9/14) #---------------------------------- # update_outlet_values() # update_peak_values() # (at the main outlet) # update_Q_out_integral() # (moved here from basins.py) # update_mins_and_maxes() # (don't add into update()) # check_flow_depth() # check_flow_velocity() #---------------------------------- # open_input_files() # read_input_files() # close_input_files() #---------------------------------- # update_outfile_names() # bundle_output_files() # (9/21/14. Not used yet) # open_output_files() # write_output_files() # close_output_files() # save_grids() # save_pixel_values() #---------------------------------- # manning_formula() # law_of_the_wall() # print_status_report() # remove_bad_slopes() # Functions: # (stand-alone versions of these) # Trapezoid_Rh() # Manning_Formula() # Law_of_the_Wall() #----------------------------------------------------------------------- import numpy as np import os, os.path from topoflow.utils import BMI_base # from topoflow.utils import d8_base from topoflow.utils import file_utils ### from topoflow.utils import model_input from topoflow.utils import model_output from topoflow.utils import ncgs_files ### from topoflow.utils import ncts_files ### from topoflow.utils import rtg_files ### from topoflow.utils import text_ts_files ### from topoflow.utils import tf_d8_base as d8_base from topoflow.utils import tf_utils #----------------------------------------------------------------------- class channels_component( BMI_base.BMI_component ): #----------------------------------------------------------- # Note: rainfall_volume_flux *must* be liquid-only precip. #----------------------------------------------------------- _input_var_names = [ 'atmosphere_water__rainfall_volume_flux', # (P_rain) 'glacier_ice__melt_volume_flux', # (MR) ## 'land_surface__elevation', ## 'land_surface__slope', 'land_surface_water__baseflow_volume_flux', # (GW) 'land_surface_water__evaporation_volume_flux', # (ET) 'soil_surface_water__infiltration_volume_flux', # (IN) 'snowpack__melt_volume_flux', # (SM) 'water-liquid__mass-per-volume_density' ] # (rho_H2O) #------------------------------------------------------------------ # 'canals__count', # n_canals # 'canals_entrance__x_coordinate', # canals_in_x # 'canals_entrance__y_coordinate', # canals_in_y # 'canals_entrance_water__volume_fraction', # Q_canals_fraction # 'canals_exit__x_coordinate', # canals_out_x # 'canals_exit__y_coordinate', # canals_out_y # 'canals_exit_water__volume_flow_rate', # Q_canals_out # 'sinks__count', # n_sinks # 'sinks__x_coordinate', # sinks_x # 'sinks__y_coordinate', # sinks_y # 'sinks_water__volume_flow_rate', # Q_sinks # 'sources__count', # n_sources # 'sources__x_coordinate', # sources_x # 'sources__y_coordinate', # sources_y # 'sources_water__volume_flow_rate' ] # Q_sources #---------------------------------- # Maybe add these out_vars later. #---------------------------------- # ['time_sec', 'time_min' ] _output_var_names = [ 'basin_outlet_water_flow__half_of_fanning_friction_factor', # f_outlet 'basin_outlet_water_x-section__mean_depth', # d_outlet 'basin_outlet_water_x-section__peak_time_of_depth', # Td_peak 'basin_outlet_water_x-section__peak_time_of_volume_flow_rate', # T_peak 'basin_outlet_water_x-section__peak_time_of_volume_flux', # Tu_peak 'basin_outlet_water_x-section__time_integral_of_volume_flow_rate', # vol_Q 'basin_outlet_water_x-section__time_max_of_mean_depth', # d_peak 'basin_outlet_water_x-section__time_max_of_volume_flow_rate', # Q_peak 'basin_outlet_water_x-section__time_max_of_volume_flux', # u_peak 'basin_outlet_water_x-section__volume_flow_rate', # Q_outlet 'basin_outlet_water_x-section__volume_flux', # u_outlet #-------------------------------------------------- 'canals_entrance_water__volume_flow_rate', # Q_canals_in #-------------------------------------------------- 'channel_bottom_surface__slope', # S_bed 'channel_bottom_water_flow__domain_max_of_log_law_roughness_length', # z0val_max 'channel_bottom_water_flow__domain_min_of_log_law_roughness_length', # z0val_min 'channel_bottom_water_flow__log_law_roughness_length', # z0val 'channel_bottom_water_flow__magnitude_of_shear_stress', # tau 'channel_bottom_water_flow__shear_speed', # u_star 'channel_centerline__sinuosity', # sinu 'channel_water__volume', # vol 'channel_water_flow__froude_number', # froude 'channel_water_flow__half_of_fanning_friction_factor', # f 'channel_water_flow__domain_max_of_manning_n_parameter', # nval_max 'channel_water_flow__domain_min_of_manning_n_parameter', # nval_min 'channel_water_flow__manning_n_parameter', # nval 'channel_water_surface__slope', # S_free #--------------------------------------------------- # These might only be available at the end of run. #--------------------------------------------------- 'channel_water_x-section__domain_max_of_mean_depth', # d_max 'channel_water_x-section__domain_min_of_mean_depth', # d_min 'channel_water_x-section__domain_max_of_volume_flow_rate', # Q_max 'channel_water_x-section__domain_min_of_volume_flow_rate', # Q_min 'channel_water_x-section__domain_max_of_volume_flux', # u_max 'channel_water_x-section__domain_min_of_volume_flux', # u_min #--------------------------------------------------------------------- 'channel_water_x-section__hydraulic_radius', # Rh 'channel_water_x-section__initial_mean_depth', # d0 'channel_water_x-section__mean_depth', # d 'channel_water_x-section__volume_flow_rate', # Q 'channel_water_x-section__volume_flux', # u 'channel_water_x-section__wetted_area', # A_wet 'channel_water_x-section__wetted_perimeter', # P_wet ## 'channel_water_x-section_top__width', # (not used) 'channel_x-section_trapezoid_bottom__width', # width 'channel_x-section_trapezoid_side__flare_angle', # angle 'land_surface_water__runoff_volume_flux', # R 'land_surface_water__domain_time_integral_of_runoff_volume_flux', # vol_R 'model__time_step', # dt 'model_grid_cell__area' ] # da _var_name_map = { 'atmosphere_water__rainfall_volume_flux': 'P_rain', 'glacier_ice__melt_volume_flux': 'MR', ## 'land_surface__elevation': 'DEM', ## 'land_surface__slope': 'S_bed', 'land_surface_water__baseflow_volume_flux': 'GW', 'land_surface_water__evaporation_volume_flux': 'ET', 'soil_surface_water__infiltration_volume_flux': 'IN', 'snowpack__melt_volume_flux': 'SM', 'water-liquid__mass-per-volume_density': 'rho_H2O', #------------------------------------------------------------------------ 'basin_outlet_water_flow__half_of_fanning_friction_factor':'f_outlet', 'basin_outlet_water_x-section__mean_depth': 'd_outlet', 'basin_outlet_water_x-section__peak_time_of_depth': 'Td_peak', 'basin_outlet_water_x-section__peak_time_of_volume_flow_rate': 'T_peak', 'basin_outlet_water_x-section__peak_time_of_volume_flux': 'Tu_peak', 'basin_outlet_water_x-section__volume_flow_rate': 'Q_outlet', 'basin_outlet_water_x-section__volume_flux': 'u_outlet', 'basin_outlet_water_x-section__time_integral_of_volume_flow_rate': 'vol_Q', 'basin_outlet_water_x-section__time_max_of_mean_depth': 'd_peak', 'basin_outlet_water_x-section__time_max_of_volume_flow_rate':'Q_peak', 'basin_outlet_water_x-section__time_max_of_volume_flux': 'u_peak', #-------------------------------------------------------------------------- 'canals_entrance_water__volume_flow_rate': 'Q_canals_in', #-------------------------------------------------------------------------- 'channel_bottom_surface__slope': 'S_bed', 'channel_bottom_water_flow__domain_max_of_log_law_roughness_length': 'z0val_max', 'channel_bottom_water_flow__domain_min_of_log_law_roughness_length': 'z0val_min', 'channel_bottom_water_flow__log_law_roughness_length': 'z0val', 'channel_bottom_water_flow__magnitude_of_shear_stress': 'tau', 'channel_bottom_water_flow__shear_speed': 'u_star', 'channel_centerline__sinuosity': 'sinu', 'channel_water__volume': 'vol', 'channel_water_flow__domain_max_of_manning_n_parameter': 'nval_max', 'channel_water_flow__domain_min_of_manning_n_parameter': 'nval_min', 'channel_water_flow__froude_number': 'froude', 'channel_water_flow__half_of_fanning_friction_factor': 'f', 'channel_water_flow__manning_n_parameter': 'nval', 'channel_water_surface__slope': 'S_free', #----------------------------------------------------------------------- 'channel_water_x-section__domain_max_of_mean_depth': 'd_max', 'channel_water_x-section__domain_min_of_mean_depth': 'd_min', 'channel_water_x-section__domain_max_of_volume_flow_rate': 'Q_max', 'channel_water_x-section__domain_min_of_volume_flow_rate': 'Q_min', 'channel_water_x-section__domain_max_of_volume_flux': 'u_max', 'channel_water_x-section__domain_min_of_volume_flux': 'u_min', #----------------------------------------------------------------------- 'channel_water_x-section__hydraulic_radius': 'Rh', 'channel_water_x-section__initial_mean_depth': 'd0', 'channel_water_x-section__mean_depth': 'd', 'channel_water_x-section__volume_flow_rate': 'Q', 'channel_water_x-section__volume_flux': 'u', 'channel_water_x-section__wetted_area': 'A_wet', 'channel_water_x-section__wetted_perimeter': 'P_wet', ## 'channel_water_x-section_top__width': # (not used) 'channel_x-section_trapezoid_bottom__width': 'width', #### 'channel_x-section_trapezoid_side__flare_angle': 'angle', #### 'land_surface_water__domain_time_integral_of_runoff_volume_flux': 'vol_R', 'land_surface_water__runoff_volume_flux': 'R', 'model__time_step': 'dt', 'model_grid_cell__area': 'da', #------------------------------------------------------------------ 'canals__count': 'n_canals', 'canals_entrance__x_coordinate': 'canals_in_x', 'canals_entrance__y_coordinate': 'canals_in_y', 'canals_entrance_water__volume_fraction': 'Q_canals_fraction', 'canals_exit__x_coordinate': 'canals_out_x', 'canals_exit__y_coordinate': 'canals_out_y', 'canals_exit_water__volume_flow_rate': 'Q_canals_out', 'sinks__count': 'n_sinks', 'sinks__x_coordinate': 'sinks_x', 'sinks__y_coordinate': 'sinks_y', 'sinks_water__volume_flow_rate': 'Q_sinks', 'sources__count': 'n_sources', 'sources__x_coordinate': 'sources_x', 'sources__y_coordinate': 'sources_y', 'sources_water__volume_flow_rate': 'Q_sources' } #------------------------------------------------ # Create an "inverse var name map" # inv_map = dict(zip(map.values(), map.keys())) #------------------------------------------------ ## _long_name_map = dict( zip(_var_name_map.values(), ## _var_name_map.keys() ) ) _var_units_map = { 'atmosphere_water__rainfall_volume_flux': 'm s-1', 'glacier_ice__melt_volume_flux': 'm s-1', ## 'land_surface__elevation': 'm', ## 'land_surface__slope': '1', 'land_surface_water__baseflow_volume_flux': 'm s-1', 'land_surface_water__evaporation_volume_flux': 'm s-1', 'soil_surface_water__infiltration_volume_flux': 'm s-1', 'snowpack__melt_volume_flux': 'm s-1', 'water-liquid__mass-per-volume_density': 'kg m-3', #--------------------------------------------------------------------------- 'basin_outlet_water_flow__half_of_fanning_friction_factor': '1', 'basin_outlet_water_x-section__mean_depth': 'm', 'basin_outlet_water_x-section__peak_time_of_depth': 'min', 'basin_outlet_water_x-section__peak_time_of_volume_flow_rate': 'min', 'basin_outlet_water_x-section__peak_time_of_volume_flux': 'min', 'basin_outlet_water_x-section__time_integral_of_volume_flow_rate': 'm3', 'basin_outlet_water_x-section__time_max_of_mean_depth': 'm', 'basin_outlet_water_x-section__time_max_of_volume_flow_rate': 'm3 s-1', 'basin_outlet_water_x-section__time_max_of_volume_flux': 'm s-1', 'basin_outlet_water_x-section__volume_flow_rate': 'm3', 'basin_outlet_water_x-section__volume_flux': 'm s-1', #--------------------------------------------------------------------------- 'canals_entrance_water__volume_flow_rate': 'm3 s-1', #--------------------------------------------------------------------------- 'channel_bottom_surface__slope': '1', 'channel_bottom_water_flow__domain_max_of_log_law_roughness_length': 'm', 'channel_bottom_water_flow__domain_min_of_log_law_roughness_length': 'm', 'channel_bottom_water_flow__log_law_roughness_length': 'm', 'channel_bottom_water_flow__magnitude_of_shear_stress': 'kg m-1 s-2', 'channel_bottom_water_flow__shear_speed': 'm s-1', 'channel_centerline__sinuosity': '1', 'channel_water__volume': 'm3', 'channel_water_flow__froude_number': '1', 'channel_water_flow__half_of_fanning_friction_factor': '1', 'channel_water_flow__manning_n_parameter': 'm-1/3 s', 'channel_water_flow__domain_max_of_manning_n_parameter': 'm-1/3 s', 'channel_water_flow__domain_min_of_manning_n_parameter': 'm-1/3 s', 'channel_water_surface__slope': '1', #--------------------------------------------------------------------<|fim▁hole|> 'channel_water_x-section__domain_max_of_volume_flux': 'm s-1', 'channel_water_x-section__domain_min_of_volume_flux': 'm s-1', #-------------------------------------------------------------------- 'channel_water_x-section__hydraulic_radius': 'm', 'channel_water_x-section__initial_mean_depth': 'm', 'channel_water_x-section__mean_depth': 'm', 'channel_water_x-section__volume_flow_rate': 'm3 s-1', 'channel_water_x-section__volume_flux': 'm s-1', 'channel_water_x-section__wetted_area': 'm2', 'channel_water_x-section__wetted_perimeter': 'm', 'channel_x-section_trapezoid_bottom__width': 'm', 'channel_x-section_trapezoid_side__flare_angle': 'rad', # CHECKED 'land_surface_water__domain_time_integral_of_runoff_volume_flux': 'm3', 'land_surface_water__runoff_volume_flux': 'm s-1', 'model__time_step': 's', 'model_grid_cell__area': 'm2', #------------------------------------------------------------------ 'canals__count': '1', 'canals_entrance__x_coordinate': 'm', 'canals_entrance__y_coordinate': 'm', 'canals_entrance_water__volume_fraction': '1', 'canals_exit__x_coordinate': 'm', 'canals_exit__y_coordinate': 'm', 'canals_exit_water__volume_flow_rate': 'm3 s-1', 'sinks__count': '1', 'sinks__x_coordinate': 'm', 'sinks__y_coordinate': 'm', 'sinks_water__volume_flow_rate': 'm3 s-1', 'sources__count': '1', 'sources__x_coordinate': 'm', 'sources__y_coordinate': 'm', 'sources_water__volume_flow_rate': 'm3 s-1' } #------------------------------------------------ # Return NumPy string arrays vs. Python lists ? #------------------------------------------------ ## _input_var_names = np.array( _input_var_names ) ## _output_var_names = np.array( _output_var_names ) #------------------------------------------------------------------- def get_input_var_names(self): #-------------------------------------------------------- # Note: These are currently variables needed from other # components vs. those read from files or GUI. #-------------------------------------------------------- return self._input_var_names # get_input_var_names() #------------------------------------------------------------------- def get_output_var_names(self): return self._output_var_names # get_output_var_names() #------------------------------------------------------------------- def get_var_name(self, long_var_name): return self._var_name_map[ long_var_name ] # get_var_name() #------------------------------------------------------------------- def get_var_units(self, long_var_name): return self._var_units_map[ long_var_name ] # get_var_units() #------------------------------------------------------------------- ## def get_var_type(self, long_var_name): ## ## #--------------------------------------- ## # So far, all vars have type "double", ## # but use the one in BMI_base instead. ## #--------------------------------------- ## return 'float64' ## ## # get_var_type() #------------------------------------------------------------------- def set_constants(self): #------------------------ # Define some constants #------------------------ self.g = np.float64(9.81) # (gravitation const.) self.aval = np.float64(0.476) # (integration const.) self.kappa = np.float64(0.408) # (von Karman's const.) self.law_const = np.sqrt(self.g) / self.kappa self.one_third = np.float64(1.0) / 3.0 self.two_thirds = np.float64(2.0) / 3.0 self.deg_to_rad = np.pi / 180.0 # set_constants() #------------------------------------------------------------------- def initialize(self, cfg_file=None, mode="nondriver", SILENT=False): if not(SILENT): print ' ' print 'Channels component: Initializing...' self.status = 'initializing' # (OpenMI 2.0 convention) self.mode = mode self.cfg_file = cfg_file #----------------------------------------------- # Load component parameters from a config file #----------------------------------------------- self.set_constants() # (12/7/09) # print 'CHANNELS calling initialize_config_vars()...' self.initialize_config_vars() # print 'CHANNELS calling read_grid_info()...' self.read_grid_info() #print 'CHANNELS calling initialize_basin_vars()...' self.initialize_basin_vars() # (5/14/10) #----------------------------------------- # This must come before "Disabled" test. #----------------------------------------- # print 'CHANNELS calling initialize_time_vars()...' self.initialize_time_vars() #---------------------------------- # Has component been turned off ? #---------------------------------- if (self.comp_status == 'Disabled'): if not(SILENT): print 'Channels component: Disabled.' self.SAVE_Q_GRIDS = False # (It is True by default.) self.SAVE_Q_PIXELS = False # (It is True by default.) self.DONE = True self.status = 'initialized' # (OpenMI 2.0 convention) return ## print '################################################' ## print 'min(d0), max(d0) =', self.d0.min(), self.d0.max() ## print '################################################' #--------------------------------------------- # Open input files needed to initialize vars #--------------------------------------------- # Can't move read_input_files() to start of # update(), since initial values needed here. #--------------------------------------------- # print 'CHANNELS calling open_input_files()...' self.open_input_files() print 'CHANNELS calling read_input_files()...' self.read_input_files() #----------------------- # Initialize variables #----------------------- print 'CHANNELS calling initialize_d8_vars()...' self.initialize_d8_vars() # (depend on D8 flow grid) print 'CHANNELS calling initialize_computed_vars()...' self.initialize_computed_vars() #-------------------------------------------------- # (5/12/10) I think this is obsolete now. #-------------------------------------------------- # Make sure self.Q_ts_file is not NULL (12/22/05) # This is only output file that is set by default # and is still NULL if user hasn't opened the # output var dialog for the channel process. #-------------------------------------------------- ## if (self.SAVE_Q_PIXELS and (self.Q_ts_file == '')): ## self.Q_ts_file = (self.case_prefix + '_0D-Q.txt') self.open_output_files() self.status = 'initialized' # (OpenMI 2.0 convention) # initialize() #------------------------------------------------------------------- ## def update(self, dt=-1.0, time_seconds=None): def update(self, dt=-1.0): #--------------------------------------------- # Note that u and d from previous time step # must be used on RHS of the equations here. #--------------------------------------------- self.status = 'updating' # (OpenMI 2.0 convention) #------------------------------------------------------- # There may be times where we want to call this method # even if component is not the driver. But note that # the TopoFlow driver also makes this same call. #------------------------------------------------------- if (self.mode == 'driver'): self.print_time_and_value(self.Q_outlet, 'Q_out', '[m^3/s]') ### interval=0.5) # [seconds] # For testing (5/19/12) # self.print_time_and_value(self.Q_outlet, 'Q_out', '[m^3/s] CHANNEL') ## DEBUG = True DEBUG = False #------------------------- # Update computed values #------------------------- if (DEBUG): print '#### Calling update_R()...' self.update_R() if (DEBUG): print '#### Calling update_R_integral()...' self.update_R_integral() if (DEBUG): print '#### Calling update_discharge()...' self.update_discharge() if (DEBUG): print '#### Calling update_diversions()...' self.update_diversions() if (DEBUG): print '#### Calling update_flow_volume()...' self.update_flow_volume() if (DEBUG): print '#### Calling update_flow_depth()...' self.update_flow_depth() #----------------------------------------------------------------- if not(self.DYNAMIC_WAVE): if (DEBUG): print '#### Calling update_trapezoid_Rh()...' self.update_trapezoid_Rh() # print 'Rhmin, Rhmax =', self.Rh.min(), self.Rh.max()a #----------------------------------------------------------------- # (9/9/14) Moved this here from update_velocity() methods. #----------------------------------------------------------------- if not(self.KINEMATIC_WAVE): if (DEBUG): print '#### Calling update_free_surface_slope()...' self.update_free_surface_slope() if (DEBUG): print '#### Calling update_shear_stress()...' self.update_shear_stress() if (DEBUG): print '#### Calling update_shear_speed()...' self.update_shear_speed() #----------------------------------------------------------------- # Must update friction factor before velocity for DYNAMIC_WAVE. #----------------------------------------------------------------- if (DEBUG): print '#### Calling update_friction_factor()...' self.update_friction_factor() #----------------------------------------------------------------- if (DEBUG): print '#### Calling update_velocity()...' self.update_velocity() self.update_velocity_on_edges() # (set to zero) if (DEBUG): print '#### Calling update_froude_number()...' self.update_froude_number() #----------------------------------------------------------------- ## print 'Rmin, Rmax =', self.R.min(), self.R.max() ## print 'Qmin, Qmax =', self.Q.min(), self.Q.max() ## print 'umin, umax =', self.u.min(), self.u.max() ## print 'dmin, dmax =', self.d.min(), self.d.max() ## print 'nmin, nmax =', self.nval.min(), self.nval.max() ## print 'Rhmin, Rhmax =', self.Rh.min(), self.Rh.max() ## print 'Smin, Smax =', self.S_bed.min(), self.S_bed.max() if (DEBUG): print '#### Calling update_outlet_values()...' self.update_outlet_values() if (DEBUG): print '#### Calling update peak values()...' self.update_peak_values() if (DEBUG): print '#### Calling update_Q_out_integral()...' self.update_Q_out_integral() #--------------------------------------------- # This takes extra time and is now done # only at the end, in finalize(). (8/19/13) #--------------------------------------------- # But then "topoflow_driver" doesn't get # correctly updated values for some reason. #--------------------------------------------- ## self.update_mins_and_maxes() #------------------------ # Check computed values #------------------------ D_OK = self.check_flow_depth() U_OK = self.check_flow_velocity() OK = (D_OK and U_OK) #------------------------------------------- # Read from files as needed to update vars #----------------------------------------------------- # NB! This is currently not needed for the "channel # process" because values don't change over time and # read_input_files() is called by initialize(). #----------------------------------------------------- # if (self.time_index > 0): # self.read_input_files() #---------------------------------------------- # Write user-specified data to output files ? #---------------------------------------------- # Components use own self.time_sec by default. #----------------------------------------------- if (DEBUG): print '#### Calling write_output_files()...' self.write_output_files() ## self.write_output_files( time_seconds ) #----------------------------- # Update internal clock # after write_output_files() #----------------------------- if (DEBUG): print '#### Calling update_time()' self.update_time( dt ) if (OK): self.status = 'updated' # (OpenMI 2.0 convention) else: self.status = 'failed' self.DONE = True # update() #------------------------------------------------------------------- def finalize(self): #--------------------------------------------------- # We can compute mins and maxes in the final grids # here, but the framework will not then pass them # to any component (e.g. topoflow_driver) that may # need them. #--------------------------------------------------- REPORT = True self.update_mins_and_maxes( REPORT=REPORT ) ## (2/6/13) self.print_final_report(comp_name='Channels component') self.status = 'finalizing' # (OpenMI) self.close_input_files() # TopoFlow input "data streams" self.close_output_files() self.status = 'finalized' # (OpenMI) #--------------------------- # Release all of the ports #---------------------------------------- # Make this call in "finalize()" method # of the component's CCA Imple file #---------------------------------------- # self.release_cca_ports( d_services ) # finalize() #------------------------------------------------------------------- def set_computed_input_vars(self): #--------------------------------------------------------------- # Note: The initialize() method calls initialize_config_vars() # (in BMI_base.py), which calls this method at the end. #-------------------------------------------------------------- cfg_extension = self.get_attribute( 'cfg_extension' ).lower() # cfg_extension = self.get_cfg_extension().lower() self.KINEMATIC_WAVE = ("kinematic" in cfg_extension) self.DIFFUSIVE_WAVE = ("diffusive" in cfg_extension) self.DYNAMIC_WAVE = ("dynamic" in cfg_extension) ########################################################## # (5/17/12) If MANNING, we need to set z0vals to -1 so # they are always defined for use with new framework. ########################################################## if (self.MANNING): if (self.nval != None): self.nval = np.float64( self.nval ) #### 10/9/10, NEED self.nval_min = self.nval.min() self.nval_max = self.nval.max() #----------------------------------- self.z0val = np.float64(-1) self.z0val_min = np.float64(-1) self.z0val_max = np.float64(-1) if (self.LAW_OF_WALL): if (self.z0val != None): self.z0val = np.float64( self.z0val ) #### (10/9/10) self.z0val_min = self.z0val.min() self.z0val_max = self.z0val.max() #----------------------------------- self.nval = np.float64(-1) self.nval_min = np.float64(-1) self.nval_max = np.float64(-1) #------------------------------------------- # These currently can't be set to anything # else in the GUI, but need to be defined. #------------------------------------------- self.code_type = 'Grid' self.slope_type = 'Grid' #--------------------------------------------------------- # Make sure that all "save_dts" are larger or equal to # the specified process dt. There is no point in saving # results more often than they change. # Issue a message to this effect if any are smaller ?? #--------------------------------------------------------- self.save_grid_dt = np.maximum(self.save_grid_dt, self.dt) self.save_pixels_dt = np.maximum(self.save_pixels_dt, self.dt) #--------------------------------------------------- # This is now done in CSDMS_base.read_config_gui() # for any var_name that starts with "SAVE_". #--------------------------------------------------- # self.SAVE_Q_GRID = (self.SAVE_Q_GRID == 'Yes') # set_computed_input_vars() #------------------------------------------------------------------- def initialize_d8_vars(self): #--------------------------------------------- # Compute and store a variety of (static) D8 # flow grid variables. Embed structure into # the "channel_base" component. #--------------------------------------------- self.d8 = d8_base.d8_component() ############################################### # (5/13/10) Do next line here for now, until # the d8 cfg_file includes static prefix. # Same is done in GW_base.py. ############################################### # tf_d8_base.read_grid_info() also needs # in_directory to be set. (10/27/11) ############################################### #-------------------------------------------------- # D8 component builds its cfg filename from these #-------------------------------------------------- self.d8.site_prefix = self.site_prefix self.d8.in_directory = self.in_directory self.d8.initialize( cfg_file=None, SILENT=self.SILENT, REPORT=self.REPORT ) ## self.code = self.d8.code # Don't need this. #------------------------------------------- # We'll need this once we shift from using # "tf_d8_base.py" to the new "d8_base.py" #------------------------------------------- # self.d8.update(self.time, SILENT=False, REPORT=True) # initialize_d8_vars() #------------------------------------------------------------- def initialize_computed_vars(self): #----------------------------------------------- # Convert bank angles from degrees to radians. #----------------------------------------------- self.angle = self.angle * self.deg_to_rad # [radians] #------------------------------------------------ # 8/29/05. Multiply ds by (unitless) sinuosity # Orig. ds is used by subsurface flow #------------------------------------------------ # NB! We should also divide slopes in S_bed by # the sinuosity, as now done here. #---------------------------------------------------- # NB! This saves a modified version of ds that # is only used within the "channels" component. # The original "ds" is stored within the # topoflow model component and is used for # subsurface flow, etc. #---------------------------------------------------- ### self.d8.ds_chan = (self.sinu * ds) ### self.ds = (self.sinu * self.d8.ds) self.d8.ds = (self.sinu * self.d8.ds) ### USE LESS MEMORY ################################################### ################################################### ### S_bed = (S_bed / self.sinu) #************* self.slope = (self.slope / self.sinu) self.S_bed = self.slope ################################################### ################################################### #--------------------------- # Initialize spatial grids #----------------------------------------------- # NB! It is not a good idea to initialize the # water depth grid to a nonzero scalar value. #----------------------------------------------- print 'Initializing u, f, d grids...' self.u = np.zeros([self.ny, self.nx], dtype='Float64') self.f = np.zeros([self.ny, self.nx], dtype='Float64') self.d = np.zeros([self.ny, self.nx], dtype='Float64') + self.d0 ######################################################### # Add this on (2/3/13) so make the TF driver happy # during its initialize when it gets reference to R. # But in "update_R()", be careful not to break the ref. # "Q" may be subject to the same issue. ######################################################### self.Q = np.zeros([self.ny, self.nx], dtype='Float64') self.R = np.zeros([self.ny, self.nx], dtype='Float64') #--------------------------------------------------- # Initialize new grids. Is this needed? (9/13/14) #--------------------------------------------------- self.tau = np.zeros([self.ny, self.nx], dtype='Float64') self.u_star = np.zeros([self.ny, self.nx], dtype='Float64') self.froude = np.zeros([self.ny, self.nx], dtype='Float64') #--------------------------------------- # These are used to check mass balance #--------------------------------------- self.vol_R = self.initialize_scalar( 0, dtype='float64') self.vol_Q = self.initialize_scalar( 0, dtype='float64') #------------------------------------------- # Make sure all slopes are valid & nonzero # since otherwise flow will accumulate #------------------------------------------- if (self.KINEMATIC_WAVE): self.remove_bad_slopes() #(3/8/07. Only Kin Wave case) #---------------------------------------- # Initial volume of water in each pixel #----------------------------------------------------------- # Note: angles were read as degrees & converted to radians #----------------------------------------------------------- L2 = self.d * np.tan(self.angle) self.A_wet = self.d * (self.width + L2) self.P_wet = self.width + (np.float64(2) * self.d / np.cos(self.angle) ) self.vol = self.A_wet * self.d8.ds # [m3] #------------------------------------------------------- # Note: depth is often zero at the start of a run, and # both width and then P_wet are also zero in places. # Therefore initialize Rh as shown. #------------------------------------------------------- self.Rh = np.zeros([self.ny, self.nx], dtype='Float64') ## self.Rh = self.A_wet / self.P_wet # [m] ## print 'P_wet.min() =', self.P_wet.min() ## print 'width.min() =', self.width.min() ## self.initialize_diversion_vars() # (9/22/14) self.initialize_outlet_values() self.initialize_peak_values() self.initialize_min_and_max_values() ## (2/3/13) ######################################### # Maybe save all refs in a dictionary # called "self_values" here ? (2/19/13) # Use a "reverse" var_name mapping? # inv_map = dict(zip(map.values(), map.keys())) ######################################### ## w = np.where( self.width <= 0 ) ## nw = np.size( w[0] ) # (This is correct for 1D or 2D.) ## if (nw > 0): ## print 'WARNING:' ## print 'Number of locations where width==0 =', nw ## if (nw < 10): ## print 'locations =', w ## print ' ' # initialize_computed_vars() #------------------------------------------------------------- def initialize_diversion_vars(self): #----------------------------------------- # Compute source IDs from xy coordinates #----------------------------------------- source_rows = np.int32( self.sources_y / self.ny ) source_cols = np.int32( self.sources_x / self.nx ) self.source_IDs = (source_rows, source_cols) ## self.source_IDs = (source_rows * self.nx) + source_cols #--------------------------------------- # Compute sink IDs from xy coordinates #--------------------------------------- sink_rows = np.int32( self.sinks_y / self.ny ) sink_cols = np.int32( self.sinks_x / self.nx ) self.sink_IDs = (sink_rows, sink_cols) ## self.sink_IDs = (sink_rows * self.nx) + sink_cols #------------------------------------------------- # Compute canal entrance IDs from xy coordinates #------------------------------------------------- canal_in_rows = np.int32( self.canals_in_y / self.ny ) canal_in_cols = np.int32( self.canals_in_x / self.nx ) self.canal_in_IDs = (canal_in_rows, canal_in_cols) ## self.canal_in_IDs = (canal_in_rows * self.nx) + canal_in_cols #--------------------------------------------- # Compute canal exit IDs from xy coordinates #--------------------------------------------- canal_out_rows = np.int32( self.canals_out_y / self.ny ) canal_out_cols = np.int32( self.canals_out_x / self.nx ) self.canal_out_IDs = (canal_out_rows, canal_out_cols) ## self.canal_out_IDs = (canal_out_rows * self.nx) + canal_out_cols #-------------------------------------------------- # This will be computed from Q_canal_fraction and # self.Q and then passed back to Diversions #-------------------------------------------------- self.Q_canals_in = np.array( self.n_sources, dtype='float64' ) # initialize_diversion_vars() #------------------------------------------------------------------- def initialize_outlet_values(self): #--------------------------------------------------- # Note: These are retrieved and used by TopoFlow # for the stopping condition. TopoFlow # receives a reference to these, but in # order to see the values change they need # to be stored as mutable, 1D numpy arrays. #--------------------------------------------------- # Note: Q_last is internal to TopoFlow. #--------------------------------------------------- # self.Q_outlet = self.Q[ self.outlet_ID ] self.Q_outlet = self.initialize_scalar(0, dtype='float64') self.u_outlet = self.initialize_scalar(0, dtype='float64') self.d_outlet = self.initialize_scalar(0, dtype='float64') self.f_outlet = self.initialize_scalar(0, dtype='float64') # initialize_outlet_values() #------------------------------------------------------------------- def initialize_peak_values(self): #------------------------- # Initialize peak values #------------------------- self.Q_peak = self.initialize_scalar(0, dtype='float64') self.T_peak = self.initialize_scalar(0, dtype='float64') self.u_peak = self.initialize_scalar(0, dtype='float64') self.Tu_peak = self.initialize_scalar(0, dtype='float64') self.d_peak = self.initialize_scalar(0, dtype='float64') self.Td_peak = self.initialize_scalar(0, dtype='float64') # initialize_peak_values() #------------------------------------------------------------------- def initialize_min_and_max_values(self): #------------------------------- # Initialize min & max values # (2/3/13), for new framework. #------------------------------- v = 1e6 self.Q_min = self.initialize_scalar(v, dtype='float64') self.Q_max = self.initialize_scalar(-v, dtype='float64') self.u_min = self.initialize_scalar(v, dtype='float64') self.u_max = self.initialize_scalar(-v, dtype='float64') self.d_min = self.initialize_scalar(v, dtype='float64') self.d_max = self.initialize_scalar(-v, dtype='float64') # initialize_min_and_max_values() #------------------------------------------------------------------- # def update_excess_rainrate(self): def update_R(self): #---------------------------------------- # Compute the "excess rainrate", R. # Each term must have same units: [m/s] # Sum = net gain/loss rate over pixel. #---------------------------------------------------- # R can be positive or negative. If negative, then # water is removed from the surface at rate R until # surface water is consumed. #-------------------------------------------------------------- # P = precip_rate [m/s] (converted by read_input_data()). # SM = snowmelt rate [m/s] # GW = seep rate [m/s] (water_table intersects surface) # ET = evap rate [m/s] # IN = infil rate [m/s] # MR = icemelt rate [m/s] #------------------------------------------------------------ # Use refs to other comp vars from new framework. (5/18/12) #------------------------------------------------------------ P = self.P_rain # (This is now liquid-only precip. 9/14/14) SM = self.SM GW = self.GW ET = self.ET IN = self.IN MR = self.MR ## if (self.DEBUG): ## print 'At time:', self.time_min, ', P =', P, '[m/s]' #-------------- # For testing #-------------- ## print '(Pmin, Pmax) =', P.min(), P.max() ## print '(SMmin, SMmax) =', SM.min(), SM.max() ## print '(GWmin, GWmax) =', GW.min(), GW.max() ## print '(ETmin, ETmax) =', ET.min(), ET.max() ## print '(INmin, INmax) =', IN.min(), IN.max() ## print '(MRmin, MRmax) =', MR.min(), MR.max() ## # print '(Hmin, Hmax) =', H.min(), H.max() ## print ' ' self.R = (P + SM + GW + MR) - (ET + IN) # update_R() #------------------------------------------------------------------- def update_R_integral(self): #----------------------------------------------- # Update mass total for R, sum over all pixels #----------------------------------------------- volume = np.double(self.R * self.da * self.dt) # [m^3] if (np.size(volume) == 1): self.vol_R += (volume * self.rti.n_pixels) else: self.vol_R += np.sum(volume) # update_R_integral() #------------------------------------------------------------------- def update_discharge(self): #--------------------------------------------------------- # The discharge grid, Q, gives the flux of water _out_ # of each grid cell. This entire amount then flows # into one of the 8 neighbor grid cells, as indicated # by the D8 flow code. The update_flow_volume() function # is called right after this one in update() and uses # the Q grid. #--------------------------------------------------------- # 7/15/05. The cross-sectional area of a trapezoid is # given by: Ac = d * (w + (d * tan(theta))), # where w is the bottom width. If we were to # use: Ac = w * d, then we'd have Ac=0 when w=0. # We also need angle units to be radians. #--------------------------------------------------------- #----------------------------- # Compute the discharge grid #------------------------------------------------------ # A_wet is initialized in initialize_computed_vars(). # A_wet is updated in update_trapezoid_Rh(). #------------------------------------------------------ ### self.Q = np.float64(self.u * A_wet) self.Q[:] = self.u * self.A_wet ## (2/19/13, in place) #-------------- # For testing #-------------- ## print '(umin, umax) =', self.u.min(), self.u.max() ## print '(d0min, d0max) =', self.d0.min(), self.d0.max() ## print '(dmin, dmax) =', self.d.min(), self.d.max() ## print '(amin, amax) =', self.angle.min(), self.angle.max() ## print '(wmin, wmax) =', self.width.min(), self.width.max() ## print '(Qmin, Qmax) =', self.Q.min(), self.Q.max() ## print '(L2min, L2max) =', L2.min(), L2.max() ## print '(Qmin, Qmax) =', self.Q.min(), self.Q.max() #-------------- # For testing #-------------- # print 'dmin, dmax =', self.d.min(), self.d.max() # print 'umin, umax =', self.u.min(), self.u.max() # print 'Qmin, Qmax =', self.Q.min(), self.Q.max() # print ' ' # print 'u(outlet) =', self.u[self.outlet_ID] # print 'Q(outlet) =', self.Q[self.outlet_ID] ######## #---------------------------------------------------- # Wherever depth is less than z0, assume that water # is not flowing and set u and Q to zero. # However, we also need (d gt 0) to avoid a divide # by zero problem, even when numerators are zero. #---------------------------------------------------- # FLOWING = (d > (z0/aval)) #*** FLOWING[self.d8.noflow_IDs] = False ;****** # u = (u * FLOWING) # Q = (Q * FLOWING) # d = np.maximum(d, 0.0) ;(allow depths lt z0, if gt 0.) # update_discharge() #------------------------------------------------------------------- def update_diversions(self): #-------------------------------------------------------------- # Note: The Channel component requests the following input # vars from the Diversions component by including # them in its "get_input_vars()": # (1) Q_sources, Q_sources_x, Q_sources_y # (2) Q_sinks, Q_sinks_x, Q_sinks_y # (3) Q_canals_out, Q_canals_out_x, Q_canals_out_y # (4) Q_canals_fraction, Q_canals_in_x, Q_canals_in_y. # source_IDs are computed from (x,y) coordinates during # initialize(). # # Diversions component needs to get Q_canals_in from the # Channel component. #-------------------------------------------------------------- # Note: This *must* be called after update_discharge() and # before update_flow_volume(). #-------------------------------------------------------------- # Note: The Q grid stores the volume flow rate *leaving* each # grid cell in the domain. For sources, an extra amount # is leaving the cell which can flow into its D8 parent # cell. For sinks, a lesser amount is leaving the cell # toward the D8 parent. #-------------------------------------------------------------- # Note: It is not enough to just update Q and then call the # update_flow_volume() method. This is because it # won't update the volume in the channels in the grid # cells that the extra discharge is leaving from. #-------------------------------------------------------------- # If a grid cell contains a "source", then an additional Q # will flow *into* that grid cell and increase flow volume. #-------------------------------------------------------------- #------------------------------------------------------------- # This is not fully tested but runs. However, the Diversion # vars are still computed even when Diversions component is # disabled. So it slows things down somewhat. #------------------------------------------------------------- return ######################## ######################## #---------------------------------------- # Update Q and vol due to point sources #---------------------------------------- ## if (hasattr(self, 'source_IDs')): if (self.n_sources > 0): self.Q[ self.source_IDs ] += self.Q_sources self.vol[ self.source_IDs ] += (self.Q_sources * self.dt) #-------------------------------------- # Update Q and vol due to point sinks #-------------------------------------- ## if (hasattr(self, 'sink_IDs')): if (self.n_sinks > 0): self.Q[ self.sink_IDs ] -= self.Q_sinks self.vol[ self.sink_IDs ] -= (self.Q_sinks * self.dt) #--------------------------------------- # Update Q and vol due to point canals #--------------------------------------- ## if (hasattr(self, 'canal_in_IDs')): if (self.n_canals > 0): #----------------------------------------------------------------- # Q grid was just modified. Apply the canal diversion fractions # to compute the volume flow rate into upstream ends of canals. #----------------------------------------------------------------- Q_canals_in = self.Q_canals_fraction * self.Q[ self.canal_in_IDs ] self.Q_canals_in = Q_canals_in #---------------------------------------------------- # Update Q and vol due to losses at canal entrances #---------------------------------------------------- self.Q[ self.canal_in_IDs ] -= Q_canals_in self.vol[ self.canal_in_IDs ] -= (Q_canals_in * self.dt) #------------------------------------------------- # Update Q and vol due to gains at canal exits. # Diversions component accounts for travel time. #------------------------------------------------- self.Q[ self.canal_out_IDs ] += self.Q_canals_out self.vol[ self.canal_out_IDs ] += (self.Q_canals_out * self.dt) # update_diversions() #------------------------------------------------------------------- def update_flow_volume(self): #----------------------------------------------------------- # Notes: This function must be called after # update_discharge() and update_diversions(). #----------------------------------------------------------- # Notes: Q = surface discharge [m^3/s] # R = excess precip. rate [m/s] # da = pixel area [m^2] # dt = channel flow timestep [s] # vol = total volume of water in pixel [m^3] # v2 = temp version of vol # w1 = IDs of pixels that... # p1 = IDs of parent pixels that... #----------------------------------------------------------- dt = self.dt # [seconds] #---------------------------------------------------- # Add contribution (or loss ?) from excess rainrate #---------------------------------------------------- # Contributions over entire grid cell from rainfall, # snowmelt, icemelt and baseflow (minus losses from # evaporation and infiltration) are assumed to flow # into the channel within the grid cell. # Note that R is allowed to be negative. #---------------------------------------------------- self.vol += (self.R * self.da) * dt # (in place) #----------------------------------------- # Add contributions from neighbor pixels #------------------------------------------------------------- # Each grid cell passes flow to *one* downstream neighbor. # Note that multiple grid cells can flow toward a given grid # cell, so a grid cell ID may occur in d8.p1 and d8.p2, etc. #------------------------------------------------------------- # (2/16/10) RETEST THIS. Before, a copy called "v2" was # used but this doesn't seem to be necessary. #------------------------------------------------------------- if (self.d8.p1_OK): self.vol[ self.d8.p1 ] += (dt * self.Q[self.d8.w1]) if (self.d8.p2_OK): self.vol[ self.d8.p2 ] += (dt * self.Q[self.d8.w2]) if (self.d8.p3_OK): self.vol[ self.d8.p3 ] += (dt * self.Q[self.d8.w3]) if (self.d8.p4_OK): self.vol[ self.d8.p4 ] += (dt * self.Q[self.d8.w4]) if (self.d8.p5_OK): self.vol[ self.d8.p5 ] += (dt * self.Q[self.d8.w5]) if (self.d8.p6_OK): self.vol[ self.d8.p6 ] += (dt * self.Q[self.d8.w6]) if (self.d8.p7_OK): self.vol[ self.d8.p7 ] += (dt * self.Q[self.d8.w7]) if (self.d8.p8_OK): self.vol[ self.d8.p8 ] += (dt * self.Q[self.d8.w8]) #---------------------------------------------------- # Subtract the amount that flows out to D8 neighbor #---------------------------------------------------- self.vol -= (self.Q * dt) # (in place) #-------------------------------------------------------- # While R can be positive or negative, the surface flow # volume must always be nonnegative. This also ensures # that the flow depth is nonnegative. (7/13/06) #-------------------------------------------------------- ## self.vol = np.maximum(self.vol, 0.0) ## self.vol[:] = np.maximum(self.vol, 0.0) # (2/19/13) np.maximum( self.vol, 0.0, self.vol ) # (in place) # update_flow_volume #------------------------------------------------------------------- def update_flow_depth(self): #----------------------------------------------------------- # Notes: 7/18/05. Modified to use the equation for volume # of a trapezoidal channel: vol = Ac * ds, where # Ac=d*[w + d*tan(t)], and to solve the resulting # quadratic (discarding neg. root) for new depth, d. # 8/29/05. Now original ds is used for subsurface # flow and there is a ds_chan which can include a # sinuosity greater than 1. This may be especially # important for larger pixel sizes. # Removed (ds > 1) here which was only meant to # avoid a "divide by zero" error at pixels where # (ds eq 0). This isn't necessary since the # Flow_Lengths function in utils_TF.pro never # returns a value of zero. #---------------------------------------------------------- # Modified to avoid double where calls, which # reduced cProfile run time for this method from # 1.391 to 0.644. (9/23/14) #---------------------------------------------------------- # Commented this out on (2/18/10) because it doesn't # seem to be used anywhere now. Checked all # of the Channels components. #---------------------------------------------------------- # self.d_last = self.d.copy() #----------------------------------- # Make some local aliases and vars #----------------------------------------------------------- # Note: angles were read as degrees & converted to radians #----------------------------------------------------------- d = self.d width = self.width ### angle = self.angle SCALAR_ANGLES = (np.size(angle) == 1) #------------------------------------------------------ # (2/18/10) New code to deal with case where the flow # depth exceeds a bankfull depth. # For now, d_bankfull is hard-coded. # # CHANGE Manning's n here, too? #------------------------------------------------------ d_bankfull = 4.0 # [meters] ################################ wb = (self.d > d_bankfull) # (array of True or False) self.width[ wb ] = self.d8.dw[ wb ] if not(SCALAR_ANGLES): self.angle[ wb ] = 0.0 # w_overbank = np.where( d > d_bankfull ) # n_overbank = np.size( w_overbank[0] ) # if (n_overbank != 0): # width[ w_overbank ] = self.d8.dw[ w_overbank ] # if not(SCALAR_ANGLES): angle[w_overbank] = 0.0 #------------------------------------------------------ # (2/18/10) New code to deal with case where the top # width exceeds the grid cell width, dw. #------------------------------------------------------ top_width = width + (2.0 * d * np.sin(self.angle)) wb = (top_width > self.d8.dw) # (array of True or False) self.width[ wb ] = self.d8.dw[ wb ] if not(SCALAR_ANGLES): self.angle[ wb ] = 0.0 # wb = np.where(top_width > self.d8.dw) # nb = np.size(w_bad[0]) # if (nb != 0): # width[ wb ] = self.d8.dw[ wb ] # if not(SCALAR_ANGLES): angle[ wb ] = 0.0 #---------------------------------- # Is "angle" a scalar or a grid ? #---------------------------------- if (SCALAR_ANGLES): if (angle == 0.0): d = self.vol / (width * self.d8.ds) else: denom = 2.0 * np.tan(angle) arg = 2.0 * denom * self.vol / self.d8.ds arg += width**(2.0) d = (np.sqrt(arg) - width) / denom else: #----------------------------------------------------- # Pixels where angle is 0 must be handled separately #----------------------------------------------------- w1 = ( angle == 0 ) # (arrays of True or False) w2 = np.invert( w1 ) #----------------------------------- A_top = width[w1] * self.d8.ds[w1] d[w1] = self.vol[w1] / A_top #----------------------------------- denom = 2.0 * np.tan(angle[w2]) arg = 2.0 * denom * self.vol[w2] / self.d8.ds[w2] arg += width[w2]**(2.0) d[w2] = (np.sqrt(arg) - width[w2]) / denom #----------------------------------------------------- # Pixels where angle is 0 must be handled separately #----------------------------------------------------- # wz = np.where( angle == 0 ) # nwz = np.size( wz[0] ) # wzc = np.where( angle != 0 ) # nwzc = np.size( wzc[0] ) # # if (nwz != 0): # A_top = width[wz] * self.d8.ds[wz] # ## A_top = self.width[wz] * self.d8.ds_chan[wz] # d[wz] = self.vol[wz] / A_top # # if (nwzc != 0): # term1 = 2.0 * np.tan(angle[wzc]) # arg = 2.0 * term1 * self.vol[wzc] / self.d8.ds[wzc] # arg += width[wzc]**(2.0) # d[wzc] = (np.sqrt(arg) - width[wzc]) / term1 #------------------------------------------ # Set depth values on edges to zero since # they become spikes (no outflow) 7/15/06 #------------------------------------------ d[ self.d8.noflow_IDs ] = 0.0 #------------------------------------------------ # 4/19/06. Force flow depth to be positive ? #------------------------------------------------ # This seems to be needed with the non-Richards # infiltration routines when starting with zero # depth everywhere, since all water infiltrates # for some period of time. It also seems to be # needed more for short rainfall records to # avoid a negative flow depth error. #------------------------------------------------ # 7/13/06. Still needed for Richards method #------------------------------------------------ ## self.d = np.maximum(d, 0.0) np.maximum(d, 0.0, self.d) # (2/19/13, in place) #------------------------------------------------- # Find where d <= 0 and save for later (9/23/14) #------------------------------------------------- self.d_is_pos = (self.d > 0) self.d_is_neg = np.invert( self.d_is_pos ) # update_flow_depth #------------------------------------------------------------------- def update_free_surface_slope(self): #----------------------------------------------------------- # Notes: It is assumed that the flow directions don't # change even though the free surface is changing. #----------------------------------------------------------- delta_d = (self.d - self.d[self.d8.parent_IDs]) self.S_free[:] = self.S_bed + (delta_d / self.d8.ds) #-------------------------------------------- # Don't do this; negative slopes are needed # to decelerate flow in dynamic wave case # and for backwater effects. #-------------------------------------------- # Set negative slopes to zero #------------------------------ ### self.S_free = np.maximum(self.S_free, 0) # update_free_surface_slope() #------------------------------------------------------------------- def update_shear_stress(self): #-------------------------------------------------------- # Notes: 9/9/14. Added so shear stress could be shared. # This uses the depth-slope product. #-------------------------------------------------------- if (self.KINEMATIC_WAVE): slope = self.S_bed else: slope = self.S_free self.tau[:] = self.rho_H2O * self.g * self.d * slope # update_shear_stress() #------------------------------------------------------------------- def update_shear_speed(self): #-------------------------------------------------------- # Notes: 9/9/14. Added so shear speed could be shared. #-------------------------------------------------------- self.u_star[:] = np.sqrt( self.tau / self.rho_H2O ) # update_shear_speed() #------------------------------------------------------------------- def update_trapezoid_Rh(self): #------------------------------------------------------------- # Notes: Compute the hydraulic radius of a trapezoid that: # (1) has a bed width of wb >= 0 (0 for triangular) # (2) has a bank angle of theta (0 for rectangular) # (3) is filled with water to a depth of d. # The units of wb and d are meters. The units of # theta are assumed to be degrees and are converted. #------------------------------------------------------------- # NB! wb should never be zero, so P_wet can never be 0, # which would produce a NaN (divide by zero). #------------------------------------------------------------- # See Notes for TF_Tan function in utils_TF.pro # AW = d * (wb + (d * TF_Tan(theta_rad)) ) #------------------------------------------------------------- # 9/9/14. Bug fix. Angles were already in radians but # were converted to radians again. #-------------------------------------------------------------- #--------------------------------------------------------- # Compute hydraulic radius grid for trapezoidal channels #----------------------------------------------------------- # Note: angles were read as degrees & converted to radians #----------------------------------------------------------- d = self.d # (local synonyms) wb = self.width # (trapezoid bottom width) L2 = d * np.tan( self.angle ) A_wet = d * (wb + L2) P_wet = wb + (np.float64(2) * d / np.cos(self.angle) ) #--------------------------------------------------- # At noflow_IDs (e.g. edges) P_wet may be zero # so do this to avoid "divide by zero". (10/29/11) #--------------------------------------------------- P_wet[ self.d8.noflow_IDs ] = np.float64(1) Rh = (A_wet / P_wet) #-------------------------------- # w = np.where(P_wet == 0) # print 'In update_trapezoid_Rh():' # print ' P_wet= 0 at', w[0].size, 'cells' #------------------------------------ # Force edge pixels to have Rh = 0. # This will make u = 0 there also. #------------------------------------ Rh[ self.d8.noflow_IDs ] = np.float64(0) ## w = np.where(wb <= 0) ## nw = np.size(w[0]) ## if (nw > 0): Rh[w] = np.float64(0) self.Rh[:] = Rh self.A_wet[:] = A_wet ## (Now shared: 9/9/14) self.P_wet[:] = P_wet ## (Now shared: 9/9/14) #--------------- # For testing #-------------- ## print 'dmin, dmax =', d.min(), d.max() ## print 'wmin, wmax =', wb.min(), wb.max() ## print 'amin, amax =', self.angle.min(), self.angle.max() # update_trapezoid_Rh() #------------------------------------------------------------------- def update_friction_factor(self): #---------------------------------------- # Note: Added on 9/9/14 to streamline. #---------------------------------------------------------- # Note: f = half of the Fanning friction factor # d = flow depth [m] # z0 = roughness length # S = bed slope (assumed equal to friction slope) # g = 9.81 = gravitation constant [m/s^2] #--------------------------------------------------------- # For law of the wall: # kappa = 0.41 = von Karman's constant # aval = 0.48 = integration constant # law_const = sqrt(g)/kappa = 7.6393d # smoothness = (aval / z0) * d # f = (kappa / alog(smoothness))^2d # tau_bed = rho_w * f * u^2 = rho_w * g * d * S # d, S, and z0 can be arrays. # To make default z0 correspond to default # Manning's n, can use this approximation: # z0 = a * (2.34 * sqrt(9.81) * n / kappa)^6d # For n=0.03, this gives: z0 = 0.011417 ######################################################### # However, for n=0.3, it gives: z0 = 11417.413 # which is 11.4 km! So the approximation only # holds within some range of values. #-------------------------------------------------------- ############################################################### # cProfile: This method took: 0.369 secs for topoflow_test() ############################################################### #-------------------------------------- # Find where (d <= 0). g=good, b=bad #-------------------------------------- wg = self.d_is_pos wb = self.d_is_neg # wg = ( self.d > 0 ) # wb = np.invert( wg ) #----------------------------- # Compute f for Manning case #----------------------------------------- # This makes f=0 and du=0 where (d <= 0) #----------------------------------------- if (self.MANNING): n2 = self.nval ** np.float64(2) self.f[ wg ] = self.g * (n2[wg] / (self.d[wg] ** self.one_third)) self.f[ wb ] = np.float64(0) #--------------------------------- # Compute f for Law of Wall case #--------------------------------- if (self.LAW_OF_WALL): #------------------------------------------------ # Make sure (smoothness > 1) before taking log. # Should issue a warning if this is used. #------------------------------------------------ smoothness = (self.aval / self.z0val) * self.d np.maximum(smoothness, np.float64(1.1), smoothness) # (in place) self.f[wg] = (self.kappa / np.log(smoothness[wg])) ** np.float64(2) self.f[wb] = np.float64(0) ############################################################## # cProfile: This method took: 0.93 secs for topoflow_test() ############################################################## # #-------------------------------------- # # Find where (d <= 0). g=good, b=bad # #-------------------------------------- # wg = np.where( self.d > 0 ) # ng = np.size( wg[0]) # wb = np.where( self.d <= 0 ) # nb = np.size( wb[0] ) # # #----------------------------- # # Compute f for Manning case # #----------------------------------------- # # This makes f=0 and du=0 where (d <= 0) # #----------------------------------------- # if (self.MANNING): # n2 = self.nval ** np.float64(2) # if (ng != 0): # self.f[wg] = self.g * (n2[wg] / (self.d[wg] ** self.one_third)) # if (nb != 0): # self.f[wb] = np.float64(0) # # #--------------------------------- # # Compute f for Law of Wall case # #--------------------------------- # if (self.LAW_OF_WALL): # #------------------------------------------------ # # Make sure (smoothness > 1) before taking log. # # Should issue a warning if this is used. # #------------------------------------------------ # smoothness = (self.aval / self.z0val) * self.d # np.maximum(smoothness, np.float64(1.1), smoothness) # (in place) # ## smoothness = np.maximum(smoothness, np.float64(1.1)) # if (ng != 0): # self.f[wg] = (self.kappa / np.log(smoothness[wg])) ** np.float64(2) # if (nb != 0): # self.f[wb] = np.float64(0) #--------------------------------------------- # We could share the Fanning friction factor #--------------------------------------------- ### self.fanning = (np.float64(2) * self.f) # update_friction_factor() #------------------------------------------------------------------- def update_velocity(self): #--------------------------------------------------------- # Note: Do nothing now unless this method is overridden # by a particular method of computing velocity. #--------------------------------------------------------- print "Warning: update_velocity() method is inactive." # print 'KINEMATIC WAVE =', self.KINEMATIC_WAVE # print 'DIFFUSIVE WAVE =', self.DIFFUSIVE_WAVE # print 'DYNAMIC WAVE =', self.DYNAMIC_WAVE # update_velocity() #------------------------------------------------------------------- def update_velocity_on_edges(self): #--------------------------------- # Force edge pixels to have u=0. #---------------------------------------- # Large slope around 1 flows into small # slope & leads to a negative velocity. #---------------------------------------- self.u[ self.d8.noflow_IDs ] = np.float64(0) # update_velocity_on_edges() #------------------------------------------------------------------- def update_froude_number(self): #---------------------------------------------------------- # Notes: 9/9/14. Added so Froude number could be shared. # This use of wg & wb reduced cProfile time from: # 0.644 sec to: 0.121. (9/23/14) #---------------------------------------------------------- # g = good, b = bad #-------------------- wg = self.d_is_pos wb = self.d_is_neg self.froude[ wg ] = self.u[wg] / np.sqrt( self.g * self.d[wg] ) self.froude[ wb ] = np.float64(0) # update_froude_number() #------------------------------------------------------------- def update_outlet_values(self): #------------------------------------------------- # Save computed values at outlet, which are used # by the TopoFlow driver. #----------------------------------------------------- # Note that Q_outlet, etc. are defined as 0D numpy # arrays to make them "mutable scalars" (i.e. # this allows changes to be seen by other components # who have a reference. To preserver the reference, # however, we must use fill() to assign a new value. #----------------------------------------------------- Q_outlet = self.Q[ self.outlet_ID ] u_outlet = self.u[ self.outlet_ID ] d_outlet = self.d[ self.outlet_ID ] f_outlet = self.f[ self.outlet_ID ] self.Q_outlet.fill( Q_outlet ) self.u_outlet.fill( u_outlet ) self.d_outlet.fill( d_outlet ) self.f_outlet.fill( f_outlet ) ## self.Q_outlet.fill( self.Q[ self.outlet_ID ] ) ## self.u_outlet.fill( self.u[ self.outlet_ID ] ) ## self.d_outlet.fill( self.d[ self.outlet_ID ] ) ## self.f_outlet.fill( self.f[ self.outlet_ID ] ) ## self.Q_outlet = self.Q[ self.outlet_ID ] ## self.u_outlet = self.u[ self.outlet_ID ] ## self.d_outlet = self.d[ self.outlet_ID ] ## self.f_outlet = self.f[ self.outlet_ID ] ## self.Q_outlet = self.Q.flat[self.outlet_ID] ## self.u_outlet = self.u.flat[self.outlet_ID] ## self.d_outlet = self.d.flat[self.outlet_ID] ## self.f_outlet = self.f.flat[self.outlet_ID] # update_outlet_values() #------------------------------------------------------------- def update_peak_values(self): if (self.Q_outlet > self.Q_peak): self.Q_peak.fill( self.Q_outlet ) self.T_peak.fill( self.time_min ) # (time to peak) #--------------------------------------- if (self.u_outlet > self.u_peak): self.u_peak.fill( self.u_outlet ) self.Tu_peak.fill( self.time_min ) #--------------------------------------- if (self.d_outlet > self.d_peak): self.d_peak.fill( self.d_outlet ) self.Td_peak.fill( self.time_min ) ## if (self.Q_outlet > self.Q_peak): ## self.Q_peak = self.Q_outlet ## self.T_peak = self.time_min # (time to peak) ## #----------------------------------- ## if (self.u_outlet > self.u_peak): ## self.u_peak = self.u_outlet ## self.Tu_peak = self.time_min ## #----------------------------------- ## if (self.d_outlet > self.d_peak): ## self.d_peak = self.d_outlet ## self.Td_peak = self.time_min # update_peak_values() #------------------------------------------------------------- def update_Q_out_integral(self): #-------------------------------------------------------- # Note: Renamed "volume_out" to "vol_Q" for consistency # with vol_P, vol_SM, vol_IN, vol_ET, etc. (5/18/12) #-------------------------------------------------------- self.vol_Q += (self.Q_outlet * self.dt) ## Experiment: 5/19/12. ## self.vol_Q += (self.Q[self.outlet_ID] * self.dt) # update_Q_out_integral() #------------------------------------------------------------- def update_mins_and_maxes(self, REPORT=False): #-------------------------------------- # Get mins and max over entire domain #-------------------------------------- ## Q_min = self.Q.min() ## Q_max = self.Q.max() ## #--------------------- ## u_min = self.u.min() ## u_max = self.u.max() ## #--------------------- ## d_min = self.d.min() ## d_max = self.d.max() #-------------------------------------------- # Exclude edges where mins are always zero. #-------------------------------------------- nx = self.nx ny = self.ny Q_min = self.Q[1:(ny - 2)+1,1:(nx - 2)+1].min() Q_max = self.Q[1:(ny - 2)+1,1:(nx - 2)+1].max() #------------------------------------------------- u_min = self.u[1:(ny - 2)+1,1:(nx - 2)+1].min() u_max = self.u[1:(ny - 2)+1,1:(nx - 2)+1].max() #------------------------------------------------- d_min = self.d[1:(ny - 2)+1,1:(nx - 2)+1].min() d_max = self.d[1:(ny - 2)+1,1:(nx - 2)+1].max() #------------------------------------------------- # (2/6/13) This preserves "mutable scalars" that # can be accessed as refs by other components. #------------------------------------------------- if (Q_min < self.Q_min): self.Q_min.fill( Q_min ) if (Q_max > self.Q_max): self.Q_max.fill( Q_max ) #------------------------------ if (u_min < self.u_min): self.u_min.fill( u_min ) if (u_max > self.u_max): self.u_max.fill( u_max ) #------------------------------ if (d_min < self.d_min): self.d_min.fill( d_min ) if (d_max > self.d_max): self.d_max.fill( d_max ) #------------------------------------------------- # (2/6/13) This preserves "mutable scalars" that # can be accessed as refs by other components. #------------------------------------------------- ## self.Q_min.fill( np.minimum( self.Q_min, Q_min ) ) ## self.Q_max.fill( np.maximum( self.Q_max, Q_max ) ) ## #--------------------------------------------------- ## self.u_min.fill( np.minimum( self.u_min, u_min ) ) ## self.u_max.fill( np.maximum( self.u_max, u_max ) ) ## #--------------------------------------------------- ## self.d_min.fill( np.minimum( self.d_min, d_min ) ) ## self.d_max.fill( np.maximum( self.d_max, d_max ) ) #------------------------------------------------- # (2/6/13) This preserves "mutable scalars" that # can be accessed as refs by other components. #------------------------------------------------- ## self.Q_min.fill( min( self.Q_min, Q_min ) ) ## self.Q_max.fill( max( self.Q_max, Q_max ) ) ## #--------------------------------------------------- ## self.u_min.fill( min( self.u_min, u_min ) ) ## self.u_max.fill( max( self.u_max, u_max ) ) ## #--------------------------------------------------- ## self.d_min.fill( min( self.d_min, d_min ) ) ## self.d_max.fill( max( self.d_max, d_max ) ) #---------------------------------------------- # (2/6/13) This produces "immutable scalars". #---------------------------------------------- ## self.Q_min = self.Q.min() ## self.Q_max = self.Q.max() ## self.u_min = self.u.min() ## self.u_max = self.u.max() ## self.d_min = self.d.min() ## self.d_max = self.d.max() if (REPORT): print 'In channels_base.update_mins_and_maxes():' print '(dmin, dmax) =', self.d_min, self.d_max print '(umin, umax) =', self.u_min, self.u_max print '(Qmin, Qmax) =', self.Q_min, self.Q_max print ' ' # update_mins_and_maxes() #------------------------------------------------------------------- def check_flow_depth(self): OK = True d = self.d dt = self.dt nx = self.nx ################# #--------------------------------- # All all flow depths positive ? #--------------------------------- wbad = np.where( np.logical_or( d < 0.0, np.logical_not(np.isfinite(d)) )) nbad = np.size( wbad[0] ) if (nbad == 0): return OK OK = False dmin = d[wbad].min() star_line = '*******************************************' msg = [ star_line, \ 'ERROR: Simulation aborted.', ' ', \ 'Negative depth found: ' + str(dmin), \ 'Time step may be too large.', \ 'Time step: ' + str(dt) + ' [s]', ' '] for k in xrange(len(msg)): print msg[k] #------------------------------------------- # If not too many, print actual velocities #------------------------------------------- if (nbad < 30): brow = wbad[0][0] bcol = wbad[1][0] ## badi = wbad[0] ## bcol = (badi % nx) ## brow = (badi / nx) crstr = str(bcol) + ', ' + str(brow) msg = ['(Column, Row): ' + crstr, \ 'Flow depth: ' + str(d[brow, bcol])] for k in xrange(len(msg)): print msg[k] print star_line print ' ' return OK # check_flow_depth #------------------------------------------------------------------- def check_flow_velocity(self): OK = True u = self.u dt = self.dt nx = self.nx #-------------------------------- # Are all velocities positive ? #-------------------------------- wbad = np.where( np.logical_or( u < 0.0, np.logical_not(np.isfinite(u)) )) nbad = np.size( wbad[0] ) if (nbad == 0): return OK OK = False umin = u[wbad].min() star_line = '*******************************************' msg = [ star_line, \ 'ERROR: Simulation aborted.', ' ', \ 'Negative or NaN velocity found: ' + str(umin), \ 'Time step may be too large.', \ 'Time step: ' + str(dt) + ' [s]', ' '] for k in xrange(len(msg)): print msg[k] #------------------------------------------- # If not too many, print actual velocities #------------------------------------------- if (nbad < 30): brow = wbad[0][0] bcol = wbad[1][0] ## badi = wbad[0] ## bcol = (badi % nx) ## brow = (badi / nx) crstr = str(bcol) + ', ' + str(brow) msg = ['(Column, Row): ' + crstr, \ 'Velocity: ' + str(u[brow, bcol])] for k in xrange(len(msg)): print msg[k] print star_line print ' ' return OK ## umin = u[wbad].min() ## badi = wbad[0] ## bcol = (badi % nx) ## brow = (badi / nx) ## crstr = str(bcol) + ', ' + str(brow) ## msg = np.array([' ', \ ## '*******************************************', \ ## 'ERROR: Simulation aborted.', ' ', \ ## 'Negative velocity found: ' + str(umin), \ ## 'Time step may be too large.', ' ', \ ## '(Column, Row): ' + crstr, \ ## 'Velocity: ' + str(u[badi]), \ ## 'Time step: ' + str(dt) + ' [s]', \ ## '*******************************************', ' ']) ## for k in xrange( np.size(msg) ): ## print msg[k] ## return OK # check_flow_velocity #------------------------------------------------------------------- def open_input_files(self): # This doesn't work, because file_unit doesn't get full path. (10/28/11) # start_dir = os.getcwd() # os.chdir( self.in_directory ) # print '### start_dir =', start_dir # print '### in_directory =', self.in_directory in_files = ['slope_file', 'nval_file', 'z0val_file', 'width_file', 'angle_file', 'sinu_file', 'd0_file'] self.prepend_directory( in_files, INPUT=True ) # self.slope_file = self.in_directory + self.slope_file # self.nval_file = self.in_directory + self.nval_file # self.z0val_file = self.in_directory + self.z0val_file # self.width_file = self.in_directory + self.width_file # self.angle_file = self.in_directory + self.angle_file # self.sinu_file = self.in_directory + self.sinu_file # self.d0_file = self.in_directory + self.d0_file #self.code_unit = model_input.open_file(self.code_type, self.code_file) self.slope_unit = model_input.open_file(self.slope_type, self.slope_file) if (self.MANNING): self.nval_unit = model_input.open_file(self.nval_type, self.nval_file) if (self.LAW_OF_WALL): self.z0val_unit = model_input.open_file(self.z0val_type, self.z0val_file) self.width_unit = model_input.open_file(self.width_type, self.width_file) self.angle_unit = model_input.open_file(self.angle_type, self.angle_file) self.sinu_unit = model_input.open_file(self.sinu_type, self.sinu_file) self.d0_unit = model_input.open_file(self.d0_type, self.d0_file) # os.chdir( start_dir ) # open_input_files() #------------------------------------------------------------------- def read_input_files(self): #--------------------------------------------------- # The flow codes are always a grid, size of DEM. #--------------------------------------------------- # NB! model_input.py also has a read_grid() function. #--------------------------------------------------- rti = self.rti ## print 'Reading D8 flow grid (in CHANNELS)...' ## self.code = rtg_files.read_grid(self.code_file, rti, ## RTG_type='BYTE') ## print ' ' #------------------------------------------------------- # All grids are assumed to have a data type of Float32. #------------------------------------------------------- slope = model_input.read_next(self.slope_unit, self.slope_type, rti) if (slope != None): self.slope = slope # If EOF was reached, hopefully numpy's "fromfile" # returns None, so that the stored value will be # the last value that was read. if (self.MANNING): nval = model_input.read_next(self.nval_unit, self.nval_type, rti) if (nval != None): self.nval = nval self.nval_min = nval.min() self.nval_max = nval.max() if (self.LAW_OF_WALL): z0val = model_input.read_next(self.z0val_unit, self.z0val_type, rti) if (z0val != None): self.z0val = z0val self.z0val_min = z0val.min() self.z0val_max = z0val.max() width = model_input.read_next(self.width_unit, self.width_type, rti) if (width != None): self.width = width angle = model_input.read_next(self.angle_unit, self.angle_type, rti) if (angle != None): #----------------------------------------------- # Convert bank angles from degrees to radians. #----------------------------------------------- self.angle = angle * self.deg_to_rad # [radians] ### self.angle = angle # (before 9/9/14) sinu = model_input.read_next(self.sinu_unit, self.sinu_type, rti) if (sinu != None): self.sinu = sinu d0 = model_input.read_next(self.d0_unit, self.d0_type, rti) if (d0 != None): self.d0 = d0 ## code = model_input.read_grid(self.code_unit, \ ## self.code_type, rti, dtype='UInt8') ## if (code != None): self.code = code # read_input_files() #------------------------------------------------------------------- def close_input_files(self): # if not(self.slope_unit.closed): # if (self.slope_unit != None): #------------------------------------------------- # NB! self.code_unit was never defined as read. #------------------------------------------------- # if (self.code_type != 'scalar'): self.code_unit.close() if (self.slope_type != 'Scalar'): self.slope_unit.close() if (self.MANNING): if (self.nval_type != 'Scalar'): self.nval_unit.close() if (self.LAW_OF_WALL): if (self.z0val_type != 'Scalar'): self.z0val_unit.close() if (self.width_type != 'Scalar'): self.width_unit.close() if (self.angle_type != 'Scalar'): self.angle_unit.close() if (self.sinu_type != 'Scalar'): self.sinu_unit.close() if (self.d0_type != 'Scalar'): self.d0_unit.close() ## if (self.slope_file != ''): self.slope_unit.close() ## if (self.MANNING): ## if (self.nval_file != ''): self.nval_unit.close() ## if (self.LAW_OF_WALL): ## if (self.z0val_file != ''): self.z0val_unit.close() ## if (self.width_file != ''): self.width_unit.close() ## if (self.angle_file != ''): self.angle_unit.close() ## if (self.sinu_file != ''): self.sinu_unit.close() ## if (self.d0_file != ''): self.d0_unit.close() # close_input_files() #------------------------------------------------------------------- def update_outfile_names(self): #------------------------------------------------- # Notes: Append out_directory to outfile names. #------------------------------------------------- self.Q_gs_file = (self.out_directory + self.Q_gs_file) self.u_gs_file = (self.out_directory + self.u_gs_file) self.d_gs_file = (self.out_directory + self.d_gs_file) self.f_gs_file = (self.out_directory + self.f_gs_file) #-------------------------------------------------------- self.Q_ts_file = (self.out_directory + self.Q_ts_file) self.u_ts_file = (self.out_directory + self.u_ts_file) self.d_ts_file = (self.out_directory + self.d_ts_file) self.f_ts_file = (self.out_directory + self.f_ts_file) # update_outfile_names() #------------------------------------------------------------------- def bundle_output_files(self): ################################################### # NOT READY YET. Need "get_long_name()" and a new # version of "get_var_units". (9/21/14) ################################################### #------------------------------------------------------------- # Bundle the output file info into an array for convenience. # Then we just need one open_output_files(), in BMI_base.py, # and one close_output_files(). Less to maintain. (9/21/14) #------------------------------------------------------------- # gs = grid stack, ts = time series, ps = profile series. #------------------------------------------------------------- self.out_files = [ {var_name:'Q', save_gs:self.SAVE_Q_GRIDS, gs_file:self.Q_gs_file, save_ts:self.SAVE_Q_PIXELS, ts_file:self.Q_ts_file, long_name:get_long_name('Q'), units_name:get_var_units('Q')}, #----------------------------------------------------------------- {var_name:'u', save_gs:self.SAVE_U_GRIDS, gs_file:self.u_gs_file, save_ts:self.SAVE_U_PIXELS, ts_file:self.u_ts_file, long_name:get_long_name('u'), units_name:get_var_units('u')}, #----------------------------------------------------------------- {var_name:'d', save_gs:self.SAVE_D_GRIDS, gs_file:self.d_gs_file, save_ts:self.SAVE_D_PIXELS, ts_file:self.d_ts_file, long_name:get_long_name('d'), units_name:get_var_units('d')}, #----------------------------------------------------------------- {var_name:'f', save_gs:self.SAVE_F_GRIDS, gs_file:self.f_gs_file, save_ts:self.SAVE_F_PIXELS, ts_file:self.f_ts_file, long_name:get_long_name('f'), units_name:get_var_units('f')} ] # bundle_output_files #------------------------------------------------------------------- def open_output_files(self): model_output.check_netcdf() self.update_outfile_names() ## self.bundle_output_files() ## print 'self.SAVE_Q_GRIDS =', self.SAVE_Q_GRIDS ## print 'self.SAVE_U_GRIDS =', self.SAVE_U_GRIDS ## print 'self.SAVE_D_GRIDS =', self.SAVE_D_GRIDS ## print 'self.SAVE_F_GRIDS =', self.SAVE_F_GRIDS ## #--------------------------------------------------- ## print 'self.SAVE_Q_PIXELS =', self.SAVE_Q_PIXELS ## print 'self.SAVE_U_PIXELS =', self.SAVE_U_PIXELS ## print 'self.SAVE_D_PIXELS =', self.SAVE_D_PIXELS ## print 'self.SAVE_F_PIXELS =', self.SAVE_F_PIXELS # IDs = self.outlet_IDs # for k in xrange( len(self.out_files) ): # #-------------------------------------- # # Open new files to write grid stacks # #-------------------------------------- # if (self.out_files[k].save_gs): # model_output.open_new_gs_file( self, self.out_files[k], self.rti ) # #-------------------------------------- # # Open new files to write time series # #-------------------------------------- # if (self.out_files[k].save_ts): # model_output.open_new_ts_file( self, self.out_files[k], IDs ) #-------------------------------------- # Open new files to write grid stacks #-------------------------------------- if (self.SAVE_Q_GRIDS): model_output.open_new_gs_file( self, self.Q_gs_file, self.rti, var_name='Q', long_name='volumetric_discharge', units_name='m^3/s') if (self.SAVE_U_GRIDS): model_output.open_new_gs_file( self, self.u_gs_file, self.rti, var_name='u', long_name='mean_channel_flow_velocity', units_name='m/s') if (self.SAVE_D_GRIDS): model_output.open_new_gs_file( self, self.d_gs_file, self.rti, var_name='d', long_name='max_channel_flow_depth', units_name='m') if (self.SAVE_F_GRIDS): model_output.open_new_gs_file( self, self.f_gs_file, self.rti, var_name='f', long_name='friction_factor', units_name='none') #-------------------------------------- # Open new files to write time series #-------------------------------------- IDs = self.outlet_IDs if (self.SAVE_Q_PIXELS): model_output.open_new_ts_file( self, self.Q_ts_file, IDs, var_name='Q', long_name='volumetric_discharge', units_name='m^3/s') if (self.SAVE_U_PIXELS): model_output.open_new_ts_file( self, self.u_ts_file, IDs, var_name='u', long_name='mean_channel_flow_velocity', units_name='m/s') if (self.SAVE_D_PIXELS): model_output.open_new_ts_file( self, self.d_ts_file, IDs, var_name='d', long_name='max_channel_flow_depth', units_name='m') if (self.SAVE_F_PIXELS): model_output.open_new_ts_file( self, self.f_ts_file, IDs, var_name='f', long_name='friction_factor', units_name='none') # open_output_files() #------------------------------------------------------------------- def write_output_files(self, time_seconds=None): #--------------------------------------------------------- # Notes: This function was written to use only model # time (maybe from a caller) in seconds, and # the save_grid_dt and save_pixels_dt parameters # read by read_cfg_file(). # # read_cfg_file() makes sure that all of # the "save_dts" are larger than or equal to the # process dt. #--------------------------------------------------------- #----------------------------------------- # Allows time to be passed from a caller #----------------------------------------- if (time_seconds is None): time_seconds = self.time_sec model_time = int(time_seconds) #---------------------------------------- # Save computed values at sampled times #---------------------------------------- if (model_time % int(self.save_grid_dt) == 0): self.save_grids() if (model_time % int(self.save_pixels_dt) == 0): self.save_pixel_values() #---------------------------------------- # Save computed values at sampled times #---------------------------------------- ## if ((self.time_index % self.grid_save_step) == 0): ## self.save_grids() ## if ((self.time_index % self.pixel_save_step) == 0): ## self.save_pixel_values() # write_output_files() #------------------------------------------------------------------- def close_output_files(self): if (self.SAVE_Q_GRIDS): model_output.close_gs_file( self, 'Q') if (self.SAVE_U_GRIDS): model_output.close_gs_file( self, 'u') if (self.SAVE_D_GRIDS): model_output.close_gs_file( self, 'd') if (self.SAVE_F_GRIDS): model_output.close_gs_file( self, 'f') #--------------------------------------------------------------- if (self.SAVE_Q_PIXELS): model_output.close_ts_file( self, 'Q') if (self.SAVE_U_PIXELS): model_output.close_ts_file( self, 'u') if (self.SAVE_D_PIXELS): model_output.close_ts_file( self, 'd') if (self.SAVE_F_PIXELS): model_output.close_ts_file( self, 'f') # close_output_files() #------------------------------------------------------------------- def save_grids(self): #----------------------------------- # Save grid stack to a netCDF file #--------------------------------------------- # Note that add_grid() methods will convert # var from scalar to grid now, if necessary. #--------------------------------------------- if (self.SAVE_Q_GRIDS): model_output.add_grid( self, self.Q, 'Q', self.time_min ) if (self.SAVE_U_GRIDS): model_output.add_grid( self, self.u, 'u', self.time_min ) if (self.SAVE_D_GRIDS): model_output.add_grid( self, self.d, 'd', self.time_min ) if (self.SAVE_F_GRIDS): model_output.add_grid( self, self.f, 'f', self.time_min ) # save_grids() #------------------------------------------------------------------- def save_pixel_values(self): ##### save_time_series_data(self) ####### IDs = self.outlet_IDs time = self.time_min ##### #------------- # New method #------------- if (self.SAVE_Q_PIXELS): model_output.add_values_at_IDs( self, time, self.Q, 'Q', IDs ) if (self.SAVE_U_PIXELS): model_output.add_values_at_IDs( self, time, self.u, 'u', IDs ) if (self.SAVE_D_PIXELS): model_output.add_values_at_IDs( self, time, self.d, 'd', IDs ) if (self.SAVE_F_PIXELS): model_output.add_values_at_IDs( self, time, self.f, 'f', IDs ) # save_pixel_values() #------------------------------------------------------------------- def manning_formula(self): #--------------------------------------------------------- # Notes: R = (A/P) = hydraulic radius [m] # N = Manning's roughness coefficient # (usually in the range 0.012 to 0.035) # S = bed slope or free slope # R,S, and N may be 2D arrays. # If length units are all *feet*, then an extra # factor of 1.49 must be applied. If units are # meters, no such factor is needed. # Note that Q = Ac * u, where Ac is cross-section # area. For a trapezoid, Ac does not equal w*d. #--------------------------------------------------------- if (self.KINEMATIC_WAVE): S = self.S_bed else: S = self.S_free u = (self.Rh ** self.two_thirds) * np.sqrt(S) / self.nval #-------------------------------------------------------- # Add a hydraulic jump option for when u gets too big ? #-------------------------------------------------------- return u # manning_formula() #------------------------------------------------------------------- def law_of_the_wall(self): #--------------------------------------------------------- # Notes: u = flow velocity [m/s] # d = flow depth [m] # z0 = roughness length # S = bed slope or free slope # g = 9.81 = gravitation constant [m/s^2] # kappa = 0.41 = von Karman's constant # aval = 0.48 = integration constant # law_const = sqrt(g)/kappa = 7.6393d # smoothness = (aval / z0) * d # f = (kappa / alog(smoothness))^2d # tau_bed = rho_w * f * u^2 = rho_w * g * d * S # d, S, and z0 can be arrays. # To make default z0 correspond to default # Manning's n, can use this approximation: # z0 = a * (2.34 * sqrt(9.81) * n / kappa)^6d # For n=0.03, this gives: z0 = 0.011417 ######################################################### # However, for n=0.3, it gives: z0 = 11417.413 # which is 11.4 km! So the approximation only # holds within some range of values. #-------------------------------------------------------- if (self.KINEMATIC_WAVE): S = self.S_bed else: S = self.S_free smoothness = (self.aval / self.z0val) * self.d #------------------------------------------------ # Make sure (smoothness > 1) before taking log. # Should issue a warning if this is used. #------------------------------------------------ smoothness = np.maximum(smoothness, np.float64(1.1)) u = self.law_const * np.sqrt(self.Rh * S) * np.log(smoothness) #-------------------------------------------------------- # Add a hydraulic jump option for when u gets too big ? #-------------------------------------------------------- return u # law_of_the_wall() #------------------------------------------------------------------- def print_status_report(self): #---------------------------------------------------- # Wherever depth is less than z0, assume that water # is not flowing and set u and Q to zero. # However, we also need (d gt 0) to avoid a divide # by zero problem, even when numerators are zero. #---------------------------------------------------- # FLOWING = (d > (z0/aval)) #*** FLOWING[noflow_IDs] = False ;****** wflow = np.where( FLOWING != 0 ) n_flow = np.size( wflow[0] ) n_pixels = self.rti.n_pixels percent = np.float64(100.0) * (np.float64(n_flow) / n_pixels) fstr = ('%5.1f' % percent) + '%' # fstr = idl_func.string(percent, format='(F5.1)').strip() + '%' print ' Percentage of pixels with flow = ' + fstr print ' ' self.update_mins_and_maxes(REPORT=True) wmax = np.where(self.Q == self.Q_max) nwmax = np.size(wmax[0]) print ' Max(Q) occurs at: ' + str( wmax[0] ) #print,' Max attained at ', nwmax, ' pixels.' print ' ' print '-------------------------------------------------' # print_status_report() #------------------------------------------------------------------- def remove_bad_slopes(self, FLOAT=False): #------------------------------------------------------------ # Notes: The main purpose of this routine is to find # pixels that have nonpositive slopes and replace # then with the smallest value that occurs anywhere # in the input slope grid. For example, pixels on # the edges of the DEM will have a slope of zero. # With the Kinematic Wave option, flow cannot leave # a pixel that has a slope of zero and the depth # increases in an unrealistic manner to create a # spike in the depth grid. # It would be better, of course, if there were # no zero-slope pixels in the DEM. We could use # an "Imposed gradient DEM" to get slopes or some # method of "profile smoothing". # It is possible for the flow code to be nonzero # at a pixel that has NaN for its slope. For these # pixels, we also set the slope to our min value. # 7/18/05. Broke this out into separate procedure. #------------------------------------------------------------ #----------------------------------- # Are there any "bad" pixels ? # If not, return with no messages. #----------------------------------- wb = np.where(np.logical_or((self.slope <= 0.0), \ np.logical_not(np.isfinite(self.slope)))) nbad = np.size(wb[0]) print 'size(slope) =', np.size(self.slope) print 'size(wb) =', nbad wg = np.where(np.invert(np.logical_or((self.slope <= 0.0), \ np.logical_not(np.isfinite(self.slope))))) ngood = np.size(wg[0]) if (nbad == 0) or (ngood == 0): return #--------------------------------------------- # Find smallest positive value in slope grid # and replace the "bad" values with smin. #--------------------------------------------- print '-------------------------------------------------' print 'WARNING: Zero or negative slopes found.' print ' Replacing them with smallest slope.' print ' Use "Profile smoothing tool" instead.' S_min = self.slope[wg].min() S_max = self.slope[wg].max() print ' min(S) = ' + str(S_min) print ' max(S) = ' + str(S_max) print '-------------------------------------------------' print ' ' self.slope[wb] = S_min #-------------------------------- # Convert data type to double ? #-------------------------------- if (FLOAT): self.slope = np.float32(self.slope) else: self.slope = np.float64(self.slope) # remove_bad_slopes #------------------------------------------------------------------- #------------------------------------------------------------------- def Trapezoid_Rh(d, wb, theta): #------------------------------------------------------------- # Notes: Compute the hydraulic radius of a trapezoid that: # (1) has a bed width of wb >= 0 (0 for triangular) # (2) has a bank angle of theta (0 for rectangular) # (3) is filled with water to a depth of d. # The units of wb and d are meters. The units of # theta are assumed to be degrees and are converted. #------------------------------------------------------------- # NB! wb should never be zero, so PW can never be 0, # which would produce a NaN (divide by zero). #------------------------------------------------------------- # See Notes for TF_Tan function in utils_TF.pro # AW = d * (wb + (d * TF_Tan(theta_rad)) ) #------------------------------------------------------------- theta_rad = (theta * np.pi / 180.0) AW = d * (wb + (d * np.tan(theta_rad)) ) PW = wb + (np.float64(2) * d / np.cos(theta_rad) ) Rh = (AW / PW) w = np.where(wb <= 0) nw = np.size(w[0]) return Rh # Trapezoid_Rh() #------------------------------------------------------------------- def Manning_Formula(Rh, S, nval): #--------------------------------------------------------- # Notes: R = (A/P) = hydraulic radius [m] # N = Manning's roughness coefficient # (usually in the range 0.012 to 0.035) # S = bed slope (assumed equal to friction slope) # R,S, and N may be 2D arrays. # If length units are all *feet*, then an extra # factor of 1.49 must be applied. If units are # meters, no such factor is needed. # Note that Q = Ac * u, where Ac is cross-section # area. For a trapezoid, Ac does not equal w*d. #--------------------------------------------------------- ## if (N == None): N = np.float64(0.03) two_thirds = np.float64(2) / 3.0 u = (Rh ** two_thirds) * np.sqrt(S) / nval #------------------------------ # Add a hydraulic jump option # for when u gets too big ?? #------------------------------ return u # Manning_Formula() #------------------------------------------------------------------- def Law_of_the_Wall(d, Rh, S, z0val): #--------------------------------------------------------- # Notes: u = flow velocity [m/s] # d = flow depth [m] # z0 = roughness height # S = bed slope (assumed equal to friction slope) # g = 9.81 = gravitation constant [m/s^2] # kappa = 0.41 = von Karman's constant # aval = 0.48 = integration constant # sqrt(g)/kappa = 7.6393d # smoothness = (aval / z0) * d # f = (kappa / alog(smoothness))^2d # tau_bed = rho_w * f * u^2 = rho_w * g * d * S # d, S, and z0 can be arrays. # To make default z0 correspond to default # Manning's n, can use this approximation: # z0 = a * (2.34 * sqrt(9.81) * n / kappa)^6d # For n=0.03, this gives: z0 = 0.011417 # However, for n=0.3, it gives: z0 = 11417.413 # which is 11.4 km! So the approximation only # holds within some range of values. #-------------------------------------------------------- ## if (self.z0val == None): ## self.z0val = np.float64(0.011417) # (about 1 cm) #------------------------ # Define some constants #------------------------ g = np.float64(9.81) # (gravitation const.) aval = np.float64(0.476) # (integration const.) kappa = np.float64(0.408) # (von Karman's const.) law_const = np.sqrt(g) / kappa smoothness = (aval / z0val) * d #----------------------------- # Make sure (smoothness > 1) #----------------------------- smoothness = np.maximum(smoothness, np.float64(1.1)) u = law_const * np.sqrt(Rh * S) * np.log(smoothness) #------------------------------ # Add a hydraulic jump option # for when u gets too big ?? #------------------------------ return u<|fim▁end|>
'channel_water_x-section__domain_max_of_mean_depth': 'm', 'channel_water_x-section__domain_min_of_mean_depth': 'm', 'channel_water_x-section__domain_max_of_volume_flow_rate': 'm3 s-1', 'channel_water_x-section__domain_min_of_volume_flow_rate': 'm3 s-1',
<|file_name|>server.js<|end_file_name|><|fim▁begin|>// server.js // BASE SETUP // ============================================================================= // call the packages we need var express = require('express'); // call express var app = express(); // define our app using express var bodyParser = require('body-parser'); var json2csv = require('json2csv'); var fs = require('fs'); var path = require('path'); // var fields = ['car', 'price', 'color']; // var myCars = [ // { // "car": "Audi", // "price": 40000, // "color": "blue" // }, { // "car": "BMW", // "price": 35000, // "color": "black" // }, { // "car": "Porsche", // "price": 60000, // "color": "green" // } // ]; // json2csv({ data: myCars, fields: fields }, function(err, csv) { // if (err) console.log(err); // fs.writeFile('file.csv', csv, function(err) { // if (err) throw err; // console.log('file saved'); // }); // }); var mongoose = require('mongoose'); mongoose.connect('mongodb://192.168.10.62:27000/tempTW'); // connect to our database var Bear = require('./models/bear'); var CFSOrg = require('./models/CFSOrganization'); // configure app to use bodyParser() // this will let us get the data from a POST app.use(bodyParser.urlencoded({extended:true,limit:1024*1024*20,type:'application/x-www-form-urlencoding'})); app.use(bodyParser.json({limit:1024*1024*20, type:'application/json'})); var port = process.env.PORT || 9001; // set our port // ROUTES FOR OUR API // ============================================================================= var router = express.Router(); // get an instance of the express Router // Add headers app.use(function (req, res, next) { // Website you wish to allow to connect res.setHeader('Access-Control-Allow-Origin', 'http://localhost:9999'); // Request methods you wish to allow res.setHeader('Access-Control-Allow-Methods', 'GET, POST, OPTIONS, PUT, PATCH, DELETE'); // Request headers you wish to allow res.setHeader('Access-Control-Allow-Headers', 'X-Requested-With,content-type'); // Set to true if you need the website to include cookies in the requests sent // to the API (e.g. in case you use sessions) res.setHeader('Access-Control-Allow-Credentials', true); // Pass to next layer of middleware next(); }); // middleware to use for all requests router.use(function(req, res, next) { // do logging console.log('Something is happening.'); next(); // make sure we go to the next routes and don't stop here }); // test route to make sure everything is working (accessed at GET http://localhost:8080/api) router.get('/', function(req, res) { res.json({ message: 'hooray! welcome to our api!' }); }); app.use('/downloadFile', express.static(path.join(__dirname, 'exports'))); // more routes for our API will happen here // on routes that end in /bears // ---------------------------------------------------- router.route('/bears') // create a bear (accessed at POST http://localhost:8080/api/bears) .post(function(req, res) { var bear = new Bear(); // create a new instance of the Bear model bear.name = req.body.name; // set the bears name (comes from the request) bear.shortName = req.body.shortName; bear.subName = req.body.subName; bear.city = req.body.city; bear.state = req.body.state; bear.country = req.body.country; // save the bear and check for errors bear.save(function(err) { if (err) res.send(err); res.json({ message: 'Bear created!' }); }); }) // get all the bears (accessed at GET http://localhost:8080/api/bears) .get(function(req, res) { Bear.find(function(err, result) { if (err) res.send(err); res.json(result); }); }); router.route('/export') .post(function(req, res) { debugger; console.log(req); var fields = [ '_id', 'name', 'shortName', 'subName', 'city', 'state', 'country' ]; var myBears = []; myBears = req.body; json2csv({ data: myBears, fields: fields }, function(err, csv) { if (err) console.log(err); var exportDir = '/exports' var fileName = 'myBears_file_' + new Date().getTime() + '.csv'; fs.writeFile(path.join(__dirname, exportDir, fileName), csv, function(err) { if (err) throw err; console.log('file saved as : ' + fileName); res.send({csvFile: fileName}); // res.send(path.join(__dirname, exportDir, fileName)); var filePath = path.join(__dirname, exportDir, fileName); var readStream = fs.createReadStream(filePath); readStream.pipe(res); // res.sendFile(exportDir, fileName); }); }); }); // get all the bears (accessed at GET http://localhost:8080/api/bears) // .get(function(req, res) { // var fields = ['name', 'shortName', 'subName']; // var myBears = []; // myBears = req.body; // json2csv({ data: myBears, fields: fields }, function(err, csv) { // if (err) console.log(err); // var fileName = 'myBears_file_' + new Date().getTime() + '_.csv'; // fs.writeFile(fileName, csv, function(err) { // if (err) throw err; // console.log('file saved as : ' + filename); // res.send({csvFile: filename}); // }); // }); // // res.sendFile('myBears_file.csv'); // }); router.route('/CFSOrganizations')<|fim▁hole|> // res.send(err); // res.json(result); // }); CFSOrg.find(function(err, records) { if (err) { handleError(res, err.message, "Failed to get contacts."); } else { res.status(200).json(records); } }); }); // REGISTER OUR ROUTES ------------------------------- // all of our routes will be prefixed with /api app.use('/api', router); // START THE SERVER // ============================================================================= app.listen(port); console.log('Magic happens on port ' + port);<|fim▁end|>
.get(function(req, res) { // CFSOrg.find(function(err, result) { // if (err)
<|file_name|>ObjetoCompuesto.cpp<|end_file_name|><|fim▁begin|><|fim▁hole|> numHijos = 0; m1 = new GLfloat[16]; } ObjetoCompuesto:: ~ObjetoCompuesto() { for(int i =0; i < numHijos; i++) { delete hijos[i]; } } void ObjetoCompuesto:: dibuja() { glMatrixMode(GL_MODELVIEW); glPushMatrix(); glMultMatrixf(dameMatrizAfin()); // Copiar en m1 la matriz actual de modelado-vista glGetFloatv(GL_MODELVIEW_MATRIX, m1); for(int i =0; i < numHijos; i++) { glColor4f(hijos[i]->getR(), hijos[i]->getG(), hijos[i]->getB(), hijos[i]->getA()); glMultMatrixf(hijos[i]->dameMatrizAfin()); hijos[i]->dibuja(); glLoadMatrixf(m1); } glPopMatrix(); } void ObjetoCompuesto:: introduceObjeto(Objeto3D* objeto) { hijos[numHijos++] = objeto; }<|fim▁end|>
#include "ObjetoCompuesto.h" ObjetoCompuesto:: ObjetoCompuesto() { hijos = new Objeto3D*[100000];
<|file_name|>pr22404.C<|end_file_name|><|fim▁begin|>/* { dg-do compile } */ /* { dg-options "-O2" } */ /* We were not getting the offset of a in B and a in C::B correct, causing an abort. */ struct A { A(); }; struct B : A { A a; }; struct C : B { };<|fim▁hole|><|fim▁end|>
C c;
<|file_name|>jvm.component.ts<|end_file_name|><|fim▁begin|><|fim▁hole|> export class JvmController { tabs: Nav.HawtioTab[]; constructor(workspace: Jmx.Workspace) { 'ngInject'; this.tabs = [new Nav.HawtioTab('Remote', '/jvm/connect')]; if (hasLocalMBean(workspace)) { this.tabs.push(new Nav.HawtioTab('Local', '/jvm/local')); } if (hasDiscoveryMBean(workspace)) { this.tabs.push(new Nav.HawtioTab('Discover', '/jvm/discover')); } } } export const jvmComponent: angular.IComponentOptions = { template: '<hawtio-tabs-layout tabs="$ctrl.tabs"></hawtio-tabs-layout>', controller: JvmController }; }<|fim▁end|>
namespace JVM {
<|file_name|>transcriptor.js<|end_file_name|><|fim▁begin|>var final_transcript = ''; var recognizing = false; //var socket = io.connect('http://collab.di.uniba.it:48922');//"http://collab.di.uniba.it/~iaffaldano:48922" //socket.emit('client_type', {text: "Speaker"}); if ('webkitSpeechRecognition' in window) { var recognition = new webkitSpeechRecognition(); recognition.continuous = true; recognition.interimResults = true; console.log("MAX ALTERNATIVES = "+ recognition.maxAlternatives); recognition.onstart = function () { recognizing = true; console.log("RECOGNITION STARTED"); }; recognition.onerror = function (event) { console.log("RECOGNITION ERROR: " + event.error); recognition.start(); }; recognition.onend = function () { console.log("RECOGNITION STOPPED"); if(recognizing){ recognition.start(); console.log("RECOGNITION RESTARTED"); } }; recognition.onresult = function (event) { var interim_transcript = ''; for (var i = event.resultIndex; i < event.results.length; ++i) { if (event.results[i].isFinal) { final_transcript += event.results[i][0].transcript; console.log("CONFIDENCE (" + event.results[i][0].transcript + ") = " + event.results[i][0].confidence); //recognition.stop(); //recognition.start(); socket.emit('client_message', {text: event.results[i][0].transcript}); } else { interim_transcript += event.results[i][0].transcript; } } final_transcript = capitalize(final_transcript); final_span.innerHTML = linebreak(final_transcript); interim_span.innerHTML = linebreak(interim_transcript); }; recognition.onaudiostart= function (event) { console.log("AUDIO START"); }; recognition.onsoundstart= function (event) { console.log("SOUND START"); }; recognition.onspeechstart= function (event) { console.log("SPEECH START"); }; recognition.onspeechend= function (event) { console.log("SPEECH END"); }; recognition.onsoundend= function (event) { console.log("SOUND END"); }; recognition.onnomatch= function (event) { console.log("NO MATCH"); }; } var two_line = /\n\n/g;<|fim▁hole|>function linebreak(s) { return s.replace(two_line, '<p></p>').replace(one_line, '<br>'); } function capitalize(s) { return s.replace(s.substr(0, 1), function (m) { return m.toUpperCase(); }); } function startDictation(event) { if (recognizing) { recognition.stop(); recognizing=false; start_button.innerHTML = "START" return; } final_transcript = ''; recognition.lang = 'it-IT'; recognition.start(); start_button.innerHTML = "STOP" final_span.innerHTML = ''; interim_span.innerHTML = ''; }<|fim▁end|>
var one_line = /\n/g;
<|file_name|>SeverityConfigurationException.java<|end_file_name|><|fim▁begin|>package org.gw4e.eclipse.builder.exception; /*- * #%L * gw4e * $Id:$ * $HeadURL:$ * %% * Copyright (C) 2017 gw4e-project * %% * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * #L% */ import org.gw4e.eclipse.builder.GW4EParser; import org.gw4e.eclipse.builder.Location; public class SeverityConfigurationException extends BuildPolicyConfigurationException { /** * */ private static final long serialVersionUID = 1L; public SeverityConfigurationException(Location location, String message,ParserContextProperties p) { super(location, message,p); } public int getProblemId () { return GW4EParser.INVALID_SEVERITY;<|fim▁hole|><|fim▁end|>
} }
<|file_name|>bookmark_entity.cc<|end_file_name|><|fim▁begin|>// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "sync/test/fake_server/bookmark_entity.h" <|fim▁hole|>#include <string> #include "base/basictypes.h" #include "base/guid.h" #include "sync/internal_api/public/base/model_type.h" #include "sync/protocol/sync.pb.h" #include "sync/test/fake_server/fake_server_entity.h" using std::string; namespace fake_server { namespace { // Returns true if and only if |client_entity| is a bookmark. bool IsBookmark(const sync_pb::SyncEntity& client_entity) { return syncer::GetModelType(client_entity) == syncer::BOOKMARKS; } } // namespace BookmarkEntity::~BookmarkEntity() { } // static scoped_ptr<FakeServerEntity> BookmarkEntity::CreateNew( const sync_pb::SyncEntity& client_entity, const string& parent_id, const string& client_guid) { CHECK(client_entity.version() == 0) << "New entities must have version = 0."; CHECK(IsBookmark(client_entity)) << "The given entity must be a bookmark."; const string id = FakeServerEntity::CreateId(syncer::BOOKMARKS, base::GenerateGUID()); const string originator_cache_guid = client_guid; const string originator_client_item_id = client_entity.id_string(); return scoped_ptr<FakeServerEntity>(new BookmarkEntity( id, client_entity.version(), client_entity.name(), originator_cache_guid, originator_client_item_id, client_entity.unique_position(), client_entity.specifics(), client_entity.folder(), parent_id, client_entity.ctime(), client_entity.mtime())); } // static scoped_ptr<FakeServerEntity> BookmarkEntity::CreateUpdatedVersion( const sync_pb::SyncEntity& client_entity, const FakeServerEntity& current_server_entity, const string& parent_id) { CHECK(client_entity.version() != 0) << "Existing entities must not have a " << "version = 0."; CHECK(IsBookmark(client_entity)) << "The given entity must be a bookmark."; const BookmarkEntity& current_bookmark_entity = static_cast<const BookmarkEntity&>(current_server_entity); const string originator_cache_guid = current_bookmark_entity.originator_cache_guid_; const string originator_client_item_id = current_bookmark_entity.originator_client_item_id_; return scoped_ptr<FakeServerEntity>(new BookmarkEntity( client_entity.id_string(), client_entity.version(), client_entity.name(), originator_cache_guid, originator_client_item_id, client_entity.unique_position(), client_entity.specifics(), client_entity.folder(), parent_id, client_entity.ctime(), client_entity.mtime())); } BookmarkEntity::BookmarkEntity( const string& id, int64 version, const string& name, const string& originator_cache_guid, const string& originator_client_item_id, const sync_pb::UniquePosition& unique_position, const sync_pb::EntitySpecifics& specifics, bool is_folder, const string& parent_id, int64 creation_time, int64 last_modified_time) : FakeServerEntity(id, syncer::BOOKMARKS, version, name), originator_cache_guid_(originator_cache_guid), originator_client_item_id_(originator_client_item_id), unique_position_(unique_position), is_folder_(is_folder), parent_id_(parent_id), creation_time_(creation_time), last_modified_time_(last_modified_time) { SetSpecifics(specifics); } void BookmarkEntity::SetParentId(const string& parent_id) { parent_id_ = parent_id; } string BookmarkEntity::GetParentId() const { return parent_id_; } void BookmarkEntity::SerializeAsProto(sync_pb::SyncEntity* proto) const { FakeServerEntity::SerializeBaseProtoFields(proto); proto->set_originator_cache_guid(originator_cache_guid_); proto->set_originator_client_item_id(originator_client_item_id_); proto->set_parent_id_string(parent_id_); proto->set_ctime(creation_time_); proto->set_mtime(last_modified_time_); sync_pb::UniquePosition* unique_position = proto->mutable_unique_position(); unique_position->CopyFrom(unique_position_); } bool BookmarkEntity::IsFolder() const { return is_folder_; } } // namespace fake_server<|fim▁end|>
<|file_name|>MIndustrialQuadRelayImpl.java<|end_file_name|><|fim▁begin|>/** * Copyright (c) 2010-2016 by the respective copyright holders. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html */ package org.openhab.binding.tinkerforge.internal.model.impl; import java.lang.reflect.InvocationTargetException; import java.util.concurrent.atomic.AtomicBoolean; import org.eclipse.emf.common.notify.Notification; import org.eclipse.emf.common.notify.NotificationChain; import org.eclipse.emf.common.util.EList; import org.eclipse.emf.ecore.EClass; import org.eclipse.emf.ecore.InternalEObject; import org.eclipse.emf.ecore.impl.ENotificationImpl; import org.eclipse.emf.ecore.impl.MinimalEObjectImpl; import org.eclipse.emf.ecore.util.EcoreUtil; import org.openhab.binding.tinkerforge.internal.LoggerConstants; import org.openhab.binding.tinkerforge.internal.TinkerforgeErrorHandler; import org.openhab.binding.tinkerforge.internal.model.MBaseDevice; import org.openhab.binding.tinkerforge.internal.model.MIndustrialQuadRelay; import org.openhab.binding.tinkerforge.internal.model.MIndustrialQuadRelayBricklet; import org.openhab.binding.tinkerforge.internal.model.MSubDevice; import org.openhab.binding.tinkerforge.internal.model.MSubDeviceHolder; import org.openhab.binding.tinkerforge.internal.model.ModelPackage; import org.openhab.binding.tinkerforge.internal.types.OnOffValue; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.tinkerforge.NotConnectedException; import com.tinkerforge.TimeoutException; /** * <!-- begin-user-doc --> * An implementation of the model object '<em><b>MIndustrial Quad Relay</b></em>'. * * @author Theo Weiss * @since 1.4.0 * <!-- end-user-doc --> * <p> * The following features are implemented: * <ul> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.MIndustrialQuadRelayImpl#getSwitchState * <em>Switch State</em>}</li> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.MIndustrialQuadRelayImpl#getLogger * <em>Logger</em>}</li> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.MIndustrialQuadRelayImpl#getUid <em>Uid</em>} * </li> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.MIndustrialQuadRelayImpl#isPoll <em>Poll</em>} * </li> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.MIndustrialQuadRelayImpl#getEnabledA * <em>Enabled A</em>}</li> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.MIndustrialQuadRelayImpl#getSubId * <em>Sub Id</em>}</li> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.MIndustrialQuadRelayImpl#getMbrick * <em>Mbrick</em>}</li> * <li>{@link org.openhab.binding.tinkerforge.internal.model.impl.MIndustrialQuadRelayImpl#getDeviceType * <em>Device Type</em>}</li> * </ul> * </p> * * @generated */ public class MIndustrialQuadRelayImpl extends MinimalEObjectImpl.Container implements MIndustrialQuadRelay { /** * The default value of the '{@link #getSwitchState() <em>Switch State</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getSwitchState() * @generated * @ordered */ protected static final OnOffValue SWITCH_STATE_EDEFAULT = null; /** * The cached value of the '{@link #getSwitchState() <em>Switch State</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getSwitchState() * @generated * @ordered */ protected OnOffValue switchState = SWITCH_STATE_EDEFAULT; /** * The default value of the '{@link #getLogger() <em>Logger</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getLogger() * @generated * @ordered */ protected static final Logger LOGGER_EDEFAULT = null; /** * The cached value of the '{@link #getLogger() <em>Logger</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getLogger() * @generated * @ordered */ protected Logger logger = LOGGER_EDEFAULT; /** * The default value of the '{@link #getUid() <em>Uid</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getUid() * @generated * @ordered */ protected static final String UID_EDEFAULT = null; /** * The cached value of the '{@link #getUid() <em>Uid</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getUid() * @generated * @ordered */ protected String uid = UID_EDEFAULT; /** * The default value of the '{@link #isPoll() <em>Poll</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #isPoll() * @generated * @ordered */ protected static final boolean POLL_EDEFAULT = true; /** * The cached value of the '{@link #isPoll() <em>Poll</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #isPoll() * @generated * @ordered */ protected boolean poll = POLL_EDEFAULT; /** * The default value of the '{@link #getEnabledA() <em>Enabled A</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getEnabledA() * @generated * @ordered */ protected static final AtomicBoolean ENABLED_A_EDEFAULT = null; /** * The cached value of the '{@link #getEnabledA() <em>Enabled A</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getEnabledA() * @generated * @ordered */ protected AtomicBoolean enabledA = ENABLED_A_EDEFAULT; /** * The default value of the '{@link #getSubId() <em>Sub Id</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getSubId() * @generated * @ordered */ protected static final String SUB_ID_EDEFAULT = null; /** * The cached value of the '{@link #getSubId() <em>Sub Id</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getSubId() * @generated * @ordered */ protected String subId = SUB_ID_EDEFAULT; /** * The default value of the '{@link #getDeviceType() <em>Device Type</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getDeviceType() * @generated * @ordered */ protected static final String DEVICE_TYPE_EDEFAULT = "quad_relay"; /** * The cached value of the '{@link #getDeviceType() <em>Device Type</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @see #getDeviceType() * @generated * @ordered */ protected String deviceType = DEVICE_TYPE_EDEFAULT; private short relayNum; private int mask; private static final byte DEFAULT_SELECTION_MASK = 0000000000000001; private static final byte OFF_BYTE = 0000000000000000; /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ protected MIndustrialQuadRelayImpl() { super(); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override protected EClass eStaticClass() { return ModelPackage.Literals.MINDUSTRIAL_QUAD_RELAY; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public OnOffValue getSwitchState() { return switchState; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void setSwitchState(OnOffValue newSwitchState) { OnOffValue oldSwitchState = switchState; switchState = newSwitchState; if (eNotificationRequired()) { eNotify(new ENotificationImpl(this, Notification.SET, ModelPackage.MINDUSTRIAL_QUAD_RELAY__SWITCH_STATE, oldSwitchState, switchState)); } } /** * <!-- begin-user-doc --> <!-- end-user-doc --> * * @generated NOT */ @Override public void turnSwitch(OnOffValue state) { logger.debug("turnSwitchState called on: {}", MIndustrialQuadRelayBrickletImpl.class); try { if (state == OnOffValue.OFF) { logger.debug("setSwitchValue off"); getMbrick().getTinkerforgeDevice().setSelectedValues(mask, OFF_BYTE); } else if (state == OnOffValue.ON) { logger.debug("setSwitchState on"); getMbrick().getTinkerforgeDevice().setSelectedValues(mask, mask); } else { logger.error("{} unkown switchstate {}", LoggerConstants.TFMODELUPDATE, state); } setSwitchState(state); } catch (TimeoutException e) { TinkerforgeErrorHandler.handleError(this, TinkerforgeErrorHandler.TF_TIMEOUT_EXCEPTION, e); } catch (NotConnectedException e) { TinkerforgeErrorHandler.handleError(this, TinkerforgeErrorHandler.TF_NOT_CONNECTION_EXCEPTION, e); } } /** * <!-- begin-user-doc --> <!-- end-user-doc --> * * @generated NOT */ @Override public void fetchSwitchState() { OnOffValue value = OnOffValue.UNDEF; try { int deviceValue = getMbrick().getTinkerforgeDevice().getValue(); if ((deviceValue & mask) == mask) { value = OnOffValue.ON; } else { value = OnOffValue.OFF; } setSwitchState(value); } catch (TimeoutException e) { TinkerforgeErrorHandler.handleError(this, TinkerforgeErrorHandler.TF_TIMEOUT_EXCEPTION, e); } catch (NotConnectedException e) { TinkerforgeErrorHandler.handleError(this, TinkerforgeErrorHandler.TF_NOT_CONNECTION_EXCEPTION, e); } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public Logger getLogger() { return logger; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void setLogger(Logger newLogger) { Logger oldLogger = logger; logger = newLogger; if (eNotificationRequired()) { eNotify(new ENotificationImpl(this, Notification.SET, ModelPackage.MINDUSTRIAL_QUAD_RELAY__LOGGER, oldLogger, logger)); } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public String getUid() { return uid; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void setUid(String newUid) { String oldUid = uid; uid = newUid; if (eNotificationRequired()) { eNotify(new ENotificationImpl(this, Notification.SET, ModelPackage.MINDUSTRIAL_QUAD_RELAY__UID, oldUid, uid)); } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public boolean isPoll() { return poll; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void setPoll(boolean newPoll) { boolean oldPoll = poll; poll = newPoll; if (eNotificationRequired()) { eNotify(new ENotificationImpl(this, Notification.SET, ModelPackage.MINDUSTRIAL_QUAD_RELAY__POLL, oldPoll, poll)); } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public AtomicBoolean getEnabledA() { return enabledA; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void setEnabledA(AtomicBoolean newEnabledA) { AtomicBoolean oldEnabledA = enabledA; enabledA = newEnabledA; if (eNotificationRequired()) { eNotify(new ENotificationImpl(this, Notification.SET, ModelPackage.MINDUSTRIAL_QUAD_RELAY__ENABLED_A, oldEnabledA, enabledA)); } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public String getSubId() { return subId; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void setSubId(String newSubId) { String oldSubId = subId; subId = newSubId; if (eNotificationRequired()) { eNotify(new ENotificationImpl(this, Notification.SET, ModelPackage.MINDUSTRIAL_QUAD_RELAY__SUB_ID, oldSubId, subId)); } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public MIndustrialQuadRelayBricklet getMbrick() { if (eContainerFeatureID() != ModelPackage.MINDUSTRIAL_QUAD_RELAY__MBRICK) { return null; } return (MIndustrialQuadRelayBricklet) eContainer(); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ public NotificationChain basicSetMbrick(MIndustrialQuadRelayBricklet newMbrick, NotificationChain msgs) { msgs = eBasicSetContainer((InternalEObject) newMbrick, ModelPackage.MINDUSTRIAL_QUAD_RELAY__MBRICK, msgs); return msgs; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void setMbrick(MIndustrialQuadRelayBricklet newMbrick) { if (newMbrick != eInternalContainer() || (eContainerFeatureID() != ModelPackage.MINDUSTRIAL_QUAD_RELAY__MBRICK && newMbrick != null)) { if (EcoreUtil.isAncestor(this, newMbrick)) { throw new IllegalArgumentException("Recursive containment not allowed for " + toString()); } NotificationChain msgs = null; if (eInternalContainer() != null) { msgs = eBasicRemoveFromContainer(msgs); } if (newMbrick != null) { msgs = ((InternalEObject) newMbrick).eInverseAdd(this, ModelPackage.MSUB_DEVICE_HOLDER__MSUBDEVICES, MSubDeviceHolder.class, msgs); } msgs = basicSetMbrick(newMbrick, msgs); if (msgs != null) { msgs.dispatch(); } } else if (eNotificationRequired()) { eNotify(new ENotificationImpl(this, Notification.SET, ModelPackage.MINDUSTRIAL_QUAD_RELAY__MBRICK, newMbrick, newMbrick)); } } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public String getDeviceType() { return deviceType; } /** * <!-- begin-user-doc --> <!-- end-user-doc --> * * @generated NOT */ @Override public void init() { setEnabledA(new AtomicBoolean()); poll = true; // don't use the setter to prevent notification logger = LoggerFactory.getLogger(MIndustrialQuadRelay.class); relayNum = Short.parseShort(String.valueOf(subId.charAt(subId.length() - 1))); mask = DEFAULT_SELECTION_MASK << relayNum; } /** * <!-- begin-user-doc --> <!-- end-user-doc --> * * @generated NOT */ @Override public void enable() { logger.debug("enable called on MIndustrialQuadRelayImpl"); fetchSwitchState(); } /** * <!-- begin-user-doc --> <!-- end-user-doc --> * <|fim▁hole|> } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public NotificationChain eInverseAdd(InternalEObject otherEnd, int featureID, NotificationChain msgs) { switch (featureID) { case ModelPackage.MINDUSTRIAL_QUAD_RELAY__MBRICK: if (eInternalContainer() != null) { msgs = eBasicRemoveFromContainer(msgs); } return basicSetMbrick((MIndustrialQuadRelayBricklet) otherEnd, msgs); } return super.eInverseAdd(otherEnd, featureID, msgs); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public NotificationChain eInverseRemove(InternalEObject otherEnd, int featureID, NotificationChain msgs) { switch (featureID) { case ModelPackage.MINDUSTRIAL_QUAD_RELAY__MBRICK: return basicSetMbrick(null, msgs); } return super.eInverseRemove(otherEnd, featureID, msgs); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public NotificationChain eBasicRemoveFromContainerFeature(NotificationChain msgs) { switch (eContainerFeatureID()) { case ModelPackage.MINDUSTRIAL_QUAD_RELAY__MBRICK: return eInternalContainer().eInverseRemove(this, ModelPackage.MSUB_DEVICE_HOLDER__MSUBDEVICES, MSubDeviceHolder.class, msgs); } return super.eBasicRemoveFromContainerFeature(msgs); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public Object eGet(int featureID, boolean resolve, boolean coreType) { switch (featureID) { case ModelPackage.MINDUSTRIAL_QUAD_RELAY__SWITCH_STATE: return getSwitchState(); case ModelPackage.MINDUSTRIAL_QUAD_RELAY__LOGGER: return getLogger(); case ModelPackage.MINDUSTRIAL_QUAD_RELAY__UID: return getUid(); case ModelPackage.MINDUSTRIAL_QUAD_RELAY__POLL: return isPoll(); case ModelPackage.MINDUSTRIAL_QUAD_RELAY__ENABLED_A: return getEnabledA(); case ModelPackage.MINDUSTRIAL_QUAD_RELAY__SUB_ID: return getSubId(); case ModelPackage.MINDUSTRIAL_QUAD_RELAY__MBRICK: return getMbrick(); case ModelPackage.MINDUSTRIAL_QUAD_RELAY__DEVICE_TYPE: return getDeviceType(); } return super.eGet(featureID, resolve, coreType); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void eSet(int featureID, Object newValue) { switch (featureID) { case ModelPackage.MINDUSTRIAL_QUAD_RELAY__SWITCH_STATE: setSwitchState((OnOffValue) newValue); return; case ModelPackage.MINDUSTRIAL_QUAD_RELAY__LOGGER: setLogger((Logger) newValue); return; case ModelPackage.MINDUSTRIAL_QUAD_RELAY__UID: setUid((String) newValue); return; case ModelPackage.MINDUSTRIAL_QUAD_RELAY__POLL: setPoll((Boolean) newValue); return; case ModelPackage.MINDUSTRIAL_QUAD_RELAY__ENABLED_A: setEnabledA((AtomicBoolean) newValue); return; case ModelPackage.MINDUSTRIAL_QUAD_RELAY__SUB_ID: setSubId((String) newValue); return; case ModelPackage.MINDUSTRIAL_QUAD_RELAY__MBRICK: setMbrick((MIndustrialQuadRelayBricklet) newValue); return; } super.eSet(featureID, newValue); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public void eUnset(int featureID) { switch (featureID) { case ModelPackage.MINDUSTRIAL_QUAD_RELAY__SWITCH_STATE: setSwitchState(SWITCH_STATE_EDEFAULT); return; case ModelPackage.MINDUSTRIAL_QUAD_RELAY__LOGGER: setLogger(LOGGER_EDEFAULT); return; case ModelPackage.MINDUSTRIAL_QUAD_RELAY__UID: setUid(UID_EDEFAULT); return; case ModelPackage.MINDUSTRIAL_QUAD_RELAY__POLL: setPoll(POLL_EDEFAULT); return; case ModelPackage.MINDUSTRIAL_QUAD_RELAY__ENABLED_A: setEnabledA(ENABLED_A_EDEFAULT); return; case ModelPackage.MINDUSTRIAL_QUAD_RELAY__SUB_ID: setSubId(SUB_ID_EDEFAULT); return; case ModelPackage.MINDUSTRIAL_QUAD_RELAY__MBRICK: setMbrick((MIndustrialQuadRelayBricklet) null); return; } super.eUnset(featureID); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public boolean eIsSet(int featureID) { switch (featureID) { case ModelPackage.MINDUSTRIAL_QUAD_RELAY__SWITCH_STATE: return SWITCH_STATE_EDEFAULT == null ? switchState != null : !SWITCH_STATE_EDEFAULT.equals(switchState); case ModelPackage.MINDUSTRIAL_QUAD_RELAY__LOGGER: return LOGGER_EDEFAULT == null ? logger != null : !LOGGER_EDEFAULT.equals(logger); case ModelPackage.MINDUSTRIAL_QUAD_RELAY__UID: return UID_EDEFAULT == null ? uid != null : !UID_EDEFAULT.equals(uid); case ModelPackage.MINDUSTRIAL_QUAD_RELAY__POLL: return poll != POLL_EDEFAULT; case ModelPackage.MINDUSTRIAL_QUAD_RELAY__ENABLED_A: return ENABLED_A_EDEFAULT == null ? enabledA != null : !ENABLED_A_EDEFAULT.equals(enabledA); case ModelPackage.MINDUSTRIAL_QUAD_RELAY__SUB_ID: return SUB_ID_EDEFAULT == null ? subId != null : !SUB_ID_EDEFAULT.equals(subId); case ModelPackage.MINDUSTRIAL_QUAD_RELAY__MBRICK: return getMbrick() != null; case ModelPackage.MINDUSTRIAL_QUAD_RELAY__DEVICE_TYPE: return DEVICE_TYPE_EDEFAULT == null ? deviceType != null : !DEVICE_TYPE_EDEFAULT.equals(deviceType); } return super.eIsSet(featureID); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public int eBaseStructuralFeatureID(int derivedFeatureID, Class<?> baseClass) { if (baseClass == MBaseDevice.class) { switch (derivedFeatureID) { case ModelPackage.MINDUSTRIAL_QUAD_RELAY__LOGGER: return ModelPackage.MBASE_DEVICE__LOGGER; case ModelPackage.MINDUSTRIAL_QUAD_RELAY__UID: return ModelPackage.MBASE_DEVICE__UID; case ModelPackage.MINDUSTRIAL_QUAD_RELAY__POLL: return ModelPackage.MBASE_DEVICE__POLL; case ModelPackage.MINDUSTRIAL_QUAD_RELAY__ENABLED_A: return ModelPackage.MBASE_DEVICE__ENABLED_A; default: return -1; } } if (baseClass == MSubDevice.class) { switch (derivedFeatureID) { case ModelPackage.MINDUSTRIAL_QUAD_RELAY__SUB_ID: return ModelPackage.MSUB_DEVICE__SUB_ID; case ModelPackage.MINDUSTRIAL_QUAD_RELAY__MBRICK: return ModelPackage.MSUB_DEVICE__MBRICK; default: return -1; } } return super.eBaseStructuralFeatureID(derivedFeatureID, baseClass); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public int eDerivedStructuralFeatureID(int baseFeatureID, Class<?> baseClass) { if (baseClass == MBaseDevice.class) { switch (baseFeatureID) { case ModelPackage.MBASE_DEVICE__LOGGER: return ModelPackage.MINDUSTRIAL_QUAD_RELAY__LOGGER; case ModelPackage.MBASE_DEVICE__UID: return ModelPackage.MINDUSTRIAL_QUAD_RELAY__UID; case ModelPackage.MBASE_DEVICE__POLL: return ModelPackage.MINDUSTRIAL_QUAD_RELAY__POLL; case ModelPackage.MBASE_DEVICE__ENABLED_A: return ModelPackage.MINDUSTRIAL_QUAD_RELAY__ENABLED_A; default: return -1; } } if (baseClass == MSubDevice.class) { switch (baseFeatureID) { case ModelPackage.MSUB_DEVICE__SUB_ID: return ModelPackage.MINDUSTRIAL_QUAD_RELAY__SUB_ID; case ModelPackage.MSUB_DEVICE__MBRICK: return ModelPackage.MINDUSTRIAL_QUAD_RELAY__MBRICK; default: return -1; } } return super.eDerivedStructuralFeatureID(baseFeatureID, baseClass); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public int eDerivedOperationID(int baseOperationID, Class<?> baseClass) { if (baseClass == MBaseDevice.class) { switch (baseOperationID) { case ModelPackage.MBASE_DEVICE___INIT: return ModelPackage.MINDUSTRIAL_QUAD_RELAY___INIT; case ModelPackage.MBASE_DEVICE___ENABLE: return ModelPackage.MINDUSTRIAL_QUAD_RELAY___ENABLE; case ModelPackage.MBASE_DEVICE___DISABLE: return ModelPackage.MINDUSTRIAL_QUAD_RELAY___DISABLE; default: return -1; } } if (baseClass == MSubDevice.class) { switch (baseOperationID) { default: return -1; } } return super.eDerivedOperationID(baseOperationID, baseClass); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public Object eInvoke(int operationID, EList<?> arguments) throws InvocationTargetException { switch (operationID) { case ModelPackage.MINDUSTRIAL_QUAD_RELAY___INIT: init(); return null; case ModelPackage.MINDUSTRIAL_QUAD_RELAY___ENABLE: enable(); return null; case ModelPackage.MINDUSTRIAL_QUAD_RELAY___DISABLE: disable(); return null; case ModelPackage.MINDUSTRIAL_QUAD_RELAY___TURN_SWITCH__ONOFFVALUE: turnSwitch((OnOffValue) arguments.get(0)); return null; case ModelPackage.MINDUSTRIAL_QUAD_RELAY___FETCH_SWITCH_STATE: fetchSwitchState(); return null; } return super.eInvoke(operationID, arguments); } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * * @generated */ @Override public String toString() { if (eIsProxy()) { return super.toString(); } StringBuffer result = new StringBuffer(super.toString()); result.append(" (switchState: "); result.append(switchState); result.append(", logger: "); result.append(logger); result.append(", uid: "); result.append(uid); result.append(", poll: "); result.append(poll); result.append(", enabledA: "); result.append(enabledA); result.append(", subId: "); result.append(subId); result.append(", deviceType: "); result.append(deviceType); result.append(')'); return result.toString(); } } // MIndustrialQuadRelayImpl<|fim▁end|>
* @generated NOT */ @Override public void disable() {
<|file_name|>parse.js<|end_file_name|><|fim▁begin|>'use strict'; exports.__esModule = true; const moduleRequire = require('./module-require').default; const extname = require('path').extname; const fs = require('fs'); const log = require('debug')('eslint-plugin-import:parse'); function getBabelEslintVisitorKeys(parserPath) { if (parserPath.endsWith('index.js')) { const hypotheticalLocation = parserPath.replace('index.js', 'visitor-keys.js'); if (fs.existsSync(hypotheticalLocation)) { const keys = moduleRequire(hypotheticalLocation); return keys.default || keys; }<|fim▁hole|>} function keysFromParser(parserPath, parserInstance, parsedResult) { // Exposed by @typescript-eslint/parser and @babel/eslint-parser if (parsedResult && parsedResult.visitorKeys) { return parsedResult.visitorKeys; } if (/.*espree.*/.test(parserPath)) { return parserInstance.VisitorKeys; } if (/.*babel-eslint.*/.test(parserPath)) { return getBabelEslintVisitorKeys(parserPath); } return null; } exports.default = function parse(path, content, context) { if (context == null) throw new Error('need context to parse properly'); let parserOptions = context.parserOptions; const parserPath = getParserPath(path, context); if (!parserPath) throw new Error('parserPath is required!'); // hack: espree blows up with frozen options parserOptions = Object.assign({}, parserOptions); parserOptions.ecmaFeatures = Object.assign({}, parserOptions.ecmaFeatures); // always include comments and tokens (for doc parsing) parserOptions.comment = true; parserOptions.attachComment = true; // keeping this for backward-compat with older parsers parserOptions.tokens = true; // attach node locations parserOptions.loc = true; parserOptions.range = true; // provide the `filePath` like eslint itself does, in `parserOptions` // https://github.com/eslint/eslint/blob/3ec436ee/lib/linter.js#L637 parserOptions.filePath = path; // @typescript-eslint/parser will parse the entire project with typechecking if you provide // "project" or "projects" in parserOptions. Removing these options means the parser will // only parse one file in isolate mode, which is much, much faster. // https://github.com/import-js/eslint-plugin-import/issues/1408#issuecomment-509298962 delete parserOptions.project; delete parserOptions.projects; // require the parser relative to the main module (i.e., ESLint) const parser = moduleRequire(parserPath); if (typeof parser.parseForESLint === 'function') { let ast; try { const parserRaw = parser.parseForESLint(content, parserOptions); ast = parserRaw.ast; return { ast, visitorKeys: keysFromParser(parserPath, parser, parserRaw), }; } catch (e) { console.warn(); console.warn('Error while parsing ' + parserOptions.filePath); console.warn('Line ' + e.lineNumber + ', column ' + e.column + ': ' + e.message); } if (!ast || typeof ast !== 'object') { console.warn( '`parseForESLint` from parser `' + parserPath + '` is invalid and will just be ignored' ); } else { return { ast, visitorKeys: keysFromParser(parserPath, parser, undefined), }; } } const keys = keysFromParser(parserPath, parser, undefined); return { ast: parser.parse(content, parserOptions), visitorKeys: keys, }; }; function getParserPath(path, context) { const parsers = context.settings['import/parsers']; if (parsers != null) { const extension = extname(path); for (const parserPath in parsers) { if (parsers[parserPath].indexOf(extension) > -1) { // use this alternate parser log('using alt parser:', parserPath); return parserPath; } } } // default to use ESLint parser return context.parserPath; }<|fim▁end|>
} return null;
<|file_name|>praznici.spec.ts<|end_file_name|><|fim▁begin|>import { PrazniciProvider } from './praznici'; let praznici: PrazniciProvider = null; describe('PrazniciProvider', () => { beforeEach(() => { praznici = new PrazniciProvider(); // spyOn(praznici['storage'], 'set').and.callThrough(); }); it('returns a date for Easter', () => { expect(praznici.getVeligden(2017).valueOf()).toEqual(new Date(2017, 3, 16).valueOf()); }); it('expects that the winter Commemoration of the departed to be 59 days before Easter', () => { let easter = praznici.getVeligden(2017).valueOf(); let zadushicaZimska = praznici.getZadushnicaZimska(2017); expect(zadushicaZimska.setDate(zadushicaZimska.getDate() + 59).valueOf()).toEqual(easter); }); it('expects that the Forgiveness to be 49 days before Easter', () => { let easter = praznici.getVeligden(2017).valueOf(); let forgiveness = praznici.getProchka(2017); expect(forgiveness.setDate(forgiveness.getDate() + 49).valueOf()).toEqual(easter); }); it('expects that the Long Length to be 48 days before Easter', () => { let easter = praznici.getVeligden(2017).valueOf(); let longLength = praznici.getVeligdenskiPosti(2017); expect(longLength.setDate(longLength.getDate() + 48).valueOf()).toEqual(easter);<|fim▁hole|> });<|fim▁end|>
});
<|file_name|>chip.cc<|end_file_name|><|fim▁begin|>// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/autofill_assistant/browser/chip.h" #include "components/autofill_assistant/browser/user_action.h" namespace autofill_assistant { Chip::Chip() = default; Chip::~Chip() = default; Chip::Chip(const Chip& other) = default; Chip::Chip(const ChipProto& proto) : type(proto.type()), icon(proto.icon()), text(proto.text()), sticky(proto.sticky()), content_description(proto.content_description()), is_content_description_set(proto.has_content_description()) {} bool Chip::empty() const { return type == UNKNOWN_CHIP_TYPE && text.empty() && icon == NO_ICON; } void SetDefaultChipType(std::vector<UserAction>* user_actions) { for (UserAction& user_action : *user_actions) { if (user_action.chip().empty()) continue;<|fim▁hole|> if (user_action.chip().type == UNKNOWN_CHIP_TYPE) { // Assume chips with unknown type are normal actions. user_action.chip().type = NORMAL_ACTION; } } } } // namespace autofill_assistant<|fim▁end|>
<|file_name|>test_autoscaler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python """ This script constructs an Marathon application definition for the stress tester container. Be sure to deploy the latest stress tester docker image to the registry before running this. """ import argparse import json import os import sys BASE_PATH = os.path.dirname(os.path.realpath(__file__)) PROJECT_PATH = os.path.dirname(BASE_PATH) sys.path.append(os.path.join(PROJECT_PATH, 'lib/')) from marathon_autoscaler.marathon import Marathon def load_app_definition(): with open(os.path.join(os.getcwd(), "data", "stress_tester_app.json"), 'r') as f: test_app_definition = json.load(f) return test_app_definition def load_stress_parameters(): with open(os.path.join(os.getcwd(), "data", "stress-parameters.json"), 'r') as f: test_app_definition = json.load(f) return test_app_definition def load_autoscaler_parameters(): with open(os.path.join(os.getcwd(), "data", "autoscaler-parameters.json"), 'r') as f: test_app_definition = json.load(f) return test_app_definition def parse_cli_args():<|fim▁hole|> parser.add_argument("--marathon-uri", dest="marathon_uri", type=str, required=True, help="The Marathon Endpoint") parser.add_argument("--marathon-user", dest="marathon_user", type=str, required=True, help="Username for Marathon access") parser.add_argument("--marathon-pass", dest="marathon_pass", type=str, required=True, help="Password for Marathon access") return parser.parse_args() if __name__ == "__main__": args = parse_cli_args() app_def = load_app_definition() mara = Marathon(args.marathon_uri, (args.marathon_user, args.marathon_pass)) stress_params = load_stress_parameters() autoscaler_params = load_autoscaler_parameters() print(""" Stress Parameters: {0} """.format(stress_params)) print(""" Scaling Parameters: {0} """.format(autoscaler_params)) app_def["labels"]["use_marathon_autoscaler"] = "0.0.3" app_def["labels"]["min_instances"] = str(autoscaler_params["min_instances"]) app_def["labels"]["max_instances"] = str(autoscaler_params["max_instances"]) app_def["labels"]["mas_rule_scaleup_1"] = "cpu | >90 | PT2M | 1 | PT2M" app_def["labels"]["mas_rule_scaleup_2"] = "mem | >90 | PT2M | 1 | PT2M" app_def["labels"]["mas_rule_scaledown"] = "cpu | <90 | PT2M | -1 | PT2M" app_def["env"]["INSTRUCTIONS"] = json.dumps(stress_params).replace("\n", "").replace(" ", "") response = mara.create_app(app_def) print(response)<|fim▁end|>
parser = argparse.ArgumentParser(description="Stress Tester Deployer")
<|file_name|>ban.cpp<|end_file_name|><|fim▁begin|>// Copyright (c) 2014-2016, The Monero Project // // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are // permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other // materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be // used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL // THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF // THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers #include "gtest/gtest.h" #include "cryptonote_core/cryptonote_core.h" #include "p2p/net_node.h" #include "cryptonote_protocol/cryptonote_protocol_handler.h" namespace cryptonote { class blockchain_storage; } class test_core { public: void on_synchronized(){} uint64_t get_current_blockchain_height() const {return 1;} void set_target_blockchain_height(uint64_t) {} bool init(const boost::program_options::variables_map& vm) {return true ;} bool deinit(){return true;} bool get_short_chain_history(std::list<crypto::hash>& ids) const { return true; } bool get_stat_info(cryptonote::core_stat_info& st_inf) const {return true;} bool have_block(const crypto::hash& id) const {return true;} bool get_blockchain_top(uint64_t& height, crypto::hash& top_id)const{height=0;top_id=cryptonote::null_hash;return true;} bool handle_incoming_tx(const cryptonote::blobdata& tx_blob, cryptonote::tx_verification_context& tvc, bool keeped_by_block, bool relaued) { return true; } bool handle_incoming_block(const cryptonote::blobdata& block_blob, cryptonote::block_verification_context& bvc, bool update_miner_blocktemplate = true) { return true; } void pause_mine(){} void resume_mine(){} bool on_idle(){return true;} bool find_blockchain_supplement(const std::list<crypto::hash>& qblock_ids, cryptonote::NOTIFY_RESPONSE_CHAIN_ENTRY::request& resp){return true;} bool handle_get_objects(cryptonote::NOTIFY_REQUEST_GET_OBJECTS::request& arg, cryptonote::NOTIFY_RESPONSE_GET_OBJECTS::request& rsp, cryptonote::cryptonote_connection_context& context){return true;} cryptonote::blockchain_storage &get_blockchain_storage() { throw std::runtime_error("Called invalid member function: please never call get_blockchain_storage on the TESTING class test_core."); } bool get_test_drop_download() const {return true;} bool get_test_drop_download_height() const {return true;} bool prepare_handle_incoming_blocks(const std::list<cryptonote::block_complete_entry> &blocks) { return true; } bool cleanup_handle_incoming_blocks(bool force_sync = false) { return true; } uint64_t get_target_blockchain_height() const { return 1; } }; typedef nodetool::node_server<cryptonote::t_cryptonote_protocol_handler<test_core>> Server; static bool is_blocked(Server &server, uint32_t ip, time_t *t = NULL) { std::map<uint32_t, time_t> ips = server.get_blocked_ips(); for (auto rec: ips) { if (rec.first == ip) { if (t) *t = rec.second; return true; } } return false; } TEST(ban, add) { test_core pr_core; cryptonote::t_cryptonote_protocol_handler<test_core> cprotocol(pr_core, NULL); Server server(cprotocol); cprotocol.set_p2p_endpoint(&server); // starts empty ASSERT_TRUE(server.get_blocked_ips().empty()); ASSERT_FALSE(is_blocked(server,MAKE_IP(1,2,3,4))); ASSERT_FALSE(is_blocked(server,MAKE_IP(1,2,3,5))); // add an IP ASSERT_TRUE(server.block_ip(MAKE_IP(1,2,3,4))); ASSERT_TRUE(server.get_blocked_ips().size() == 1); ASSERT_TRUE(is_blocked(server,MAKE_IP(1,2,3,4))); ASSERT_FALSE(is_blocked(server,MAKE_IP(1,2,3,5))); // add the same, should not change ASSERT_TRUE(server.block_ip(MAKE_IP(1,2,3,4))); ASSERT_TRUE(server.get_blocked_ips().size() == 1); ASSERT_TRUE(is_blocked(server,MAKE_IP(1,2,3,4))); ASSERT_FALSE(is_blocked(server,MAKE_IP(1,2,3,5))); // remove an unblocked IP, should not change ASSERT_FALSE(server.unblock_ip(MAKE_IP(1,2,3,5))); ASSERT_TRUE(server.get_blocked_ips().size() == 1); ASSERT_TRUE(is_blocked(server,MAKE_IP(1,2,3,4))); ASSERT_FALSE(is_blocked(server,MAKE_IP(1,2,3,5))); // remove the IP, ends up empty ASSERT_TRUE(server.unblock_ip(MAKE_IP(1,2,3,4))); ASSERT_TRUE(server.get_blocked_ips().size() == 0); ASSERT_FALSE(is_blocked(server,MAKE_IP(1,2,3,4))); ASSERT_FALSE(is_blocked(server,MAKE_IP(1,2,3,5))); // remove the IP from an empty list, still empty ASSERT_FALSE(server.unblock_ip(MAKE_IP(1,2,3,4))); ASSERT_TRUE(server.get_blocked_ips().size() == 0); ASSERT_FALSE(is_blocked(server,MAKE_IP(1,2,3,4))); ASSERT_FALSE(is_blocked(server,MAKE_IP(1,2,3,5))); // add two for known amounts of time, they're both blocked ASSERT_TRUE(server.block_ip(MAKE_IP(1,2,3,4), 1)); ASSERT_TRUE(server.block_ip(MAKE_IP(1,2,3,5), 3)); ASSERT_TRUE(server.get_blocked_ips().size() == 2); ASSERT_TRUE(is_blocked(server,MAKE_IP(1,2,3,4)));<|fim▁hole|> // these tests would need to call is_remote_ip_allowed, which is private #if 0 // after two seconds, the first IP is unblocked, but not the second yet sleep(2); ASSERT_TRUE(server.get_blocked_ips().size() == 1); ASSERT_FALSE(is_blocked(server,MAKE_IP(1,2,3,4))); ASSERT_TRUE(is_blocked(server,MAKE_IP(1,2,3,5))); // after two more seconds, the second IP is also unblocked sleep(2); ASSERT_TRUE(server.get_blocked_ips().size() == 0); ASSERT_FALSE(is_blocked(server,MAKE_IP(1,2,3,4))); ASSERT_FALSE(is_blocked(server,MAKE_IP(1,2,3,5))); #endif // add an IP again, then re-ban for longer, then shorter time_t t; ASSERT_TRUE(server.block_ip(MAKE_IP(1,2,3,4), 2)); ASSERT_TRUE(server.get_blocked_ips().size() == 1); ASSERT_TRUE(is_blocked(server,MAKE_IP(1,2,3,4), &t)); ASSERT_FALSE(is_blocked(server,MAKE_IP(1,2,3,5))); ASSERT_TRUE(t >= 1); ASSERT_TRUE(server.block_ip(MAKE_IP(1,2,3,4), 9)); ASSERT_TRUE(server.get_blocked_ips().size() == 1); ASSERT_TRUE(is_blocked(server,MAKE_IP(1,2,3,4), &t)); ASSERT_FALSE(is_blocked(server,MAKE_IP(1,2,3,5))); ASSERT_TRUE(t >= 8); ASSERT_TRUE(server.block_ip(MAKE_IP(1,2,3,4), 5)); ASSERT_TRUE(server.get_blocked_ips().size() == 1); ASSERT_TRUE(is_blocked(server,MAKE_IP(1,2,3,4), &t)); ASSERT_FALSE(is_blocked(server,MAKE_IP(1,2,3,5))); ASSERT_TRUE(t >= 4); }<|fim▁end|>
ASSERT_TRUE(is_blocked(server,MAKE_IP(1,2,3,5))); ASSERT_TRUE(server.unblock_ip(MAKE_IP(1,2,3,4))); ASSERT_TRUE(server.unblock_ip(MAKE_IP(1,2,3,5)));
<|file_name|>headergen.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 # Copyright (C) 2010-2011 Marcin Kościelnicki <[email protected]> # Copyright (C) 2010 Luca Barbieri <[email protected]> # Copyright (C) 2010 Marcin Slusarz <[email protected]> # All Rights Reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice (including the next # paragraph) shall be included in all copies or substantial portions of the # Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. import rnn import sys startcol = 64 fouts = {} def printdef(name, val, file): fout = fouts[file] fout.write("#define {}{} {}\n".format(name, " " * (startcol - len(name)), val)) def printvalue(val, shift): if val.varinfo.dead: return if val.value is not None: printdef(val.fullname, hex(val.value << shift), val.file) def printtypeinfo(ti, prefix, shift, file): if isinstance(ti, rnn.TypeHex) or isinstance(ti, rnn.TypeInt): if ti.shr: printdef (prefix + "__SHR", str(ti.shr), file) if ti.min is not None: printdef (prefix + "__MIN", hex(ti.min), file) if ti.max is not None: printdef (prefix + "__MAX", hex(ti.max), file) if ti.align is not None: printdef (prefix + "__ALIGN", hex(ti.align), file) if isinstance(ti, rnn.TypeFixed): if ti.min is not None: printdef (prefix + "__MIN", hex(ti.min), file) if ti.max is not None: printdef (prefix + "__MAX", hex(ti.max), file) if ti.radix is not None: printdef (prefix + "__RADIX", str(ti.radix), file) if isinstance(ti, rnn.Enum) and ti.inline: for val in ti.vals: printvalue(val, shift) if isinstance(ti, rnn.Bitset) and ti.inline: for bitfield in ti.bitfields: printbitfield(bitfield, shift) def printbitfield(bf, shift): if bf.varinfo.dead: return if isinstance(bf.typeinfo, rnn.TypeBoolean): printdef(bf.fullname, hex(bf.mask << shift), bf.file) else: printdef(bf.fullname + "__MASK", hex(bf.mask << shift), bf.file) printdef(bf.fullname + "__SHIFT", str(bf.low + shift), bf.file) printtypeinfo(bf.typeinfo, bf.fullname, bf.low + shift, bf.file) def printdelem(elem, offset, strides): if elem.varinfo.dead: return if elem.length != 1: strides = strides + [elem.stride] offset = offset + elem.offset if elem.name: if strides: name = elem.fullname + '(' + ", ".join("i{}".format(i) for i in range(len(strides))) + ')' val = '(' + hex(offset) + "".join(" + {:x} * i{}".format(stride, i) for i, stride in enumerate(strides)) + ')' printdef(name, val, elem.file) else: printdef(elem.fullname, hex(offset), elem.file) if elem.stride: printdef(elem.fullname +"__ESIZE", hex(elem.stride), elem.file) if elem.length != 1: printdef(elem.fullname + "__LEN", hex(elem.length), elem.file) if isinstance(elem, rnn.Reg): printtypeinfo(elem.typeinfo, elem.fullname, 0, elem.file) fouts[elem.file].write("\n") if isinstance(elem, rnn.Stripe): for subelem in elem.elems: printdelem(subelem, offset, strides) def print_file_info(fout, file): #struct stat sb; #struct tm tm; #stat(file, &sb); #gmtime_r(&sb.st_mtime, &tm); #char timestr[64]; #strftime(timestr, sizeof(timestr), "%Y-%m-%d %H:%M:%S", tm); #fprintf(dst, "(%7Lu bytes, from %s)\n", (unsigned long long)sb->st_size, timestr); fout.write("\n") def printhead(file, db): fout = fouts[file] fout.write("#ifndef {}\n".format(guard(file))) fout.write("#define {}\n".format(guard(file))) fout.write("\n") fout.write( "/* Autogenerated file, DO NOT EDIT manually!\n" "\n" "This file was generated by the rules-ng-ng headergen tool in this git repository:\n" "http://github.com/envytools/envytools/\n" "git clone https://github.com/envytools/envytools.git\n" "\n" "The rules-ng-ng source files this header was generated from are:\n") #struct stat sb; #struct tm tm; #stat(f.name, &sb); #gmtime_r(&sb.st_mtime, &tm); maxlen = max(len(file) for file in db.files) for file in db.files: fout.write("- {} ".format(file + " " * (maxlen - len(file)))) print_file_info(fout, file) fout.write( "\n" "Copyright (C) ") #if(db->copyright.firstyear && db->copyright.firstyear < (1900 + tm.tm_year)) # fout.write("%u-", db->copyright.firstyear); #fout.write("%u", 1900 + tm.tm_year); if db.copyright.authors: fout.write(" by the following authors:") for author in db.copyright.authors: fout.write("\n- ") if author.name: fout.write(author.name) if author.email: fout.write(" <{}>".format(author.email)) if author.nicknames: fout.write(" ({})".format(", ".join(author.nicknames))) fout.write("\n") if db.copyright.license: fout.write("\n{}\n".format(db.copyright.license)) fout.write("*/\n\n\n") def guard(file): return ''.join(c.upper() if c.isalnum() else '_' for c in file) def process(mainfile): db = rnn.Database() rnn.parsefile(db, mainfile) db.prep() for file in db.files: fouts[file] = open(file.replace('/', '_') + '.h', 'w') printhead(file, db) for enum in db.enums: if not enum.inline: for val in enum.vals: printvalue(val, 0) for bitset in db.bitsets: if not bitset.inline: for bitfield in bitset.bitfields:<|fim▁hole|> for domain in db.domains: if domain.size: printdef(domain.fullname + "__SIZE", hex(domain.size), domain.file) for elem in domain.elems: printdelem(elem, 0, []) for file in fouts: fouts[file].write("\n#endif /* {} */\n".format(guard(file))) fouts[file].close() return db.estatus if len(sys.argv) < 2: sys.stdout.write ("Usage:\n" "\theadergen file.xml\n" ) sys.exit(2) sys.exit(process(sys.argv[1]))<|fim▁end|>
printbitfield(bitfield, 0)
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from .pidSVG import *<|fim▁end|>
# simple __init__.py
<|file_name|>characterize.py<|end_file_name|><|fim▁begin|>import re<|fim▁hole|> import mpl_tools import vtk_tools def load_pdb(name): with open(name+'.pdb') as fp: points = [] conns = [] for line in fp: if line.startswith('HET'): pattern = r'(-?\d+.\d\d\d)' x, y, z = (float(c) for c in re.findall(pattern, line)) points.append([x, y, z]) elif line.startswith('CON'): pattern = r'(\d+)' ids = (int(c) for c in re.findall(pattern, line)) first = next(ids) conns.extend([(first-1, other-1) for other in ids]) return points, conns def extract_spheres(im): ''' credit to untubu @ stackoverflow for this still needs a lot of improvement ''' im = np.atleast_3d(im) data = ndimage.morphology.distance_transform_edt(im) max_data = ndimage.filters.maximum_filter(data, 10) maxima = data==max_data # but this includes some globally low voids min_data = ndimage.filters.minimum_filter(data, 10) diff = (max_data - min_data) > 1 maxima[diff==0] = 0 labels, num_maxima = ndimage.label(maxima) centers = [ndimage.center_of_mass(labels==i) for i in range(1, num_maxima+1)] radii = [data[center] for center in centers] return np.array(centers), np.array(radii) def rasterize(): pass solid_nodes, solid_edges = map(np.array, load_pdb('CHA')) solid_nodes -= solid_nodes.min(axis=0) solid_nodes *= 4 coord_pair = solid_nodes[solid_edges] discretized = [] for a, b in coord_pair: point_list = bresenham.bresenhamline(np.atleast_2d(a), b, -1).astype(int).tolist() discretized.extend([tuple(point) for point in point_list]) array = np.array(discretized) size = array.max(axis=0) - array.min(axis=0) + 1 canvas = np.ones(size, dtype=bool) offset = array.min(axis=0) for idx, _ in np.ndenumerate(canvas): if idx in discretized: canvas[idx] = 0 # mpl_tools.visualize(canvas) centers, radii = extract_spheres(canvas) vtk_tools.visualize(solid_nodes, solid_edges, centers, radii)<|fim▁end|>
import numpy as np from scipy import ndimage, spatial import bresenham
<|file_name|>MessageStreamDecoder.java<|end_file_name|><|fim▁begin|>/* * Created on Jan 25, 2005 * Created by Alon Rohter * Copyright (C) 2004-2005 Aelitis, All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * AELITIS, SAS au capital de 46,603.30 euros * 8 Allee Lenotre, La Grille Royale, 78600 Le Mesnil le Roi, France. * */ package com.aelitis.azureus.core.peermanager.messaging; import com.aelitis.azureus.core.networkmanager.Transport; import java.io.IOException; import java.nio.ByteBuffer; /** * Decodes a message stream into separate messages. */ public interface MessageStreamDecoder { /** * Decode message stream from the given transport. * @param transport to decode from * @param max_bytes to decode/read from the stream * @return number of bytes decoded * @throws IOException on decoding error */ public int performStreamDecode( Transport transport, int max_bytes ) throws IOException; /** * Get the messages decoded from the transport, if any, from the last decode op. * @return decoded messages, or null if no new complete messages were decoded */ public Message[] removeDecodedMessages(); /** * Get the number of protocol (overhead) bytes decoded from the transport, from the last decode op. * @return number of protocol bytes recevied */<|fim▁hole|> * Get the number of (piece) data bytes decoded from the transport, from the last decode op. * @return number of data bytes received */ public int getDataBytesDecoded(); /** * Get the percentage of the current message that has already been received (read from the transport). * @return percentage complete (0-99), or -1 if no message is currently being received */ public int getPercentDoneOfCurrentMessage(); /** * Pause message decoding. */ public void pauseDecoding(); /** * Resume message decoding. */ public void resumeDecoding(); /** * Destroy this decoder, i.e. perform cleanup. * @return any bytes already-read and still remaining within the decoder */ public ByteBuffer destroy(); }<|fim▁end|>
public int getProtocolBytesDecoded(); /**
<|file_name|>FurtherBasicBeanPane.java<|end_file_name|><|fim▁begin|>package com.fr.design.beans; import com.fr.stable.StringUtils; /** * * @author zhou * @since 2012-5-30下午12:12:42 */ public abstract class FurtherBasicBeanPane<T> extends BasicBeanPane<T> { /** * 是否是指定类型 * @param ob 对象 * @return 是否是指定类型 */ public abstract boolean accept(Object ob); <|fim▁hole|> /** * title应该是一个属性,不只是对话框的标题时用到,与其他组件结合时,也会用得到 * @return 绥化狂标题 */ @Deprecated public String title4PopupWindow(){ return StringUtils.EMPTY; } /** * 重置 */ public abstract void reset(); }<|fim▁end|>
<|file_name|>audioRecorder.js<|end_file_name|><|fim▁begin|>import { AudioEncoder } from './audioEncoder'; const getUserMedia = ((navigator) => { if (navigator.mediaDevices) { return navigator.mediaDevices.getUserMedia.bind(navigator.mediaDevices); } const legacyGetUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia; if (legacyGetUserMedia) { return (options) => new Promise((resolve, reject) => { legacyGetUserMedia.call(navigator, options, resolve, reject); }); } })(window.navigator); const AudioContext = window.AudioContext || window.webkitAudioContext; class AudioRecorder { isSupported() { return Boolean(getUserMedia) && Boolean(AudioContext); } createAudioContext() { if (this.audioContext) { return; } this.audioContext = new AudioContext(); } destroyAudioContext() { if (!this.audioContext) { return; } this.audioContext.close(); delete this.audioContext; } async createStream() { if (this.stream) { return; } this.stream = await getUserMedia({ audio: true }); } destroyStream() { if (!this.stream) { return; } this.stream.getAudioTracks().forEach((track) => track.stop()); delete this.stream; } async createEncoder() { if (this.encoder) { return; } const input = this.audioContext.createMediaStreamSource(this.stream); this.encoder = new AudioEncoder(input); } destroyEncoder() { if (!this.encoder) { return; } this.encoder.close(); delete this.encoder;<|fim▁hole|> try { await this.createAudioContext(); await this.createStream(); await this.createEncoder(); cb && cb.call(this, true); } catch (error) { console.error(error); this.destroyEncoder(); this.destroyStream(); this.destroyAudioContext(); cb && cb.call(this, false); } } stop(cb) { this.encoder.on('encoded', cb); this.encoder.close(); this.destroyEncoder(); this.destroyStream(); this.destroyAudioContext(); } } const instance = new AudioRecorder(); export { instance as AudioRecorder };<|fim▁end|>
} async start(cb) {
<|file_name|>test_requestor.py<|end_file_name|><|fim▁begin|>import datetime import unittest2 import urlparse from mock import Mock, ANY import svb from svb.six.moves.urllib import parse from svb.test.helper import SvbUnitTestCase VALID_API_METHODS = ('get', 'post', 'delete', 'patch') class GMT1(datetime.tzinfo): def utcoffset(self, dt): return datetime.timedelta(hours=1) def dst(self, dt): return datetime.timedelta(0) def tzname(self, dt): return "Europe/Prague" class APIHeaderMatcher(object): EXP_KEYS = [ 'Authorization', 'SVB-Version', 'User-Agent', 'X-SVB-Client-User-Agent', 'X-Timestamp', 'X-Signature', ] METHOD_EXTRA_KEYS = { "post": ["Content-Type"], "patch": ["Content-Type"], } def __init__(self, api_key=None, extra={}, request_method=None, user_agent=None, app_info=None): self.request_method = request_method self.api_key = api_key or svb.api_key self.extra = extra self.user_agent = user_agent self.app_info = app_info def __eq__(self, other): return (self._keys_match(other) and self._auth_match(other) and self._user_agent_match(other) and self._x_svb_ua_contains_app_info(other) and self._extra_match(other)) def _keys_match(self, other): expected_keys = list(set(self.EXP_KEYS + self.extra.keys())) if self.request_method is not None and self.request_method in \ self.METHOD_EXTRA_KEYS: expected_keys.extend(self.METHOD_EXTRA_KEYS[self.request_method]) return (sorted(other.keys()) == sorted(expected_keys)) def _auth_match(self, other): return other['Authorization'] == "Bearer %s" % (self.api_key,) def _user_agent_match(self, other): if self.user_agent is not None: return other['User-Agent'] == self.user_agent return True def _x_svb_ua_contains_app_info(self, other): if self.app_info: ua = svb.util.json.loads(other['X-SVB-Client-User-Agent']) if 'application' not in ua: return False return ua['application'] == self.app_info return True def _extra_match(self, other): for k, v in self.extra.iteritems(): if other[k] != v: return False return True class JSONMatcher(object): def ordered(self, obj): if isinstance(obj, dict): return sorted((k, self.ordered(str(v))) for k, v in obj.items()) if isinstance(obj, list): return sorted(self.ordered(str(x)) for x in obj) else: return obj def __init__(self, expected): if isinstance(expected, dict): self.expected = self.ordered(expected) elif isinstance(expected, svb.six.text_type): self.expected = self.ordered(svb.util.json.loads(expected)) def __eq__(self, other): return self.expected == self.ordered(svb.util.json.loads(other)) class QueryMatcher(object): def __init__(self, expected): self.expected = sorted(expected) def __eq__(self, other): query = parse.urlsplit(other).query or other parsed = svb.util.parse_qsl(query) return self.expected == sorted(parsed) class UrlMatcher(object): def __init__(self, expected): self.exp_parts = parse.urlsplit(expected) def __eq__(self, other): other_parts = parse.urlsplit(other) for part in ('scheme', 'netloc', 'path', 'fragment'): expected = getattr(self.exp_parts, part) actual = getattr(other_parts, part) if expected != actual: print 'Expected %s "%s" but got "%s"' % ( part, expected, actual) return False q_matcher = QueryMatcher(svb.util.parse_qsl(self.exp_parts.query)) return q_matcher == other class APIRequestorRequestTests(SvbUnitTestCase): ENCODE_INPUTS = { 'dict': { 'astring': 'bar', 'anint': 5, 'anull': None, 'adatetime': datetime.datetime(2013, 1, 1, tzinfo=GMT1()), 'atuple': (1, 2), 'adict': {'foo': 'bar', 'boz': 5}, 'alist': ['foo', 'bar'], }, 'list': [1, 'foo', 'baz'], 'string': 'boo', 'unicode': u'\u1234', 'datetime': datetime.datetime(2013, 1, 1, second=1, tzinfo=GMT1()), 'none': None, } ENCODE_EXPECTATIONS = { 'dict': [ ('%s[astring]', 'bar'), ('%s[anint]', 5), ('%s[adatetime]', 1356994800), ('%s[adict][foo]', 'bar'), ('%s[adict][boz]', 5), ('%s[alist][]', 'foo'), ('%s[alist][]', 'bar'), ('%s[atuple][]', 1), ('%s[atuple][]', 2), ], 'list': [ ('%s[]', 1), ('%s[]', 'foo'), ('%s[]', 'baz'), ], 'string': [('%s', 'boo')], 'unicode': [('%s', svb.util.utf8(u'\u1234'))], 'datetime': [('%s', 1356994801)], 'none': [], } def setUp(self): super(APIRequestorRequestTests, self).setUp() self.http_client = Mock(svb.http_client.HTTPClient) self.http_client._verify_ssl_certs = True self.http_client.name = 'mockclient' self.requestor = svb.api_requestor.APIRequestor( client=self.http_client) def mock_response(self, return_body, return_code, requestor=None, headers=None): if not requestor: requestor = self.requestor self.http_client.request = Mock( return_value=(return_body, return_code, headers or {})) def check_call(self, meth, abs_url=None, headers=None, post_data=None, requestor=None): if not abs_url: abs_url = 'https://api.svb.com%s' % (self.valid_path,) if not requestor: requestor = self.requestor if not headers: headers = APIHeaderMatcher(request_method=meth) self.http_client.request.assert_called_with( meth, abs_url, headers, post_data) @property def valid_path(self): return '/foo' def encoder_check(self, key): stk_key = "my%s" % (key,) value = self.ENCODE_INPUTS[key] expectation = [(k % (stk_key,), v) for k, v in self.ENCODE_EXPECTATIONS[key]] stk = [] fn = getattr(svb.api_requestor.APIRequestor, "encode_%s" % (key,)) fn(stk, stk_key, value) if isinstance(value, dict): expectation.sort() stk.sort() self.assertEqual(expectation, stk) def _test_encode_naive_datetime(self): stk = [] svb.api_requestor.APIRequestor.encode_datetime( stk, 'test', datetime.datetime(2013, 1, 1)) # Naive datetimes will encode differently depending on your system # local time. Since we don't know the local time of your system, # we just check that naive encodings are within 24 hours of correct. self.assertTrue(60 * 60 * 24 > abs(stk[0][1] - 1356994800)) def test_param_encoding(self): self.mock_response('{}', 200) self.requestor.request('get', '', self.ENCODE_INPUTS) expectation = [] for type_, values in self.ENCODE_EXPECTATIONS.iteritems(): expectation.extend([(k % (type_,), str(v)) for k, v in values]) self.check_call('get', QueryMatcher(expectation)) def test_dictionary_list_encoding(self): params = { 'foo': { '0': { 'bar': 'bat', } } } encoded = list(svb.api_requestor._api_encode(params)) key, value = encoded[0] self.assertEqual('foo[0][bar]', key) self.assertEqual('bat', value) def test_url_construction(self): CASES = ( ('https://api.svb.com?foo=bar', '', {'foo': 'bar'}), ('https://api.svb.com?foo=bar', '?', {'foo': 'bar'}), ('https://api.svb.com', '', {}), ( 'https://api.svb.com/%20spaced?foo=bar%24&baz=5', '/%20spaced?foo=bar%24', {'baz': '5'} ), ( 'https://api.svb.com?foo=bar&foo=bar', '?foo=bar', {'foo': 'bar'} ), ) for expected, url, params in CASES: self.mock_response('{}', 200) self.requestor.request('get', url, params) self.check_call('get', expected) def test_empty_methods(self): for meth in VALID_API_METHODS: self.mock_response('{}', 200) body, key = self.requestor.request(meth, self.valid_path, {}) if meth == 'post' or meth == 'patch': post_data = svb.util.json.dumps({'data': {}}) else: post_data = None self.check_call(meth, post_data=post_data) self.assertEqual({}, body) def test_methods_with_params_and_response(self): for meth in VALID_API_METHODS: self.mock_response('{"foo": "bar", "baz": 6}', 200) params = { 'alist': [1, 2, 3], 'adict': {'frobble': 'bits'}, 'adatetime': datetime.datetime(2013, 1, 1, tzinfo=GMT1()) } encoded = ('adict%5Bfrobble%5D=bits&adatetime=1356994800&' 'alist%5B%5D=1&alist%5B%5D=2&alist%5B%5D=3') body, key = self.requestor.request(meth, self.valid_path, params) self.assertEqual({'foo': 'bar', 'baz': 6}, body) if meth == 'post' or meth == 'patch': x = JSONMatcher({"data": dict(svb.util.parse_qsl(encoded))}) self.check_call( meth, post_data=JSONMatcher(svb.util.json.dumps( { "data": dict(svb.util.parse_qsl(encoded)) }))) else: abs_url = "https://api.svb.com%s?%s" % ( self.valid_path, encoded) self.check_call(meth, abs_url=UrlMatcher(abs_url)) def test_uses_headers(self): self.mock_response('{}', 200) self.requestor.request('get', self.valid_path, {}, {'foo': 'bar'}) self.check_call('get', headers=APIHeaderMatcher(extra={'foo': 'bar'})) def test_uses_instance_key(self): key = 'fookey' requestor = svb.api_requestor.APIRequestor(key, client=self.http_client) self.mock_response('{}', 200, requestor=requestor) body, used_key = requestor.request('get', self.valid_path, {}) self.check_call('get', headers=APIHeaderMatcher( key, request_method='get'), requestor=requestor) self.assertEqual(key, used_key) def test_uses_instance_api_version(self): api_version = 'fooversion' requestor = svb.api_requestor.APIRequestor(api_version=api_version, client=self.http_client) self.mock_response('{}', 200, requestor=requestor)<|fim▁hole|> extra={'SVB-Version': 'fooversion'}, request_method='get'), requestor=requestor) def test_uses_instance_account(self): account = 'acct_foo' requestor = svb.api_requestor.APIRequestor(account=account, client=self.http_client) self.mock_response('{}', 200, requestor=requestor) requestor.request('get', self.valid_path, {}) self.check_call( 'get', requestor=requestor, headers=APIHeaderMatcher( extra={'SVB-Account': account}, request_method='get' ), ) def test_uses_app_info(self): try: old = svb.app_info svb.set_app_info( 'MyAwesomePlugin', url='https://myawesomeplugin.info', version='1.2.34' ) self.mock_response('{}', 200) self.requestor.request('get', self.valid_path, {}) ua = "SVB/v1 PythonBindings/%s" % (svb.version.VERSION,) ua += " MyAwesomePlugin/1.2.34 (https://myawesomeplugin.info)" header_matcher = APIHeaderMatcher( user_agent=ua, app_info={ 'name': 'MyAwesomePlugin', 'url': 'https://myawesomeplugin.info', 'version': '1.2.34', } ) self.check_call('get', headers=header_matcher) finally: svb.app_info = old def test_fails_without_api_key(self): svb.api_key = None self.assertRaises(svb.error.AuthenticationError, self.requestor.request, 'get', self.valid_path, {}) def test_not_found(self): self.mock_response('{"error": {}}', 404) self.assertRaises(svb.error.InvalidRequestError, self.requestor.request, 'get', self.valid_path, {}) def test_authentication_error(self): self.mock_response('{"error": {}}', 401) self.assertRaises(svb.error.AuthenticationError, self.requestor.request, 'get', self.valid_path, {}) def test_permissions_error(self): self.mock_response('{"error": {}}', 403) self.assertRaises(svb.error.PermissionError, self.requestor.request, 'get', self.valid_path, {}) def test_card_error(self): self.mock_response('{"error": {}}', 402) self.assertRaises(svb.error.CardError, self.requestor.request, 'get', self.valid_path, {}) def test_rate_limit_error(self): self.mock_response('{"error": {}}', 429) self.assertRaises(svb.error.RateLimitError, self.requestor.request, 'get', self.valid_path, {}) def test_server_error(self): self.mock_response('{"error": {}}', 500) self.assertRaises(svb.error.APIError, self.requestor.request, 'get', self.valid_path, {}) def test_invalid_json(self): self.mock_response('{', 200) self.assertRaises(svb.error.APIError, self.requestor.request, 'get', self.valid_path, {}) def test_invalid_method(self): self.assertRaises(svb.error.APIConnectionError, self.requestor.request, 'foo', 'bar') class DefaultClientTests(unittest2.TestCase): def setUp(self): svb.default_http_client = None svb.api_key = 'foo' def test_default_http_client_called(self): hc = Mock(svb.http_client.HTTPClient) hc._verify_ssl_certs = True hc.name = 'mockclient' hc.request = Mock(return_value=("{}", 200, {})) svb.default_http_client = hc svb.ACH.list(limit=3) hc.request.assert_called_with( 'get', 'https://api.svb.com/v1/ach?limit=3', ANY, None) def tearDown(self): svb.api_key = None svb.default_http_client = None if __name__ == '__main__': unittest2.main()<|fim▁end|>
requestor.request('get', self.valid_path, {}) self.check_call('get', headers=APIHeaderMatcher(
<|file_name|>p11.py<|end_file_name|><|fim▁begin|>import urllib.request import time preço = 99.99 #algum valor maior while preço >= 4.74: pagina = urllib.request.urlopen( 'http://beans.itcarlow.ie/prices-loyalty.html') texto = pagina.read().decode('utf8') onde = texto.find('>$') início = onde + 2 fim = início + 4 preço = float(texto[início:fim]) if preço >= 4.74: print ('Espera...') time.sleep(600) <|fim▁hole|><|fim▁end|>
print ('Comprar! Preço: %5.2f' %preço)
<|file_name|>jquery.js<|end_file_name|><|fim▁begin|>/* * jQuery - New Wave Javascript * * Copyright (c) 2006 John Resig (jquery.com) * Dual licensed under the MIT (MIT-LICENSE.txt) * and GPL (GPL-LICENSE.txt) licenses. * * $Date: 2006-10-27 23:14:48 -0400 (Fri, 27 Oct 2006) $ * $Rev: 509 $ */ // Global undefined variable window.undefined = window.undefined; function jQuery(a,c) { // Shortcut for document ready (because $(document).each() is silly) if ( a && a.constructor == Function && jQuery.fn.ready ) return jQuery(document).ready(a); // Make sure that a selection was provided a = a || jQuery.context || document; // Watch for when a jQuery object is passed as the selector if ( a.jquery ) return $( jQuery.merge( a, [] ) ); // Watch for when a jQuery object is passed at the context if ( c && c.jquery ) return $( c ).find(a); // If the context is global, return a new object if ( window == this ) return new jQuery(a,c); // Handle HTML strings var m = /^[^<]*(<.+>)[^>]*$/.exec(a); if ( m ) a = jQuery.clean( [ m[1] ] ); // Watch for when an array is passed in this.get( a.constructor == Array || a.length && !a.nodeType && a[0] != undefined && a[0].nodeType ? // Assume that it is an array of DOM Elements jQuery.merge( a, [] ) : // Find the matching elements and save them for later jQuery.find( a, c ) ); // See if an extra function was provided var fn = arguments[ arguments.length - 1 ]; // If so, execute it in context if ( fn && fn.constructor == Function ) this.each(fn); } // Map over the $ in case of overwrite if ( $ ) jQuery._$ = $; // Map the jQuery namespace to the '$' one var $ = jQuery; jQuery.fn = jQuery.prototype = { jquery: "$Rev: 509 $", size: function() { return this.length; }, get: function( num ) { // Watch for when an array (of elements) is passed in if ( num && num.constructor == Array ) { // Use a tricky hack to make the jQuery object // look and feel like an array this.length = 0; [].push.apply( this, num ); return this; } else return num == undefined ? // Return a 'clean' array jQuery.map( this, function(a){ return a } ) : // Return just the object this[num]; }, each: function( fn, args ) { return jQuery.each( this, fn, args ); }, index: function( obj ) { var pos = -1; this.each(function(i){ if ( this == obj ) pos = i; }); return pos; }, attr: function( key, value, type ) { // Check to see if we're setting style values return key.constructor != String || value != undefined ? this.each(function(){ // See if we're setting a hash of styles if ( value == undefined ) // Set all the styles for ( var prop in key ) jQuery.attr( type ? this.style : this, prop, key[prop] ); // See if we're setting a single key/value style else jQuery.attr( type ? this.style : this, key, value ); }) : // Look for the case where we're accessing a style value jQuery[ type || "attr" ]( this[0], key ); }, css: function( key, value ) { return this.attr( key, value, "curCSS" ); }, text: function(e) { e = e || this; var t = ""; for ( var j = 0; j < e.length; j++ ) { var r = e[j].childNodes; for ( var i = 0; i < r.length; i++ ) t += r[i].nodeType != 1 ? r[i].nodeValue : jQuery.fn.text([ r[i] ]); } return t; }, wrap: function() { // The elements to wrap the target around var a = jQuery.clean(arguments); // Wrap each of the matched elements individually return this.each(function(){ // Clone the structure that we're using to wrap var b = a[0].cloneNode(true); // Insert it before the element to be wrapped this.parentNode.insertBefore( b, this ); // Find he deepest point in the wrap structure while ( b.firstChild ) b = b.firstChild; // Move the matched element to within the wrap structure b.appendChild( this ); }); }, append: function() { return this.domManip(arguments, true, 1, function(a){ this.appendChild( a ); }); }, prepend: function() { return this.domManip(arguments, true, -1, function(a){ this.insertBefore( a, this.firstChild ); }); }, before: function() { return this.domManip(arguments, false, 1, function(a){ this.parentNode.insertBefore( a, this ); }); }, after: function() { return this.domManip(arguments, false, -1, function(a){ this.parentNode.insertBefore( a, this.nextSibling ); }); }, end: function() { return this.get( this.stack.pop() ); }, find: function(t) { return this.pushStack( jQuery.map( this, function(a){ return jQuery.find(t,a); }), arguments ); }, clone: function(deep) { return this.pushStack( jQuery.map( this, function(a){ return a.cloneNode( deep != undefined ? deep : true ); }), arguments ); }, filter: function(t) { return this.pushStack( t.constructor == Array && jQuery.map(this,function(a){ for ( var i = 0; i < t.length; i++ ) if ( jQuery.filter(t[i],[a]).r.length ) return a; }) || t.constructor == Boolean && ( t ? this.get() : [] ) || t.constructor == Function && jQuery.grep( this, t ) || jQuery.filter(t,this).r, arguments ); }, not: function(t) { return this.pushStack( t.constructor == String ? jQuery.filter(t,this,false).r : jQuery.grep(this,function(a){ return a != t; }), arguments ); }, add: function(t) { return this.pushStack( jQuery.merge( this, t.constructor == String ? jQuery.find(t) : t.constructor == Array ? t : [t] ), arguments ); }, is: function(expr) { return expr ? jQuery.filter(expr,this).r.length > 0 : this.length > 0; }, domManip: function(args, table, dir, fn){ var clone = this.size() > 1; var a = jQuery.clean(args); return this.each(function(){ var obj = this; if ( table && this.nodeName == "TABLE" && a[0].nodeName != "THEAD" ) { var tbody = this.getElementsByTagName("tbody"); if ( !tbody.length ) { obj = document.createElement("tbody"); this.appendChild( obj ); } else obj = tbody[0]; } for ( var i = ( dir < 0 ? a.length - 1 : 0 ); i != ( dir < 0 ? dir : a.length ); i += dir ) { fn.apply( obj, [ clone ? a[i].cloneNode(true) : a[i] ] ); } }); }, pushStack: function(a,args) { var fn = args && args[args.length-1]; if ( !fn || fn.constructor != Function ) { if ( !this.stack ) this.stack = []; this.stack.push( this.get() ); this.get( a ); } else { var old = this.get(); this.get( a ); if ( fn.constructor == Function ) return this.each( fn ); this.get( old ); } return this; } }; jQuery.extend = jQuery.fn.extend = function(obj,prop) { if ( !prop ) { prop = obj; obj = this; } for ( var i in prop ) obj[i] = prop[i]; return obj; }; jQuery.extend({ init: function(){ jQuery.initDone = true; jQuery.each( jQuery.macros.axis, function(i,n){ jQuery.fn[ i ] = function(a) { var ret = jQuery.map(this,n); if ( a && a.constructor == String ) ret = jQuery.filter(a,ret).r; return this.pushStack( ret, arguments ); }; }); jQuery.each( jQuery.macros.to, function(i,n){ jQuery.fn[ i ] = function(){ var a = arguments; return this.each(function(){ for ( var j = 0; j < a.length; j++ ) $(a[j])[n]( this ); }); }; }); jQuery.each( jQuery.macros.each, function(i,n){ jQuery.fn[ i ] = function() { return this.each( n, arguments ); }; }); jQuery.each( jQuery.macros.filter, function(i,n){ jQuery.fn[ n ] = function(num,fn) { return this.filter( ":" + n + "(" + num + ")", fn ); }; }); jQuery.each( jQuery.macros.attr, function(i,n){ n = n || i; jQuery.fn[ i ] = function(h) { return h == undefined ? this.length ? this[0][n] : null : this.attr( n, h ); }; }); jQuery.each( jQuery.macros.css, function(i,n){ jQuery.fn[ n ] = function(h) { return h == undefined ? ( this.length ? jQuery.css( this[0], n ) : null ) : this.css( n, h ); }; }); }, each: function( obj, fn, args ) { if ( obj.length == undefined ) for ( var i in obj ) fn.apply( obj[i], args || [i, obj[i]] ); else for ( var i = 0; i < obj.length; i++ ) fn.apply( obj[i], args || [i, obj[i]] ); return obj; }, className: { add: function(o,c){ if (jQuery.className.has(o,c)) return; o.className += ( o.className ? " " : "" ) + c; }, remove: function(o,c){ o.className = !c ? "" : o.className.replace( new RegExp("(^|\\s*\\b[^-])"+c+"($|\\b(?=[^-]))", "g"), ""); }, has: function(e,a) { if ( e.className != undefined ) e = e.className; return new RegExp("(^|\\s)" + a + "(\\s|$)").test(e); } }, swap: function(e,o,f) { for ( var i in o ) { e.style["old"+i] = e.style[i]; e.style[i] = o[i]; } f.apply( e, [] ); for ( var i in o ) e.style[i] = e.style["old"+i]; }, css: function(e,p) { if ( p == "height" || p == "width" ) { var old = {}, oHeight, oWidth, d = ["Top","Bottom","Right","Left"]; for ( var i in d ) { old["padding" + d[i]] = 0; old["border" + d[i] + "Width"] = 0; } jQuery.swap( e, old, function() { if (jQuery.css(e,"display") != "none") { oHeight = e.offsetHeight; oWidth = e.offsetWidth; } else { e = $(e.cloneNode(true)).css({ visibility: "hidden", position: "absolute", display: "block" }).prependTo("body")[0]; oHeight = e.clientHeight; oWidth = e.clientWidth; e.parentNode.removeChild(e); } }); return p == "height" ? oHeight : oWidth; } else if ( p == "opacity" && jQuery.browser.msie ) return parseFloat( jQuery.curCSS(e,"filter").replace(/[^0-9.]/,"") ) || 1; return jQuery.curCSS( e, p ); }, curCSS: function(elem, prop, force) { var ret; if (!force && elem.style[prop]) { ret = elem.style[prop]; } else if (elem.currentStyle) { var newProp = prop.replace(/\-(\w)/g,function(m,c){return c.toUpperCase()}); ret = elem.currentStyle[prop] || elem.currentStyle[newProp]; } else if (document.defaultView && document.defaultView.getComputedStyle) { prop = prop.replace(/([A-Z])/g,"-$1").toLowerCase(); var cur = document.defaultView.getComputedStyle(elem, null); if ( cur ) ret = cur.getPropertyValue(prop); else if ( prop == 'display' ) ret = 'none'; else jQuery.swap(elem, { display: 'block' }, function() { ret = document.defaultView.getComputedStyle(this,null).getPropertyValue(prop); }); } return ret; }, clean: function(a) { var r = []; for ( var i = 0; i < a.length; i++ ) { if ( a[i].constructor == String ) { var table = ""; if ( !a[i].indexOf("<thead") || !a[i].indexOf("<tbody") ) { table = "thead"; a[i] = "<table>" + a[i] + "</table>"; } else if ( !a[i].indexOf("<tr") ) { table = "tr"; a[i] = "<table>" + a[i] + "</table>"; } else if ( !a[i].indexOf("<td") || !a[i].indexOf("<th") ) { table = "td"; a[i] = "<table><tbody><tr>" + a[i] + "</tr></tbody></table>"; } var div = document.createElement("div"); div.innerHTML = a[i]; if ( table ) { div = div.firstChild; if ( table != "thead" ) div = div.firstChild; if ( table == "td" ) div = div.firstChild; } for ( var j = 0; j < div.childNodes.length; j++ ) r.push( div.childNodes[j] ); } else if ( a[i].jquery || a[i].length && !a[i].nodeType ) for ( var k = 0; k < a[i].length; k++ ) r.push( a[i][k] ); else if ( a[i] !== null ) r.push( a[i].nodeType ? a[i] : document.createTextNode(a[i].toString()) ); } return r; }, expr: { "": "m[2]== '*'||a.nodeName.toUpperCase()==m[2].toUpperCase()", "#": "a.getAttribute('id')&&a.getAttribute('id')==m[2]", ":": { // Position Checks lt: "i<m[3]-0", gt: "i>m[3]-0", nth: "m[3]-0==i", eq: "m[3]-0==i", first: "i==0", last: "i==r.length-1", even: "i%2==0", odd: "i%2", // Child Checks "first-child": "jQuery.sibling(a,0).cur", "last-child": "jQuery.sibling(a,0).last", "only-child": "jQuery.sibling(a).length==1", // Parent Checks parent: "a.childNodes.length", empty: "!a.childNodes.length", // Text Check contains: "(a.innerText||a.innerHTML).indexOf(m[3])>=0", // Visibility visible: "a.type!='hidden'&&jQuery.css(a,'display')!='none'&&jQuery.css(a,'visibility')!='hidden'", hidden: "a.type=='hidden'||jQuery.css(a,'display')=='none'||jQuery.css(a,'visibility')=='hidden'", // Form elements enabled: "!a.disabled", disabled: "a.disabled", checked: "a.checked", selected: "a.selected" }, ".": "jQuery.className.has(a,m[2])", "@": { "=": "z==m[4]", "!=": "z!=m[4]", "^=": "!z.indexOf(m[4])", "$=": "z.substr(z.length - m[4].length,m[4].length)==m[4]", "*=": "z.indexOf(m[4])>=0", "": "z" }, "[": "jQuery.find(m[2],a).length" }, token: [ "\\.\\.|/\\.\\.", "a.parentNode", ">|/", "jQuery.sibling(a.firstChild)", "\\+", "jQuery.sibling(a).next", "~", function(a){ var r = []; var s = jQuery.sibling(a); if ( s.n > 0 ) for ( var i = s.n; i < s.length; i++ ) r.push( s[i] ); return r; } ], find: function( t, context ) { // Make sure that the context is a DOM Element if ( context && context.nodeType == undefined ) context = null; // Set the correct context (if none is provided) context = context || jQuery.context || document; if ( t.constructor != String ) return [t]; if ( !t.indexOf("//") ) { context = context.documentElement; t = t.substr(2,t.length); } else if ( !t.indexOf("/") ) { context = context.documentElement; t = t.substr(1,t.length); // FIX Assume the root element is right :( if ( t.indexOf("/") >= 1 ) t = t.substr(t.indexOf("/"),t.length); } var ret = [context]; var done = []; var last = null; while ( t.length > 0 && last != t ) { var r = []; last = t; t = jQuery.trim(t).replace( /^\/\//i, "" ); var foundToken = false; for ( var i = 0; i < jQuery.token.length; i += 2 ) { var re = new RegExp("^(" + jQuery.token[i] + ")"); var m = re.exec(t); if ( m ) { r = ret = jQuery.map( ret, jQuery.token[i+1] ); t = jQuery.trim( t.replace( re, "" ) ); foundToken = true; } } if ( !foundToken ) { if ( !t.indexOf(",") || !t.indexOf("|") ) { if ( ret[0] == context ) ret.shift(); done = jQuery.merge( done, ret ); r = ret = [context]; t = " " + t.substr(1,t.length); } else { var re2 = /^([#.]?)([a-z0-9\\*_-]*)/i; var m = re2.exec(t); if ( m[1] == "#" ) { // Ummm, should make this work in all XML docs var oid = document.getElementById(m[2]); r = ret = oid ? [oid] : []; t = t.replace( re2, "" ); } else { if ( !m[2] || m[1] == "." ) m[2] = "*"; for ( var i = 0; i < ret.length; i++ ) r = jQuery.merge( r, m[2] == "*" ? jQuery.getAll(ret[i]) : ret[i].getElementsByTagName(m[2]) ); } } } if ( t ) { var val = jQuery.filter(t,r); ret = r = val.r; t = jQuery.trim(val.t); } } if ( ret && ret[0] == context ) ret.shift(); done = jQuery.merge( done, ret ); return done; }, getAll: function(o,r) { r = r || []; var s = o.childNodes; for ( var i = 0; i < s.length; i++ ) if ( s[i].nodeType == 1 ) { r.push( s[i] ); jQuery.getAll( s[i], r ); } return r; }, attr: function(elem, name, value){ var fix = { "for": "htmlFor", "class": "className", "float": "cssFloat", innerHTML: "innerHTML", className: "className" }; if ( fix[name] ) { if ( value != undefined ) elem[fix[name]] = value; return elem[fix[name]]; } else if ( elem.getAttribute ) { if ( value != undefined ) elem.setAttribute( name, value ); return elem.getAttribute( name, 2 ); } else { name = name.replace(/-([a-z])/ig,function(z,b){return b.toUpperCase();}); if ( value != undefined ) elem[name] = value; return elem[name]; } }, // The regular expressions that power the parsing engine parse: [ // Match: [@value='test'], [@foo] [ "\\[ *(@)S *([!*$^=]*) *Q\\]", 1 ], // Match: [div], [div p] [ "(\\[)Q\\]", 0 ], // Match: :contains('foo') [ "(:)S\\(Q\\)", 0 ], // Match: :even, :last-chlid [ "([:.#]*)S", 0 ] ], filter: function(t,r,not) { // Figure out if we're doing regular, or inverse, filtering var g = not !== false ? jQuery.grep : function(a,f) {return jQuery.grep(a,f,true);}; while ( t && /^[a-z[({<*:.#]/i.test(t) ) { var p = jQuery.parse; for ( var i = 0; i < p.length; i++ ) { var re = new RegExp( "^" + p[i][0] // Look for a string-like sequence .replace( 'S', "([a-z*_-][a-z0-9_-]*)" ) // Look for something (optionally) enclosed with quotes .replace( 'Q', " *'?\"?([^'\"]*?)'?\"? *" ), "i" ); var m = re.exec( t ); if ( m ) { // Re-organize the match if ( p[i][1] ) m = ["", m[1], m[3], m[2], m[4]]; // Remove what we just matched t = t.replace( re, "" ); break; } } // :not() is a special case that can be optomized by // keeping it out of the expression list if ( m[1] == ":" && m[2] == "not" ) r = jQuery.filter(m[3],r,false).r; // Otherwise, find the expression to execute else { var f = jQuery.expr[m[1]]; if ( f.constructor != String ) f = jQuery.expr[m[1]][m[2]]; // Build a custom macro to enclose it eval("f = function(a,i){" + ( m[1] == "@" ? "z=jQuery.attr(a,m[3]);" : "" ) + "return " + f + "}"); // Execute it against the current filter r = g( r, f ); } } // Return an array of filtered elements (r) // and the modified expression string (t) return { r: r, t: t }; }, trim: function(t){ return t.replace(/^\s+|\s+$/g, ""); }, parents: function( elem ){ var matched = []; var cur = elem.parentNode; while ( cur && cur != document ) { matched.push( cur ); cur = cur.parentNode; } return matched; }, sibling: function(elem, pos, not) { var elems = []; var siblings = elem.parentNode.childNodes; for ( var i = 0; i < siblings.length; i++ ) { if ( not === true && siblings[i] == elem ) continue; if ( siblings[i].nodeType == 1 ) elems.push( siblings[i] ); if ( siblings[i] == elem ) elems.n = elems.length - 1; } return jQuery.extend( elems, { last: elems.n == elems.length - 1, cur: pos == "even" && elems.n % 2 == 0 || pos == "odd" && elems.n % 2 || elems[pos] == elem, prev: elems[elems.n - 1], next: elems[elems.n + 1] }); }, merge: function(first, second) { var result = []; // Move b over to the new array (this helps to avoid // StaticNodeList instances) for ( var k = 0; k < first.length; k++ ) result[k] = first[k]; // Now check for duplicates between a and b and only // add the unique items for ( var i = 0; i < second.length; i++ ) { var noCollision = true; // The collision-checking process for ( var j = 0; j < first.length; j++ ) if ( second[i] == first[j] ) noCollision = false; // If the item is unique, add it if ( noCollision ) result.push( second[i] ); } return result; }, grep: function(elems, fn, inv) { // If a string is passed in for the function, make a function // for it (a handy shortcut) if ( fn.constructor == String ) fn = new Function("a","i","return " + fn); var result = []; // Go through the array, only saving the items // that pass the validator function for ( var i = 0; i < elems.length; i++ ) if ( !inv && fn(elems[i],i) || inv && !fn(elems[i],i) ) result.push( elems[i] ); return result; }, map: function(elems, fn) { // If a string is passed in for the function, make a function // for it (a handy shortcut) if ( fn.constructor == String ) fn = new Function("a","return " + fn); var result = []; // Go through the array, translating each of the items to their // new value (or values). for ( var i = 0; i < elems.length; i++ ) { var val = fn(elems[i],i); if ( val !== null && val != undefined ) { if ( val.constructor != Array ) val = [val]; result = jQuery.merge( result, val ); } } return result; }, /* * A number of helper functions used for managing events. * Many of the ideas behind this code orignated from Dean Edwards' addEvent library. */ event: { // Bind an event to an element // Original by Dean Edwards add: function(element, type, handler) { // For whatever reason, IE has trouble passing the window object // around, causing it to be cloned in the process if ( jQuery.browser.msie && element.setInterval != undefined ) element = window; // Make sure that the function being executed has a unique ID if ( !handler.guid ) handler.guid = this.guid++; // Init the element's event structure if (!element.events) element.events = {}; // Get the current list of functions bound to this event var handlers = element.events[type]; // If it hasn't been initialized yet if (!handlers) { // Init the event handler queue handlers = element.events[type] = {}; // Remember an existing handler, if it's already there if (element["on" + type]) handlers[0] = element["on" + type]; } // Add the function to the element's handler list handlers[handler.guid] = handler; // And bind the global event handler to the element element["on" + type] = this.handle; // Remember the function in a global list (for triggering) if (!this.global[type]) this.global[type] = []; this.global[type].push( element ); }, guid: 1, global: {}, // Detach an event or set of events from an element remove: function(element, type, handler) { if (element.events) if (type && element.events[type]) if ( handler ) delete element.events[type][handler.guid]; else for ( var i in element.events[type] ) delete element.events[type][i]; else for ( var j in element.events ) this.remove( element, j ); }, trigger: function(type,data,element) { // Touch up the incoming data data = data || []; // Handle a global trigger if ( !element ) { var g = this.global[type]; if ( g ) for ( var i = 0; i < g.length; i++ ) this.trigger( type, data, g[i] ); // Handle triggering a single element } else if ( element["on" + type] ) { // Pass along a fake event data.unshift( this.fix({ type: type, target: element }) ); // Trigger the event element["on" + type].apply( element, data ); } }, handle: function(event) { if ( typeof jQuery == "undefined" ) return; event = event || jQuery.event.fix( window.event ); // If no correct event was found, fail if ( !event ) return; var returnValue = true; var c = this.events[event.type]; for ( var j in c ) { if ( c[j].apply( this, [event] ) === false ) { event.preventDefault(); event.stopPropagation(); returnValue = false; } } return returnValue; }, fix: function(event) { if ( event ) { event.preventDefault = function() { this.returnValue = false; }; event.stopPropagation = function() { this.cancelBubble = true; }; } return event; } } }); new function() { var b = navigator.userAgent.toLowerCase(); // Figure out what browser is being used jQuery.browser = { safari: /webkit/.test(b), opera: /opera/.test(b), msie: /msie/.test(b) && !/opera/.test(b), mozilla: /mozilla/.test(b) && !/compatible/.test(b) }; // Check to see if the W3C box model is being used jQuery.boxModel = !jQuery.browser.msie || document.compatMode == "CSS1Compat"; }; jQuery.macros = { to: { appendTo: "append", prependTo: "prepend", insertBefore: "before", insertAfter: "after" }, css: "width,height,top,left,position,float,overflow,color,background".split(","), filter: [ "eq", "lt", "gt", "contains" ], attr: { val: "value", html: "innerHTML", id: null, title: null, name: null, href: null, src: null, rel: null }, axis: { <|fim▁hole|> parent: "a.parentNode", ancestors: jQuery.parents, parents: jQuery.parents, next: "jQuery.sibling(a).next", prev: "jQuery.sibling(a).prev", siblings: jQuery.sibling, children: "a.childNodes" }, each: { removeAttr: function( key ) { this.removeAttribute( key ); }, show: function(){ this.style.display = this.oldblock ? this.oldblock : ""; if ( jQuery.css(this,"display") == "none" ) this.style.display = "block"; }, hide: function(){ this.oldblock = this.oldblock || jQuery.css(this,"display"); if ( this.oldblock == "none" ) this.oldblock = "block"; this.style.display = "none"; }, toggle: function(){ $(this)[ $(this).is(":hidden") ? "show" : "hide" ].apply( $(this), arguments ); }, addClass: function(c){ jQuery.className.add(this,c); }, removeClass: function(c){ jQuery.className.remove(this,c); }, toggleClass: function( c ){ jQuery.className[ jQuery.className.has(this,c) ? "remove" : "add" ](this,c); }, remove: function(a){ if ( !a || jQuery.filter( [this], a ).r ) this.parentNode.removeChild( this ); }, empty: function(){ while ( this.firstChild ) this.removeChild( this.firstChild ); }, bind: function( type, fn ) { if ( fn.constructor == String ) fn = new Function("e", ( !fn.indexOf(".") ? "$(this)" : "return " ) + fn); jQuery.event.add( this, type, fn ); }, unbind: function( type, fn ) { jQuery.event.remove( this, type, fn ); }, trigger: function( type, data ) { jQuery.event.trigger( type, data, this ); } } }; jQuery.init();jQuery.fn.extend({ // We're overriding the old toggle function, so // remember it for later _toggle: jQuery.fn.toggle, toggle: function(a,b) { // If two functions are passed in, we're // toggling on a click return a && b && a.constructor == Function && b.constructor == Function ? this.click(function(e){ // Figure out which function to execute this.last = this.last == a ? b : a; // Make sure that clicks stop e.preventDefault(); // and execute the function return this.last.apply( this, [e] ) || false; }) : // Otherwise, execute the old toggle function this._toggle.apply( this, arguments ); }, hover: function(f,g) { // A private function for haandling mouse 'hovering' function handleHover(e) { // Check if mouse(over|out) are still within the same parent element var p = (e.type == "mouseover" ? e.fromElement : e.toElement) || e.relatedTarget; // Traverse up the tree while ( p && p != this ) p = p.parentNode; // If we actually just moused on to a sub-element, ignore it if ( p == this ) return false; // Execute the right function return (e.type == "mouseover" ? f : g).apply(this, [e]); } // Bind the function to the two event listeners return this.mouseover(handleHover).mouseout(handleHover); }, ready: function(f) { // If the DOM is already ready if ( jQuery.isReady ) // Execute the function immediately f.apply( document ); // Otherwise, remember the function for later else { // Add the function to the wait list jQuery.readyList.push( f ); } return this; } }); jQuery.extend({ /* * All the code that makes DOM Ready work nicely. */ isReady: false, readyList: [], // Handle when the DOM is ready ready: function() { // Make sure that the DOM is not already loaded if ( !jQuery.isReady ) { // Remember that the DOM is ready jQuery.isReady = true; // If there are functions bound, to execute if ( jQuery.readyList ) { // Execute all of them for ( var i = 0; i < jQuery.readyList.length; i++ ) jQuery.readyList[i].apply( document ); // Reset the list of functions jQuery.readyList = null; } } } }); new function(){ var e = ("blur,focus,load,resize,scroll,unload,click,dblclick," + "mousedown,mouseup,mousemove,mouseover,mouseout,change,reset,select," + "submit,keydown,keypress,keyup,error").split(","); // Go through all the event names, but make sure that // it is enclosed properly for ( var i = 0; i < e.length; i++ ) new function(){ var o = e[i]; // Handle event binding jQuery.fn[o] = function(f){ return f ? this.bind(o, f) : this.trigger(o); }; // Handle event unbinding jQuery.fn["un"+o] = function(f){ return this.unbind(o, f); }; // Finally, handle events that only fire once jQuery.fn["one"+o] = function(f){ // Attach the event listener return this.each(function(){ var count = 0; // Add the event jQuery.event.add( this, o, function(e){ // If this function has already been executed, stop if ( count++ ) return; // And execute the bound function return f.apply(this, [e]); }); }); }; }; // If Mozilla is used if ( jQuery.browser.mozilla || jQuery.browser.opera ) { // Use the handy event callback document.addEventListener( "DOMContentLoaded", jQuery.ready, false ); // If IE is used, use the excellent hack by Matthias Miller // http://www.outofhanwell.com/blog/index.php?title=the_window_onload_problem_revisited } else if ( jQuery.browser.msie ) { // Only works if you document.write() it document.write("<scr" + "ipt id=__ie_init defer=true " + "src=//:><\/script>"); // Use the defer script hack var script = document.getElementById("__ie_init"); script.onreadystatechange = function() { if ( this.readyState == "complete" ) jQuery.ready(); }; // Clear from memory script = null; // If Safari is used } else if ( jQuery.browser.safari ) { // Continually check to see if the document.readyState is valid jQuery.safariTimer = setInterval(function(){ // loaded and complete are both valid states if ( document.readyState == "loaded" || document.readyState == "complete" ) { // If either one are found, remove the timer clearInterval( jQuery.safariTimer ); jQuery.safariTimer = null; // and execute any waiting functions jQuery.ready(); } }, 10); } // A fallback to window.onload, that will always work jQuery.event.add( window, "load", jQuery.ready ); }; jQuery.fn.extend({ // overwrite the old show method _show: jQuery.fn.show, show: function(speed,callback){ return speed ? this.animate({ height: "show", width: "show", opacity: "show" }, speed, callback) : this._show(); }, // Overwrite the old hide method _hide: jQuery.fn.hide, hide: function(speed,callback){ return speed ? this.animate({ height: "hide", width: "hide", opacity: "hide" }, speed, callback) : this._hide(); }, slideDown: function(speed,callback){ return this.animate({height: "show"}, speed, callback); }, slideUp: function(speed,callback){ return this.animate({height: "hide"}, speed, callback); }, slideToggle: function(speed,callback){ return this.each(function(){ var state = $(this).is(":hidden") ? "show" : "hide"; $(this).animate({height: state}, speed, callback); }); }, fadeIn: function(speed,callback){ return this.animate({opacity: "show"}, speed, callback); }, fadeOut: function(speed,callback){ return this.animate({opacity: "hide"}, speed, callback); }, fadeTo: function(speed,to,callback){ return this.animate({opacity: to}, speed, callback); }, animate: function(prop,speed,callback) { return this.queue(function(){ this.curAnim = prop; for ( var p in prop ) { var e = new jQuery.fx( this, jQuery.speed(speed,callback), p ); if ( prop[p].constructor == Number ) e.custom( e.cur(), prop[p] ); else e[ prop[p] ]( prop ); } }); }, queue: function(type,fn){ if ( !fn ) { fn = type; type = "fx"; } return this.each(function(){ if ( !this.queue ) this.queue = {}; if ( !this.queue[type] ) this.queue[type] = []; this.queue[type].push( fn ); if ( this.queue[type].length == 1 ) fn.apply(this); }); } }); jQuery.extend({ setAuto: function(e,p) { if ( e.notAuto ) return; if ( p == "height" && e.scrollHeight != parseInt(jQuery.curCSS(e,p)) ) return; if ( p == "width" && e.scrollWidth != parseInt(jQuery.curCSS(e,p)) ) return; // Remember the original height var a = e.style[p]; // Figure out the size of the height right now var o = jQuery.curCSS(e,p,1); if ( p == "height" && e.scrollHeight != o || p == "width" && e.scrollWidth != o ) return; // Set the height to auto e.style[p] = e.currentStyle ? "" : "auto"; // See what the size of "auto" is var n = jQuery.curCSS(e,p,1); // Revert back to the original size if ( o != n && n != "auto" ) { e.style[p] = a; e.notAuto = true; } }, speed: function(s,o) { o = o || {}; if ( o.constructor == Function ) o = { complete: o }; var ss = { slow: 600, fast: 200 }; o.duration = (s && s.constructor == Number ? s : ss[s]) || 400; // Queueing o.oldComplete = o.complete; o.complete = function(){ jQuery.dequeue(this, "fx"); if ( o.oldComplete && o.oldComplete.constructor == Function ) o.oldComplete.apply( this ); }; return o; }, queue: {}, dequeue: function(elem,type){ type = type || "fx"; if ( elem.queue && elem.queue[type] ) { // Remove self elem.queue[type].shift(); // Get next function var f = elem.queue[type][0]; if ( f ) f.apply( elem ); } }, /* * I originally wrote fx() as a clone of moo.fx and in the process * of making it small in size the code became illegible to sane * people. You've been warned. */ fx: function( elem, options, prop ){ var z = this; // The users options z.o = { duration: options.duration || 400, complete: options.complete, step: options.step }; // The element z.el = elem; // The styles var y = z.el.style; // Simple function for setting a style value z.a = function(){ if ( options.step ) options.step.apply( elem, [ z.now ] ); if ( prop == "opacity" ) { if (z.now == 1) z.now = 0.9999; if (window.ActiveXObject) y.filter = "alpha(opacity=" + z.now*100 + ")"; else y.opacity = z.now; // My hate for IE will never die } else if ( parseInt(z.now) ) y[prop] = parseInt(z.now) + "px"; y.display = "block"; }; // Figure out the maximum number to run to z.max = function(){ return parseFloat( jQuery.css(z.el,prop) ); }; // Get the current size z.cur = function(){ var r = parseFloat( jQuery.curCSS(z.el, prop) ); return r && r > -10000 ? r : z.max(); }; // Start an animation from one number to another z.custom = function(from,to){ z.startTime = (new Date()).getTime(); z.now = from; z.a(); z.timer = setInterval(function(){ z.step(from, to); }, 13); }; // Simple 'show' function z.show = function( p ){ if ( !z.el.orig ) z.el.orig = {}; // Remember where we started, so that we can go back to it later z.el.orig[prop] = this.cur(); z.custom( 0, z.el.orig[prop] ); // Stupid IE, look what you made me do if ( prop != "opacity" ) y[prop] = "1px"; }; // Simple 'hide' function z.hide = function(){ if ( !z.el.orig ) z.el.orig = {}; // Remember where we started, so that we can go back to it later z.el.orig[prop] = this.cur(); z.o.hide = true; // Begin the animation z.custom(z.el.orig[prop], 0); }; // IE has trouble with opacity if it does not have layout if ( jQuery.browser.msie && !z.el.currentStyle.hasLayout ) y.zoom = "1"; // Remember the overflow of the element if ( !z.el.oldOverlay ) z.el.oldOverflow = jQuery.css( z.el, "overflow" ); // Make sure that nothing sneaks out y.overflow = "hidden"; // Each step of an animation z.step = function(firstNum, lastNum){ var t = (new Date()).getTime(); if (t > z.o.duration + z.startTime) { // Stop the timer clearInterval(z.timer); z.timer = null; z.now = lastNum; z.a(); z.el.curAnim[ prop ] = true; var done = true; for ( var i in z.el.curAnim ) if ( z.el.curAnim[i] !== true ) done = false; if ( done ) { // Reset the overflow y.overflow = z.el.oldOverflow; // Hide the element if the "hide" operation was done if ( z.o.hide ) y.display = 'none'; // Reset the property, if the item has been hidden if ( z.o.hide ) { for ( var p in z.el.curAnim ) { y[ p ] = z.el.orig[p] + ( p == "opacity" ? "" : "px" ); // set its height and/or width to auto if ( p == 'height' || p == 'width' ) jQuery.setAuto( z.el, p ); } } } // If a callback was provided, execute it if( done && z.o.complete && z.o.complete.constructor == Function ) // Execute the complete function z.o.complete.apply( z.el ); } else { // Figure out where in the animation we are and set the number var p = (t - this.startTime) / z.o.duration; z.now = ((-Math.cos(p*Math.PI)/2) + 0.5) * (lastNum-firstNum) + firstNum; // Perform the next step of the animation z.a(); } }; } }); // AJAX Plugin // Docs Here: // http://jquery.com/docs/ajax/ jQuery.fn.loadIfModified = function( url, params, callback ) { this.load( url, params, callback, 1 ); }; jQuery.fn.load = function( url, params, callback, ifModified ) { if ( url.constructor == Function ) return this.bind("load", url); callback = callback || function(){}; // Default to a GET request var type = "GET"; // If the second parameter was provided if ( params ) { // If it's a function if ( params.constructor == Function ) { // We assume that it's the callback callback = params; params = null; // Otherwise, build a param string } else { params = jQuery.param( params ); type = "POST"; } } var self = this; // Request the remote document jQuery.ajax( type, url, params,function(res, status){ if ( status == "success" || !ifModified && status == "notmodified" ) { // Inject the HTML into all the matched elements self.html(res.responseText).each( callback, [res.responseText, status] ); // Execute all the scripts inside of the newly-injected HTML $("script", self).each(function(){ if ( this.src ) $.getScript( this.src ); else eval.call( window, this.text || this.textContent || this.innerHTML || "" ); }); } else callback.apply( self, [res.responseText, status] ); }, ifModified); return this; }; // If IE is used, create a wrapper for the XMLHttpRequest object if ( jQuery.browser.msie ) XMLHttpRequest = function(){ return new ActiveXObject( navigator.userAgent.indexOf("MSIE 5") >= 0 ? "Microsoft.XMLHTTP" : "Msxml2.XMLHTTP" ); }; // Attach a bunch of functions for handling common AJAX events new function(){ var e = "ajaxStart,ajaxStop,ajaxComplete,ajaxError,ajaxSuccess".split(','); for ( var i = 0; i < e.length; i++ ) new function(){ var o = e[i]; jQuery.fn[o] = function(f){ return this.bind(o, f); }; }; }; jQuery.extend({ get: function( url, data, callback, type, ifModified ) { if ( data.constructor == Function ) { type = callback; callback = data; data = null; } if ( data ) url += "?" + jQuery.param(data); // Build and start the HTTP Request jQuery.ajax( "GET", url, null, function(r, status) { if ( callback ) callback( jQuery.httpData(r,type), status ); }, ifModified); }, getIfModified: function( url, data, callback, type ) { jQuery.get(url, data, callback, type, 1); }, getScript: function( url, data, callback ) { jQuery.get(url, data, callback, "script"); }, post: function( url, data, callback, type ) { // Build and start the HTTP Request jQuery.ajax( "POST", url, jQuery.param(data), function(r, status) { if ( callback ) callback( jQuery.httpData(r,type), status ); }); }, // timeout (ms) timeout: 0, ajaxTimeout: function(timeout) { jQuery.timeout = timeout; }, // Last-Modified header cache for next request lastModified: {}, ajax: function( type, url, data, ret, ifModified ) { // If only a single argument was passed in, // assume that it is a object of key/value pairs if ( !url ) { ret = type.complete; var success = type.success; var error = type.error; data = type.data; url = type.url; type = type.type; } // Watch for a new set of requests if ( ! jQuery.active++ ) jQuery.event.trigger( "ajaxStart" ); var requestDone = false; // Create the request object var xml = new XMLHttpRequest(); // Open the socket xml.open(type || "GET", url, true); // Set the correct header, if data is being sent if ( data ) xml.setRequestHeader("Content-Type", "application/x-www-form-urlencoded"); // Set the If-Modified-Since header, if ifModified mode. if ( ifModified ) xml.setRequestHeader("If-Modified-Since", jQuery.lastModified[url] || "Thu, 01 Jan 1970 00:00:00 GMT" ); // Set header so calling script knows that it's an XMLHttpRequest xml.setRequestHeader("X-Requested-With", "XMLHttpRequest"); // Make sure the browser sends the right content length if ( xml.overrideMimeType ) xml.setRequestHeader("Connection", "close"); // Wait for a response to come back var onreadystatechange = function(istimeout){ // The transfer is complete and the data is available, or the request timed out if ( xml && (xml.readyState == 4 || istimeout == "timeout") ) { requestDone = true; var status = jQuery.httpSuccess( xml ) && istimeout != "timeout" ? ifModified && jQuery.httpNotModified( xml, url ) ? "notmodified" : "success" : "error"; // Make sure that the request was successful or notmodified if ( status != "error" ) { // Cache Last-Modified header, if ifModified mode. var modRes = xml.getResponseHeader("Last-Modified"); if ( ifModified && modRes ) jQuery.lastModified[url] = modRes; // If a local callback was specified, fire it if ( success ) success( xml, status ); // Fire the global callback jQuery.event.trigger( "ajaxSuccess" ); // Otherwise, the request was not successful } else { // If a local callback was specified, fire it if ( error ) error( xml, status ); // Fire the global callback jQuery.event.trigger( "ajaxError" ); } // The request was completed jQuery.event.trigger( "ajaxComplete" ); // Handle the global AJAX counter if ( ! --jQuery.active ) jQuery.event.trigger( "ajaxStop" ); // Process result if ( ret ) ret(xml, status); // Stop memory leaks xml.onreadystatechange = function(){}; xml = null; } }; xml.onreadystatechange = onreadystatechange; // Timeout checker if(jQuery.timeout > 0) setTimeout(function(){ // Check to see if the request is still happening if (xml) { // Cancel the request xml.abort(); if ( !requestDone ) onreadystatechange( "timeout" ); // Clear from memory xml = null; } }, jQuery.timeout); // Send the data xml.send(data); }, // Counter for holding the number of active queries active: 0, // Determines if an XMLHttpRequest was successful or not httpSuccess: function(r) { try { return !r.status && location.protocol == "file:" || ( r.status >= 200 && r.status < 300 ) || r.status == 304 || jQuery.browser.safari && r.status == undefined; } catch(e){} return false; }, // Determines if an XMLHttpRequest returns NotModified httpNotModified: function(xml, url) { try { var xmlRes = xml.getResponseHeader("Last-Modified"); // Firefox always returns 200. check Last-Modified date return xml.status == 304 || xmlRes == jQuery.lastModified[url] || jQuery.browser.safari && xml.status == undefined; } catch(e){} return false; }, // Get the data out of an XMLHttpRequest. // Return parsed XML if content-type header is "xml" and type is "xml" or omitted, // otherwise return plain text. httpData: function(r,type) { var ct = r.getResponseHeader("content-type"); var data = !type && ct && ct.indexOf("xml") >= 0; data = type == "xml" || data ? r.responseXML : r.responseText; // If the type is "script", eval it if ( type == "script" ) eval.call( window, data ); return data; }, // Serialize an array of form elements or a set of // key/values into a query string param: function(a) { var s = []; // If an array was passed in, assume that it is an array // of form elements if ( a.constructor == Array ) { // Serialize the form elements for ( var i = 0; i < a.length; i++ ) s.push( a[i].name + "=" + encodeURIComponent( a[i].value ) ); // Otherwise, assume that it's an object of key/value pairs } else { // Serialize the key/values for ( var j in a ) s.push( j + "=" + encodeURIComponent( a[j] ) ); } // Return the resulting serialization return s.join("&"); } });<|fim▁end|>
<|file_name|>test_composer4to5.py<|end_file_name|><|fim▁begin|>from axiom.test.historic.stubloader import StubbedTest from xquotient.compose import Composer, Drafts class ComposerUpgradeTestCase(StubbedTest): """ Test that the Composer no longer has a 'drafts' attribute, that no Drafts items have been created and that the other attributes have been copied. """ <|fim▁hole|> self.failIf(hasattr(composer, 'drafts'), "Still has 'drafts' attribute") self.assertNotEqual(composer.privateApplication, None) self.assertEqual(self.store.count(Drafts), 0)<|fim▁end|>
def test_upgrade(self): composer = self.store.findUnique(Composer)
<|file_name|>events.js<|end_file_name|><|fim▁begin|>//////////////////////////////////////////////////////////////////////////////////// ////// Events //////////////////////////////////////////////////////////////////////////////////// 'use strict'; // DI var db, responseHandler; /** * * @param req the HTTP requests, contains header and body parameters * @param res the callback to which send HTTP response * @param next facilitate restify function chaining */ exports.findAll = function (req, res, next) { req.check('appid', '"appid": must be a valid identifier').notNull(); var errors = req.validationErrors(), appid; if (errors) { responseHandler(res).error(400, errors); return; } appid = req.params.appid; db.findAllEvents({'application_id': appid}, responseHandler(res, next)); }; /** *<|fim▁hole|> */ exports.findById = function (req, res, next) { req.check('appid', '"appid": must be a valid identifier').notNull(); req.check('eventid', '"eventid": must be a valid identifier').notNull(); var errors = req.validationErrors(), appid, eventid; if (errors) { responseHandler(res).error(400, errors); return; } appid = req.params.appid; eventid = req.params.eventid; db.findEventById({'application_id': appid, 'event_id': eventid}, responseHandler(res, next)); }; /** * * @param req the HTTP requests, contains header and body parameters * @param res the callback to which send HTTP response * @param next facilitate restify function chaining */ exports.create = function (req, res, next) { req.check('appid', '"appid": must be a valid identifier').notNull(); req.check('type', '"type": must be a valid identifier').notNull(); req.check('user', '"user": must be a valid identifier').notNull(); req.check('issued', '"date": must be a valid date').isDate(); var errors = req.validationErrors(), appid, type, user, issued; if (errors) { responseHandler(res).error(400, errors); return; } appid = req.params.appid; type = req.params.type; user = req.params.user; issued = req.params.issued; // or "2013-02-26"; // TODO today or specified db.createEvent( {'application_id': appid, 'type': type, 'user': user, 'issued': issued}, responseHandler(res, next) ); };<|fim▁end|>
* @param req the HTTP requests, contains header and body parameters * @param res the callback to which send HTTP response * @param next facilitate restify function chaining
<|file_name|>wsgi.py<|end_file_name|><|fim▁begin|>""" WSGI config for bugsnag_demo project. It exposes the WSGI callable as a module-level variable named ``application``. <|fim▁hole|>For more information on this file, see https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/ """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bugsnag_demo.settings") from django.core.wsgi import get_wsgi_application application = get_wsgi_application()<|fim▁end|>
<|file_name|>comp-2181.component.spec.ts<|end_file_name|><|fim▁begin|>/** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ import { async, ComponentFixture, TestBed } from '@angular/core/testing'; import { Comp2181Component } from './comp-2181.component'; describe('Comp2181Component', () => { let component: Comp2181Component; let fixture: ComponentFixture<Comp2181Component>; beforeEach(async(() => { TestBed.configureTestingModule({ declarations: [ Comp2181Component ] }) .compileComponents(); })); beforeEach(() => { fixture = TestBed.createComponent(Comp2181Component); component = fixture.componentInstance; fixture.detectChanges(); }); it('should create', () => { expect(component).toBeTruthy(); });<|fim▁hole|><|fim▁end|>
});
<|file_name|>script.rs<|end_file_name|><|fim▁begin|>extern crate base64; extern crate bytes; extern crate futures; extern crate sodiumoxide; extern crate serde_json; extern crate tokio_core; extern crate tokio_io; extern crate tokio_proto; extern crate tokio_process; extern crate tokio_service; use std; use std::error::Error as StdError; use std::sync::{Arc, Mutex}; use std::process::Command; use self::bytes::BytesMut; use self::futures::{future, Future, BoxFuture, Sink, Stream}; use self::sodiumoxide::crypto::secretbox; use self::tokio_io::{AsyncRead, AsyncWrite}; use self::tokio_io::codec::{Decoder, Encoder, Framed}; use self::tokio_process::CommandExt; use self::tokio_service::Service; use super::*; use self::context::Context; use self::value::{Identifier, Message, Obj}; #[derive(Serialize, Deserialize)] #[derive(Debug)] pub enum Request { GetVars, SetVars { contents: Obj }, UnsetVar { key: String }, GetMsg { topic: String }, SendMsg { dst_remote: Option<String>, dst_agent: String, topic: String, contents: Obj, }, ReplyMsg { src_topic: String, topic: String, contents: Obj, }, } #[derive(Serialize, Deserialize)] #[derive(Debug)] pub enum Response { GetVars(Obj), SetVars, UnsetVar, GetMsg(Message), SendMsg { dst_remote: Option<String>, dst_agent: String, topic: String, }, NotFound, Error(String), } pub struct ServiceCodec; impl Decoder for ServiceCodec { type Item = Request; type Error = std::io::Error; fn decode(&mut self, buf: &mut BytesMut) -> std::io::Result<Option<Self::Item>> { if let Some(i) = buf.iter().position(|&b| b == b'\n') { // remove the serialized frame from the buffer. let line = buf.split_to(i); // Also remove the '\n' buf.split_to(1); // Turn this data into a UTF string and // return it in a Frame. let maybe_req: std::result::Result<Self::Item, serde_json::error::Error> = serde_json::from_slice(&line[..]); match maybe_req { Ok(req) => { debug!("service decode {:?}", req); buf.take(); Ok(Some(req)) } Err(e) => { error!("decode failed: {}", e); Err(std::io::Error::new(std::io::ErrorKind::Other, e.description())) } } } else { Ok(None) } } } impl Encoder for ServiceCodec { type Item = Response; type Error = std::io::Error; fn encode(&mut self, msg: Self::Item, buf: &mut BytesMut) -> std::io::Result<()> { match serde_json::to_vec(&msg) { Ok(json) => { debug!("service encode {:?}", msg); buf.extend(&json[..]); Ok(()) } Err(e) => { error!("service encode failed: {}", e); Err(std::io::Error::new(std::io::ErrorKind::Other, e.description())) } }?; buf.extend(&b"\n"[..]); Ok(()) } } pub struct ClientCodec; impl Decoder for ClientCodec { type Item = Response; type Error = std::io::Error; fn decode(&mut self, buf: &mut BytesMut) -> std::io::Result<Option<Self::Item>> { if let Some(i) = buf.iter().position(|&b| b == b'\n') { // remove the serialized frame from the buffer. let line = buf.split_to(i); // Also remove the '\n' buf.split_to(1); // Turn this data into a UTF string and // return it in a Frame. let maybe_req: std::result::Result<Self::Item, serde_json::error::Error> = serde_json::from_slice(&line[..]); match maybe_req { Ok(req) => { debug!("client decode {:?}", req); buf.take(); Ok(Some(req)) } Err(e) => { error!("client decode failed: {}", e); Err(std::io::Error::new(std::io::ErrorKind::Other, e.description())) } } } else { Ok(None) } } } impl Encoder for ClientCodec { type Item = Request; type Error = std::io::Error; fn encode(&mut self, msg: Self::Item, buf: &mut BytesMut) -> std::io::Result<()> { match serde_json::to_vec(&msg) { Ok(json) => { debug!("client encode {:?}", msg); buf.extend(&json[..]); Ok(()) } Err(e) => { error!("client encode failed: {}", e); Err(std::io::Error::new(std::io::ErrorKind::Other, e.description())) } }?; buf.extend(&b"\n"[..]); Ok(()) } } pub struct ClientProto { key: secretbox::Key, } impl ClientProto { pub fn new_from_env() -> Result<ClientProto> { let key_str = std::env::var("GLOP_SCRIPT_KEY").map_err(Error::Env)?; let key_bytes = base64::decode(&key_str).map_err(|e| Error::InvalidArgument(format!("{}", e)))?; let key = match secretbox::Key::from_slice(&key_bytes) { Some(k) => k, None => return Err(Error::InvalidArgument("GLOP_SCRIPT_KEY".to_string())), }; Ok(ClientProto { key: key }) } } impl<T: AsyncRead + AsyncWrite + 'static> tokio_proto::pipeline::ClientProto<T> for ClientProto { type Request = Request; type Response = Response; type Transport = Framed<T, crypto::SecretBoxCodec<ClientCodec>>; type BindTransport = std::io::Result<Self::Transport>; fn bind_transport(&self, io: T) -> Self::BindTransport { Ok(io.framed(crypto::SecretBoxCodec::new(ClientCodec, self.key.clone()))) } } pub struct ScriptService { ctx: Arc<Mutex<Context>>, actions: Arc<Mutex<Vec<Action>>>, } impl ScriptService { fn new(ctx: Arc<Mutex<Context>>, actions: Arc<Mutex<Vec<Action>>>) -> ScriptService { ScriptService { ctx: ctx, actions: actions, }<|fim▁hole|>} impl Service for ScriptService { // These types must match the corresponding protocol types: type Request = Request; type Response = Response; // For non-streaming protocols, service errors are always io::Error type Error = std::io::Error; // The future for computing the response; box it for simplicity. type Future = BoxFuture<Self::Response, Self::Error>; // Produce a future for computing a response from a request. fn call(&self, req: Self::Request) -> Self::Future { let mut ctx = self.ctx.lock().unwrap(); let res = match req { Request::GetVars => Response::GetVars(ctx.vars.clone()), Request::SetVars { ref contents } => { let mut actions = self.actions.lock().unwrap(); for (k, v) in contents.iter() { let id = Identifier::from_str(k); ctx.set_var(&id, v.clone()); actions.push(Action::SetVar(id, v.clone())); } drop(ctx); drop(actions); Response::SetVars } Request::UnsetVar { ref key } => { let id = Identifier::from_str(key); ctx.unset_var(&id); drop(ctx); let mut actions = self.actions.lock().unwrap(); actions.push(Action::UnsetVar(id)); drop(actions); Response::UnsetVar } Request::GetMsg { ref topic } => { match ctx.msgs.get(topic) { Some(msg) => Response::GetMsg(msg.clone()), None => Response::NotFound, } } Request::SendMsg { ref dst_remote, ref dst_agent, ref topic, ref contents } => { drop(ctx); let mut actions = self.actions.lock().unwrap(); actions.push(Action::SendMsg { dst_remote: dst_remote.clone(), dst_agent: dst_agent.to_string(), topic: topic.to_string(), in_reply_to: None, contents: contents.clone(), }); drop(actions); Response::SendMsg { dst_remote: dst_remote.clone(), dst_agent: dst_agent.to_string(), topic: topic.to_string(), } } Request::ReplyMsg { ref src_topic, ref topic, ref contents } => { if let Some(ref src_msg) = ctx.msgs.get(src_topic) { let mut actions = self.actions.lock().unwrap(); actions.push(Action::SendMsg { dst_remote: src_msg.src_remote.clone(), dst_agent: src_msg.src_agent.to_string(), topic: topic.to_string(), in_reply_to: Some(src_msg.id.to_string()), contents: contents.clone(), }); drop(actions); Response::SendMsg { dst_remote: src_msg.src_remote.clone(), dst_agent: src_msg.src_agent.to_string(), topic: topic.to_string(), } } else { Response::Error(format!("topic {} not found", topic)) } } }; future::ok(res).boxed() } } pub fn run_script(ctx: Arc<Mutex<Context>>, script_path: &str) -> Result<Vec<Action>> { let mut core = tokio_core::reactor::Core::new().map_err(error::Error::IO)?; let handle = core.handle(); let addr = "127.0.0.1:0".parse().unwrap(); let listener = tokio_core::net::TcpListener::bind(&addr, &handle).map_err(error::Error::IO)?; let listen_addr = &listener.local_addr().map_err(error::Error::IO)?; let connections = listener.incoming(); let mut cmd = &mut Command::new(script_path); let src = { let ctx = ctx.lock().unwrap(); ctx.set_env(cmd); ctx.src.to_string() }; let key = secretbox::gen_key(); let actions = Arc::new(Mutex::new(vec![])); let server_actions = actions.clone(); let child = cmd.env("GLOP_SCRIPT_ADDR", format!("{}", listen_addr)) .env("GLOP_SCRIPT_KEY", base64::encode(&key.0)) .output_async(&handle) .then(|result| match result { Ok(output) => { let mut stdout = String::from_utf8(output.stdout).unwrap(); stdout.pop(); if !stdout.is_empty() { info!("{}: stdout: {}", src, stdout); } let mut stderr = String::from_utf8(output.stderr).unwrap(); stderr.pop(); if !stderr.is_empty() { info!("{}: stderr: {}", src, stderr); } if output.status.success() { Ok(()) } else { let code = match output.status.code() { Some(value) => value, None => 0, }; Err(Error::Exec(code, stderr)) } } Err(e) => Err(Error::IO(e)), }); let server = connections.for_each(move |(socket, _peer_addr)| { let (wr, rd) = socket.framed(crypto::SecretBoxCodec::new(ServiceCodec, key.clone())) .split(); let service = ScriptService::new(ctx.clone(), server_actions.clone()); let responses = rd.and_then(move |req| service.call(req)); let responder = wr.send_all(responses).then(|_| Ok(())); handle.spawn(responder); Ok(()) }) .map_err(Error::IO); let comb = server.select(child); match core.run(comb) { Err((e, _)) => { return Err(e); } Ok(_) => Ok(actions.lock().unwrap().clone()), } }<|fim▁end|>
}
<|file_name|>topictypes.go<|end_file_name|><|fim▁begin|>package eventgrid // Copyright (c) Microsoft and contributors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // limitations under the License. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "context" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/tracing" "net/http" ) // TopicTypesClient is the azure EventGrid Management Client type TopicTypesClient struct { BaseClient } // NewTopicTypesClient creates an instance of the TopicTypesClient client. func NewTopicTypesClient(subscriptionID string) TopicTypesClient { return NewTopicTypesClientWithBaseURI(DefaultBaseURI, subscriptionID) } // NewTopicTypesClientWithBaseURI creates an instance of the TopicTypesClient client. func NewTopicTypesClientWithBaseURI(baseURI string, subscriptionID string) TopicTypesClient { return TopicTypesClient{NewWithBaseURI(baseURI, subscriptionID)} } // Get get information about a topic type // Parameters: // topicTypeName - name of the topic type func (client TopicTypesClient) Get(ctx context.Context, topicTypeName string) (result TopicTypeInfo, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/TopicTypesClient.Get") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.GetPreparer(ctx, topicTypeName) if err != nil { err = autorest.NewErrorWithError(err, "eventgrid.TopicTypesClient", "Get", nil, "Failure preparing request") return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "eventgrid.TopicTypesClient", "Get", resp, "Failure sending request") return } result, err = client.GetResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "eventgrid.TopicTypesClient", "Get", resp, "Failure responding to request") } return } // GetPreparer prepares the Get request. func (client TopicTypesClient) GetPreparer(ctx context.Context, topicTypeName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "topicTypeName": autorest.Encode("path", topicTypeName), } const APIVersion = "2019-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/providers/Microsoft.EventGrid/topicTypes/{topicTypeName}", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client TopicTypesClient) GetSender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. The method always // closes the http.Response Body. func (client TopicTypesClient) GetResponder(resp *http.Response) (result TopicTypeInfo, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // List list all registered topic types func (client TopicTypesClient) List(ctx context.Context) (result TopicTypesListResult, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/TopicTypesClient.List") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.ListPreparer(ctx) if err != nil { err = autorest.NewErrorWithError(err, "eventgrid.TopicTypesClient", "List", nil, "Failure preparing request") return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "eventgrid.TopicTypesClient", "List", resp, "Failure sending request") return } result, err = client.ListResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "eventgrid.TopicTypesClient", "List", resp, "Failure responding to request") } return } // ListPreparer prepares the List request. func (client TopicTypesClient) ListPreparer(ctx context.Context) (*http.Request, error) { const APIVersion = "2019-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPath("/providers/Microsoft.EventGrid/topicTypes"), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client TopicTypesClient) ListSender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always // closes the http.Response Body. func (client TopicTypesClient) ListResponder(resp *http.Response) (result TopicTypesListResult, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // ListEventTypes list event types for a topic type // Parameters: // topicTypeName - name of the topic type func (client TopicTypesClient) ListEventTypes(ctx context.Context, topicTypeName string) (result EventTypesListResult, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/TopicTypesClient.ListEventTypes") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.ListEventTypesPreparer(ctx, topicTypeName) if err != nil { err = autorest.NewErrorWithError(err, "eventgrid.TopicTypesClient", "ListEventTypes", nil, "Failure preparing request") return } resp, err := client.ListEventTypesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "eventgrid.TopicTypesClient", "ListEventTypes", resp, "Failure sending request") return } result, err = client.ListEventTypesResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "eventgrid.TopicTypesClient", "ListEventTypes", resp, "Failure responding to request") } return } // ListEventTypesPreparer prepares the ListEventTypes request. func (client TopicTypesClient) ListEventTypesPreparer(ctx context.Context, topicTypeName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "topicTypeName": autorest.Encode("path", topicTypeName), } const APIVersion = "2019-01-01" queryParameters := map[string]interface{}{ "api-version": APIVersion,<|fim▁hole|> preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/providers/Microsoft.EventGrid/topicTypes/{topicTypeName}/eventTypes", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ListEventTypesSender sends the ListEventTypes request. The method will close the // http.Response Body if it receives an error. func (client TopicTypesClient) ListEventTypesSender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) return autorest.SendWithSender(client, req, sd...) } // ListEventTypesResponder handles the response to the ListEventTypes request. The method always // closes the http.Response Body. func (client TopicTypesClient) ListEventTypesResponder(resp *http.Response) (result EventTypesListResult, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return }<|fim▁end|>
}
<|file_name|>pad.rs<|end_file_name|><|fim▁begin|>// Copyright (C) 2019-2020 François Laignel <[email protected]> // Copyright (C) 2020 Sebastian Dröge <[email protected]> // // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Library General Public // License as published by the Free Software Foundation; either // version 2 of the License, or (at your option) any later version. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Library General Public License for more details. // // You should have received a copy of the GNU Library General Public // License along with this library; if not, write to the // Free Software Foundation, Inc., 51 Franklin Street, Suite 500, // Boston, MA 02110-1335, USA. //! An implementation of `Pad`s to run asynchronous processings. //! //! [`PadSink`] & [`PadSrc`] provide an asynchronous API to ease the development of `Element`s in //! the `threadshare` GStreamer plugins framework. //! //! The diagram below shows how the [`PadSrc`] & [`PadSink`] and the related `struct`s integrate in //! `ts` `Element`s. //! //! Note: [`PadSrc`] & [`PadSink`] only support `gst::PadMode::Push` at the moment. //! //! ```text //! ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━ //! Element A ┃ ┃ Element B //! ┃ ┃ //! ╭─────────────────╮ ┃ ┃ ╭──────────────────╮ //! │ PadSrc │ ┃ ┃ │ PadSink │ //! │ Handler │ ┃ ┃ │ Handler │ //! │─────────────────│ ┃ ┃ │──────────────────│ //! │ - src_activate* │ ╭──┸──╮ ╭──┸──╮ │ - sink_activate* │ //! │ - src_event* │<────│ │<╌╌╌│ │───>│ - sink_chain* │ //! │ - src_query │<────│ gst │ │ gst │───>│ - sink_event* │ //! │─────────────────│ │ │ │ │───>│ - sink_query │ //! │ - task fn │ │ Pad │ │ Pad │ ╰──────────────────╯ //! ╰─────────────────╯ ╭─>│ │╌╌╌>│ │─╮ │ //! ╭───────╯ │ │ ╰──┰──╯ ╰──┰──╯ ╰───────╮ │ //! ╭────────────╮ ╭────────╮ push* │ ┃ ┃ ╭─────────╮ //! │ Pad Task ↺ │──>│ PadSrc │───────╯ ┃ ┃ │ PadSink │ //! ╰────────────╯ ╰────────╯ ┃ ┃ ╰─────────╯ //! ━━━━━━━│━━━━━━━━━━━━━━│━━━━━━━━━━━━━━━━━┛ ┗━━━━━━━━━━━━━━━│━━━━━━━━━━━━ //! ╰──────────────┴───────────────────╮ ╭─────────────────╯ //! ╭────────────╮ //! │ Context ↺ │ //! ╰────────────╯ //! ``` //! //! Asynchronous operations for both [`PadSrc`] in `Element A` and [`PadSink`] in `Element B` run on //! the same [`Context`], which can also be shared by other `Element`s or instances of the same //! `Element`s in multiple `Pipeline`s. //! //! `Element A` & `Element B` can also be linked to non-threadshare `Element`s in which case, they //! operate in a regular synchronous way. //! //! Note that only operations on the streaming thread (serialized events, buffers, serialized //! queries) are handled from the `PadContext` and asynchronously, everything else operates //! blocking. //! //! [`PadSink`]: struct.PadSink.html //! [`PadSrc`]: struct.PadSrc.html //! [`Context`]: ../executor/struct.Context.html use futures::future; use futures::future::BoxFuture; use futures::prelude::*; use gst::prelude::*; use gst::subclass::prelude::*; use gst::{gst_debug, gst_error, gst_fixme, gst_log, gst_loggable_error}; use gst::{FlowError, FlowSuccess}; use std::marker::PhantomData; use std::ops::Deref; use std::sync::{Arc, Weak}; use super::executor::{block_on_or_add_sub_task, Context}; use super::RUNTIME_CAT; #[inline] fn event_ret_to_event_full_res( ret: bool, event_type: gst::EventType, ) -> Result<FlowSuccess, FlowError> { if ret { Ok(FlowSuccess::Ok) } else if event_type == gst::EventType::Caps { Err(FlowError::NotNegotiated) } else { Err(FlowError::Error) } } #[inline] fn event_to_event_full(ret: bool, event_type: gst::EventType) -> Result<FlowSuccess, FlowError> { event_ret_to_event_full_res(ret, event_type) } #[inline] fn event_to_event_full_serialized( ret: BoxFuture<'static, bool>, event_type: gst::EventType, ) -> BoxFuture<'static, Result<FlowSuccess, FlowError>> { ret.map(move |ret| event_ret_to_event_full_res(ret, event_type)) .boxed() } /// A trait to define `handler`s for [`PadSrc`] callbacks. /// /// *See the [`pad` module] documentation for a description of the model.* /// /// [`PadSrc`]: struct.PadSrc.html /// [`pad` module]: index.html pub trait PadSrcHandler: Clone + Send + Sync + 'static { type ElementImpl: ElementImpl + ObjectSubclass; fn src_activate( &self, pad: &PadSrcRef, _imp: &Self::ElementImpl, _element: &gst::Element, ) -> Result<(), gst::LoggableError> { let gst_pad = pad.gst_pad(); if gst_pad.is_active() { gst_debug!( RUNTIME_CAT, obj: gst_pad, "Already activated in {:?} mode ", gst_pad.get_mode() ); return Ok(()); } gst_pad .activate_mode(gst::PadMode::Push, true) .map_err(|err| { gst_error!( RUNTIME_CAT, obj: gst_pad, "Error in PadSrc activate: {:?}", err ); gst_loggable_error!(RUNTIME_CAT, "Error in PadSrc activate: {:?}", err) }) } fn src_activatemode( &self, _pad: &PadSrcRef, _imp: &Self::ElementImpl, _element: &gst::Element, _mode: gst::PadMode, _active: bool, ) -> Result<(), gst::LoggableError> { Ok(()) } fn src_event( &self, pad: &PadSrcRef, _imp: &Self::ElementImpl, element: &gst::Element, event: gst::Event, ) -> bool { gst_log!(RUNTIME_CAT, obj: pad.gst_pad(), "Handling {:?}", event); pad.gst_pad().event_default(Some(element), event) } fn src_event_full( &self, pad: &PadSrcRef, imp: &Self::ElementImpl, element: &gst::Element, event: gst::Event, ) -> Result<FlowSuccess, FlowError> { // default is to dispatch to `src_event` // (as implemented in `gst_pad_send_event_unchecked`) let event_type = event.get_type(); event_to_event_full(self.src_event(pad, imp, element, event), event_type) } fn src_query( &self, pad: &PadSrcRef, _imp: &Self::ElementImpl, element: &gst::Element, query: &mut gst::QueryRef, ) -> bool { gst_log!(RUNTIME_CAT, obj: pad.gst_pad(), "Handling {:?}", query); if query.is_serialized() { // FIXME serialized queries should be handled with the dataflow // but we can't return a `Future` because we couldn't honor QueryRef's lifetime false } else { pad.gst_pad().query_default(Some(element), query) } } } #[derive(Debug)] pub struct PadSrcInner { gst_pad: gst::Pad, } impl PadSrcInner { fn new(gst_pad: gst::Pad) -> Self { if gst_pad.get_direction() != gst::PadDirection::Src { panic!("Wrong pad direction for PadSrc"); } PadSrcInner { gst_pad } } pub fn gst_pad(&self) -> &gst::Pad { &self.gst_pad } pub async fn push(&self, buffer: gst::Buffer) -> Result<FlowSuccess, FlowError> { gst_log!(RUNTIME_CAT, obj: self.gst_pad(), "Pushing {:?}", buffer); let success = self.gst_pad.push(buffer).map_err(|err| { gst_error!(RUNTIME_CAT, obj: self.gst_pad(), "Failed to push Buffer to PadSrc: {:?}", err, ); err })?; gst_log!(RUNTIME_CAT, obj: &self.gst_pad, "Processing any pending sub tasks"); while Context::current_has_sub_tasks() { Context::drain_sub_tasks().await?; } Ok(success) } pub async fn push_list(&self, list: gst::BufferList) -> Result<FlowSuccess, FlowError> { gst_log!(RUNTIME_CAT, obj: self.gst_pad(), "Pushing {:?}", list); let success = self.gst_pad.push_list(list).map_err(|err| { gst_error!( RUNTIME_CAT, obj: self.gst_pad(), "Failed to push BufferList to PadSrc: {:?}", err, ); err })?; gst_log!(RUNTIME_CAT, obj: &self.gst_pad, "Processing any pending sub tasks"); while Context::current_has_sub_tasks() { Context::drain_sub_tasks().await?; } Ok(success) } pub async fn push_event(&self, event: gst::Event) -> bool { gst_log!(RUNTIME_CAT, obj: &self.gst_pad, "Pushing {:?}", event); let was_handled = self.gst_pad().push_event(event); gst_log!(RUNTIME_CAT, obj: &self.gst_pad, "Processing any pending sub tasks"); while Context::current_has_sub_tasks() { if Context::drain_sub_tasks().await.is_err() { return false; } } was_handled } } /// A [`PadSrc`] which can be moved in [`handler`]s functions and `Future`s. /// /// Call [`upgrade`] to use the [`PadSrc`]. /// /// *See the [`pad` module] documentation for a description of the model.* /// /// [`PadSrc`]: struct.PadSrc.html /// [`handler`]: trait.PadSrcHandler.html /// [`upgrade`]: struct.PadSrcWeak.html#method.upgrade /// [`pad` module]: index.html #[derive(Clone, Debug)] pub struct PadSrcWeak(Weak<PadSrcInner>); impl PadSrcWeak { pub fn upgrade(&self) -> Option<PadSrcRef<'_>> { self.0.upgrade().map(PadSrcRef::new) } } /// A [`PadSrc`] to be used in `Handler`s functions and `Future`s. /// /// Call [`downgrade`] if you need to `clone` the [`PadSrc`]. /// /// *See the [`pad` module] documentation for a description of the model.* /// /// [`PadSrc`]: struct.PadSrc.html /// [`PadSrcWeak`]: struct.PadSrcWeak.html /// [`downgrade`]: struct.PadSrcRef.html#method.downgrade /// [`pad` module]: index.html #[derive(Debug)] pub struct PadSrcRef<'a> { strong: Arc<PadSrcInner>, phantom: PhantomData<&'a Self>, } impl<'a> PadSrcRef<'a> { fn new(inner_arc: Arc<PadSrcInner>) -> Self { PadSrcRef { strong: inner_arc, phantom: PhantomData, } } pub fn downgrade(&self) -> PadSrcWeak { PadSrcWeak(Arc::downgrade(&self.strong)) } fn activate_mode_hook( &self, mode: gst::PadMode, active: bool, ) -> Result<(), gst::LoggableError> { // Important: don't panic here as the hook is used without `catch_panic_pad_function` // in the default `activatemode` handling gst_log!(RUNTIME_CAT, obj: self.gst_pad(), "ActivateMode {:?}, {}", mode, active); if mode == gst::PadMode::Pull { gst_error!(RUNTIME_CAT, obj: self.gst_pad(), "Pull mode not supported by PadSrc"); return Err(gst_loggable_error!( RUNTIME_CAT, "Pull mode not supported by PadSrc" )); } Ok(()) } } impl<'a> Deref for PadSrcRef<'a> { type Target = PadSrcInner; fn deref(&self) -> &Self::Target { &self.strong } } /// The `PadSrc` which `Element`s must own. /// /// Call [`downgrade`] if you need to `clone` the `PadSrc`. /// /// *See the [`pad` module] documentation for a description of the model.* /// /// [`downgrade`]: struct.PadSrc.html#method.downgrade /// [`pad` module]: index.html #[derive(Debug)] pub struct PadSrc(Arc<PadSrcInner>); impl PadSrc { pub fn new(gst_pad: gst::Pad, handler: impl PadSrcHandler) -> Self { let this = PadSrc(Arc::new(PadSrcInner::new(gst_pad))); this.init_pad_functions(handler); this } pub fn downgrade(&self) -> PadSrcWeak { PadSrcWeak(Arc::downgrade(&self.0)) } pub fn as_ref(&self) -> PadSrcRef<'_> { PadSrcRef::new(Arc::clone(&self.0)) } pub fn check_reconfigure(&self) -> bool { self.0.gst_pad().check_reconfigure() } fn init_pad_functions<H: PadSrcHandler>(&self, handler: H) { // FIXME: Do this better unsafe { let handler_clone = handler.clone(); let inner_arc = Arc::clone(&self.0); self.0 .gst_pad() .set_activate_function(move |gst_pad, parent| { let handler = handler_clone.clone(); let inner_arc = inner_arc.clone(); H::ElementImpl::catch_panic_pad_function( parent, || { gst_error!(RUNTIME_CAT, obj: gst_pad, "Panic in PadSrc activate"); Err(gst_loggable_error!(RUNTIME_CAT, "Panic in PadSrc activate")) }, move |imp, element| { let this_ref = PadSrcRef::new(inner_arc); handler.src_activate( &this_ref, imp, element.dynamic_cast_ref::<gst::Element>().unwrap(), ) }, ) }); let handler_clone = handler.clone(); let inner_arc = Arc::clone(&self.0); self.gst_pad() .set_activatemode_function(move |gst_pad, parent, mode, active| { let handler = handler_clone.clone(); let inner_arc = inner_arc.clone(); H::ElementImpl::catch_panic_pad_function( parent, || { gst_error!(RUNTIME_CAT, obj: gst_pad, "Panic in PadSrc activatemode"); Err(gst_loggable_error!( RUNTIME_CAT, "Panic in PadSrc activatemode" )) }, move |imp, element| { let this_ref = PadSrcRef::new(inner_arc); this_ref.activate_mode_hook(mode, active)?; handler.src_activatemode( &this_ref, imp, element.dynamic_cast_ref::<gst::Element>().unwrap(), mode, active, ) }, ) }); // No need to `set_event_function` since `set_event_full_function` // overrides it and dispatches to `src_event` when necessary let handler_clone = handler.clone(); let inner_arc = Arc::clone(&self.0); self.gst_pad() .set_event_full_function(move |_gst_pad, parent, event| { let handler = handler_clone.clone(); let inner_arc = inner_arc.clone(); H::ElementImpl::catch_panic_pad_function( parent, || Err(FlowError::Error), move |imp, element| { let this_ref = PadSrcRef::new(inner_arc); handler.src_event_full( &this_ref, imp, element.dynamic_cast_ref::<gst::Element>().unwrap(), event, ) }, ) }); let inner_arc = Arc::clone(&self.0); self.gst_pad() .set_query_function(move |_gst_pad, parent, query| { let handler = handler.clone(); let inner_arc = inner_arc.clone(); H::ElementImpl::catch_panic_pad_function( parent, || false, move |imp, element| { let this_ref = PadSrcRef::new(inner_arc); if !query.is_serialized() { handler.src_query(&this_ref, imp, element.dynamic_cast_ref::<gst::Element>().unwrap(), query) } else { gst_fixme!(RUNTIME_CAT, obj: this_ref.gst_pad(), "Serialized Query not supported"); false } }, ) }); } } } impl Drop for PadSrc { fn drop(&mut self) { // FIXME: Do this better unsafe { self.gst_pad() .set_activate_function(move |_gst_pad, _parent| { Err(gst_loggable_error!(RUNTIME_CAT, "PadSrc no longer exists")) }); self.gst_pad() .set_activatemode_function(move |_gst_pad, _parent, _mode, _active| { Err(gst_loggable_error!(RUNTIME_CAT, "PadSrc no longer exists")) }); self.gst_pad() .set_event_function(move |_gst_pad, _parent, _event| false); self.gst_pad() .set_event_full_function(move |_gst_pad, _parent, _event| Err(FlowError::Flushing)); self.gst_pad() .set_query_function(move |_gst_pad, _parent, _query| false); } } } impl Deref for PadSrc { type Target = PadSrcInner; fn deref(&self) -> &Self::Target { &self.0 } } /// A trait to define `handler`s for [`PadSink`] callbacks. /// /// *See the [`pad` module] documentation for a description of the model.* /// /// [`PadSink`]: struct.PadSink.html /// [`pad` module]: index.html pub trait PadSinkHandler: Clone + Send + Sync + 'static { type ElementImpl: ElementImpl + ObjectSubclass; // FIXME: Once associated type bounds are stable we should use ObjectSubclass::Type below // instead of &gst::Element fn sink_activate( &self, pad: &PadSinkRef, _imp: &Self::ElementImpl, _element: &gst::Element, ) -> Result<(), gst::LoggableError> { let gst_pad = pad.gst_pad(); if gst_pad.is_active() { gst_debug!( RUNTIME_CAT, obj: gst_pad, "Already activated in {:?} mode ", gst_pad.get_mode() ); return Ok(()); } gst_pad .activate_mode(gst::PadMode::Push, true) .map_err(|err| { gst_error!( RUNTIME_CAT, obj: gst_pad, "Error in PadSink activate: {:?}", err ); gst_loggable_error!(RUNTIME_CAT, "Error in PadSink activate: {:?}", err) }) } fn sink_activatemode( &self, _pad: &PadSinkRef, _imp: &Self::ElementImpl, _element: &gst::Element, _mode: gst::PadMode, _active: bool, ) -> Result<(), gst::LoggableError> { Ok(()) } fn sink_chain( &self, _pad: &PadSinkRef, _imp: &Self::ElementImpl, _element: &gst::Element, _buffer: gst::Buffer, ) -> BoxFuture<'static, Result<FlowSuccess, FlowError>> { future::err(FlowError::NotSupported).boxed() } fn sink_chain_list( &self, _pad: &PadSinkRef, _imp: &Self::ElementImpl, _element: &gst::Element, _buffer_list: gst::BufferList, ) -> BoxFuture<'static, Result<FlowSuccess, FlowError>> { future::err(FlowError::NotSupported).boxed() } fn sink_event( &self, pad: &PadSinkRef, _imp: &Self::ElementImpl, element: &gst::Element, event: gst::Event, ) -> bool { assert!(!event.is_serialized()); gst_log!(RUNTIME_CAT, obj: pad.gst_pad(), "Handling {:?}", event); pad.gst_pad().event_default(Some(element), event) } fn sink_event_serialized( &self, pad: &PadSinkRef, _imp: &Self::ElementImpl, element: &gst::Element, event: gst::Event, ) -> BoxFuture<'static, bool> { assert!(event.is_serialized()); let pad_weak = pad.downgrade(); let element = element.clone(); async move { let pad = pad_weak.upgrade().expect("PadSink no longer exists"); gst_log!(RUNTIME_CAT, obj: pad.gst_pad(), "Handling {:?}", event); pad.gst_pad().event_default(Some(&element), event) } .boxed() } fn sink_event_full( &self, pad: &PadSinkRef, imp: &Self::ElementImpl, element: &gst::Element, event: gst::Event, ) -> Result<FlowSuccess, FlowError> { assert!(!event.is_serialized()); // default is to dispatch to `sink_event` // (as implemented in `gst_pad_send_event_unchecked`) let event_type = event.get_type(); event_to_event_full(self.sink_event(pad, imp, element, event), event_type) } fn sink_event_full_serialized( &self, pad: &PadSinkRef, imp: &Self::ElementImpl, element: &gst::Element, event: gst::Event,<|fim▁hole|> assert!(event.is_serialized()); // default is to dispatch to `sink_event` // (as implemented in `gst_pad_send_event_unchecked`) let event_type = event.get_type(); event_to_event_full_serialized( self.sink_event_serialized(pad, imp, element, event), event_type, ) } fn sink_query( &self, pad: &PadSinkRef, _imp: &Self::ElementImpl, element: &gst::Element, query: &mut gst::QueryRef, ) -> bool { if query.is_serialized() { gst_log!(RUNTIME_CAT, obj: pad.gst_pad(), "Dropping {:?}", query); // FIXME serialized queries should be handled with the dataflow // but we can't return a `Future` because we couldn't honor QueryRef's lifetime false } else { gst_log!(RUNTIME_CAT, obj: pad.gst_pad(), "Handling {:?}", query); pad.gst_pad().query_default(Some(element), query) } } } #[derive(Debug)] pub struct PadSinkInner { gst_pad: gst::Pad, } impl PadSinkInner { fn new(gst_pad: gst::Pad) -> Self { if gst_pad.get_direction() != gst::PadDirection::Sink { panic!("Wrong pad direction for PadSink"); } PadSinkInner { gst_pad } } pub fn gst_pad(&self) -> &gst::Pad { &self.gst_pad } } /// A [`PadSink`] which can be moved in `Handler`s functions and `Future`s. /// /// Call [`upgrade`] to use the [`PadSink`]. /// /// *See the [`pad` module] documentation for a description of the model.* /// /// [`PadSink`]: struct.PadSink.html /// [`upgrade`]: struct.PadSinkWeak.html#method.upgrade /// [`pad` module]: index.html #[derive(Clone, Debug)] pub struct PadSinkWeak(Weak<PadSinkInner>); impl PadSinkWeak { pub fn upgrade(&self) -> Option<PadSinkRef<'_>> { self.0.upgrade().map(PadSinkRef::new) } } /// A [`PadSink`] to be used in [`handler`]s functions and `Future`s. /// /// Call [`downgrade`] if you need to `clone` the [`PadSink`]. /// /// *See the [`pad` module] documentation for a description of the model.* /// /// [`PadSink`]: struct.PadSink.html /// [`handler`]: trait.PadSinkHandler.html /// [`downgrade`]: struct.PadSinkRef.html#method.downgrade /// [`pad` module]: index.html #[derive(Debug)] pub struct PadSinkRef<'a> { strong: Arc<PadSinkInner>, phantom: PhantomData<&'a Self>, } impl<'a> PadSinkRef<'a> { fn new(inner_arc: Arc<PadSinkInner>) -> Self { PadSinkRef { strong: inner_arc, phantom: PhantomData, } } pub fn downgrade(&self) -> PadSinkWeak { PadSinkWeak(Arc::downgrade(&self.strong)) } fn activate_mode_hook( &self, mode: gst::PadMode, active: bool, ) -> Result<(), gst::LoggableError> { // Important: don't panic here as the hook is used without `catch_panic_pad_function` // in the default `activatemode` handling gst_log!(RUNTIME_CAT, obj: self.gst_pad(), "ActivateMode {:?}, {}", mode, active); if mode == gst::PadMode::Pull { gst_error!(RUNTIME_CAT, obj: self.gst_pad(), "Pull mode not supported by PadSink"); return Err(gst_loggable_error!( RUNTIME_CAT, "Pull mode not supported by PadSink" )); } Ok(()) } fn handle_future( &self, fut: impl Future<Output = Result<FlowSuccess, FlowError>> + Send + 'static, ) -> Result<FlowSuccess, FlowError> { if let Err(fut) = Context::add_sub_task(fut.map(|res| res.map(drop))) { block_on_or_add_sub_task(fut.map(|res| res.map(|_| gst::FlowSuccess::Ok))) .unwrap_or(Ok(gst::FlowSuccess::Ok)) } else { Ok(gst::FlowSuccess::Ok) } } } impl<'a> Deref for PadSinkRef<'a> { type Target = PadSinkInner; fn deref(&self) -> &Self::Target { &self.strong } } /// The `PadSink` which `Element`s must own. /// /// Call [`downgrade`] if you need to `clone` the `PadSink`. /// /// *See the [`pad` module] documentation for a description of the model.* /// /// [`downgrade`]: struct.PadSink.html#method.downgrade /// [`pad` module]: index.html #[derive(Debug)] pub struct PadSink(Arc<PadSinkInner>); impl PadSink { pub fn new(gst_pad: gst::Pad, handler: impl PadSinkHandler) -> Self { let this = PadSink(Arc::new(PadSinkInner::new(gst_pad))); this.init_pad_functions(handler); this } pub fn downgrade(&self) -> PadSinkWeak { PadSinkWeak(Arc::downgrade(&self.0)) } pub fn as_ref(&self) -> PadSinkRef<'_> { PadSinkRef::new(Arc::clone(&self.0)) } fn init_pad_functions<H: PadSinkHandler>(&self, handler: H) { // FIXME: Do this better unsafe { let handler_clone = handler.clone(); let inner_arc = Arc::clone(&self.0); self.gst_pad() .set_activate_function(move |gst_pad, parent| { let handler = handler_clone.clone(); let inner_arc = inner_arc.clone(); H::ElementImpl::catch_panic_pad_function( parent, || { gst_error!(RUNTIME_CAT, obj: gst_pad, "Panic in PadSink activate"); Err(gst_loggable_error!( RUNTIME_CAT, "Panic in PadSink activate" )) }, move |imp, element| { let this_ref = PadSinkRef::new(inner_arc); handler.sink_activate( &this_ref, imp, element.dynamic_cast_ref::<gst::Element>().unwrap(), ) }, ) }); let handler_clone = handler.clone(); let inner_arc = Arc::clone(&self.0); self.gst_pad() .set_activatemode_function(move |gst_pad, parent, mode, active| { let handler = handler_clone.clone(); let inner_arc = inner_arc.clone(); H::ElementImpl::catch_panic_pad_function( parent, || { gst_error!(RUNTIME_CAT, obj: gst_pad, "Panic in PadSink activatemode"); Err(gst_loggable_error!( RUNTIME_CAT, "Panic in PadSink activatemode" )) }, move |imp, element| { let this_ref = PadSinkRef::new(inner_arc); this_ref.activate_mode_hook(mode, active)?; handler.sink_activatemode( &this_ref, imp, element.dynamic_cast_ref::<gst::Element>().unwrap(), mode, active, ) }, ) }); let handler_clone = handler.clone(); let inner_arc = Arc::clone(&self.0); self.gst_pad() .set_chain_function(move |_gst_pad, parent, buffer| { let handler = handler_clone.clone(); let inner_arc = inner_arc.clone(); H::ElementImpl::catch_panic_pad_function( parent, || Err(FlowError::Error), move |imp, element| { if Context::current_has_sub_tasks() { let this_weak = PadSinkWeak(Arc::downgrade(&inner_arc)); let handler = handler.clone(); let element = element.clone().dynamic_cast::<gst::Element>().unwrap(); let delayed_fut = async move { let imp = <H::ElementImpl as ObjectSubclass>::from_instance( element.unsafe_cast_ref(), ); let this_ref = this_weak.upgrade().ok_or(gst::FlowError::Flushing)?; handler.sink_chain(&this_ref, imp, &element, buffer).await }; let _ = Context::add_sub_task(delayed_fut.map(|res| res.map(drop))); Ok(gst::FlowSuccess::Ok) } else { let this_ref = PadSinkRef::new(inner_arc); let chain_fut = handler.sink_chain( &this_ref, imp, element.dynamic_cast_ref::<gst::Element>().unwrap(), buffer, ); this_ref.handle_future(chain_fut) } }, ) }); let handler_clone = handler.clone(); let inner_arc = Arc::clone(&self.0); self.gst_pad() .set_chain_list_function(move |_gst_pad, parent, list| { let handler = handler_clone.clone(); let inner_arc = inner_arc.clone(); H::ElementImpl::catch_panic_pad_function( parent, || Err(FlowError::Error), move |imp, element| { if Context::current_has_sub_tasks() { let this_weak = PadSinkWeak(Arc::downgrade(&inner_arc)); let handler = handler.clone(); let element = element.clone().dynamic_cast::<gst::Element>().unwrap(); let delayed_fut = async move { let imp = <H::ElementImpl as ObjectSubclass>::from_instance( element.unsafe_cast_ref(), ); let this_ref = this_weak.upgrade().ok_or(gst::FlowError::Flushing)?; handler .sink_chain_list(&this_ref, imp, &element, list) .await }; let _ = Context::add_sub_task(delayed_fut.map(|res| res.map(drop))); Ok(gst::FlowSuccess::Ok) } else { let this_ref = PadSinkRef::new(inner_arc); let chain_list_fut = handler.sink_chain_list( &this_ref, imp, element.dynamic_cast_ref::<gst::Element>().unwrap(), list, ); this_ref.handle_future(chain_list_fut) } }, ) }); // No need to `set_event_function` since `set_event_full_function` // overrides it and dispatches to `sink_event` when necessary let handler_clone = handler.clone(); let inner_arc = Arc::clone(&self.0); self.gst_pad() .set_event_full_function(move |_gst_pad, parent, event| { let handler = handler_clone.clone(); let inner_arc = inner_arc.clone(); H::ElementImpl::catch_panic_pad_function( parent, || Err(FlowError::Error), move |imp, element| { if event.is_serialized() { if Context::current_has_sub_tasks() { let this_weak = PadSinkWeak(Arc::downgrade(&inner_arc)); let handler = handler.clone(); let element = element.clone().dynamic_cast::<gst::Element>().unwrap(); let delayed_fut = async move { let imp = <H::ElementImpl as ObjectSubclass>::from_instance( element.unsafe_cast_ref(), ); let this_ref = this_weak.upgrade().ok_or(gst::FlowError::Flushing)?; handler .sink_event_full_serialized( &this_ref, imp, &element, event, ) .await }; let _ = Context::add_sub_task(delayed_fut.map(|res| res.map(drop))); Ok(gst::FlowSuccess::Ok) } else { let this_ref = PadSinkRef::new(inner_arc); let event_fut = handler.sink_event_full_serialized( &this_ref, imp, element.dynamic_cast_ref::<gst::Element>().unwrap(), event, ); this_ref.handle_future(event_fut) } } else { let this_ref = PadSinkRef::new(inner_arc); handler.sink_event_full( &this_ref, imp, element.dynamic_cast_ref::<gst::Element>().unwrap(), event, ) } }, ) }); let inner_arc = Arc::clone(&self.0); self.gst_pad() .set_query_function(move |_gst_pad, parent, query| { let handler = handler.clone(); let inner_arc = inner_arc.clone(); H::ElementImpl::catch_panic_pad_function( parent, || false, move |imp, element| { let this_ref = PadSinkRef::new(inner_arc); if !query.is_serialized() { handler.sink_query(&this_ref, imp, element.dynamic_cast_ref::<gst::Element>().unwrap(), query) } else { gst_fixme!(RUNTIME_CAT, obj: this_ref.gst_pad(), "Serialized Query not supported"); false } }, ) }); } } } impl Drop for PadSink { fn drop(&mut self) { // FIXME: Do this better unsafe { self.gst_pad() .set_activate_function(move |_gst_pad, _parent| { Err(gst_loggable_error!(RUNTIME_CAT, "PadSink no longer exists")) }); self.gst_pad() .set_activatemode_function(move |_gst_pad, _parent, _mode, _active| { Err(gst_loggable_error!(RUNTIME_CAT, "PadSink no longer exists")) }); self.gst_pad() .set_chain_function(move |_gst_pad, _parent, _buffer| Err(FlowError::Flushing)); self.gst_pad() .set_chain_list_function(move |_gst_pad, _parent, _list| Err(FlowError::Flushing)); self.gst_pad() .set_event_function(move |_gst_pad, _parent, _event| false); self.gst_pad() .set_event_full_function(move |_gst_pad, _parent, _event| Err(FlowError::Flushing)); self.gst_pad() .set_query_function(move |_gst_pad, _parent, _query| false); } } } impl Deref for PadSink { type Target = PadSinkInner; fn deref(&self) -> &Self::Target { &self.0 } }<|fim▁end|>
) -> BoxFuture<'static, Result<FlowSuccess, FlowError>> {
<|file_name|>BaseModule.ts<|end_file_name|><|fim▁begin|>///<reference path="../../node_modules/grafana-sdk-mocks/app/headers/common.d.ts" /> import angular from 'angular'; import { MetricsPanelCtrl } from 'app/plugins/sdk'; import { BaseParser } from './BaseParser'; abstract class BasePanelCtrl extends MetricsPanelCtrl { static templateUrl = 'module.html'; dataSourceResponse: any; insightsPanelData: any; panelEditor: any; panel: any; responseParser: BaseParser; /** @ngInject */ constructor($scope, $injector) { super($scope, $injector); this.events.on('data-received', this.onDataReceived.bind(this)); this.events.on('data-error', this.onDataError.bind(this)); this.events.on('data-snapshot-load', this.onDataReceived.bind(this)); this.events.on('init-edit-mode', this.onInitEditMode.bind(this)); this.events.on('init-panel-actions', this.onInitPanelActions.bind(this)); this.initializeBaseParser(); this.initializeInsightsPanelMetadata(); } initializeBaseParser() { let datasourceType = this.getDatasourceType(); if (datasourceType === null || datasourceType === undefined) { let self = this; setTimeout(function () { self.initializeBaseParser(); }, 50); } else { this.responseParser = new BaseParser(datasourceType); } } initializeInsightsPanelMetadata() { if (this.panel.insightsPanelData) { this.insightsPanelData = this.panel.insightsPanelData; } else { this.insightsPanelData = {}; this.panel.insightsPanelData = this.insightsPanelData; } } registerEditor(editor) { this.panelEditor = editor; } getInsightsPanelData() { return this.insightsPanelData; } getPanel() { return this.panel; } getDatasourceType() { if (this.datasource) { return this.datasource.meta.name; } return null; } getResponseParser() { return this.responseParser; } onInitEditMode() { this.addEditorTab('Options', this.panelEditor.loadEditorCtrl.bind(this.buildEditorCtrlData()), 2); } buildEditorCtrlData() { return { templateUrl: 'public/plugins/' + this['pluginId'] + '/editor.html', controller: this.panelEditor, controllerAs: 'editorCtrl'<|fim▁hole|> //actions.push({ text: 'Export CSV', click: 'ctrl.exportCsv()' }); } issueQueries(datasource) { return super.issueQueries(datasource); } onDataError(err) { this.dataSourceResponse = []; this.render(); } onDataReceived(dataList) { this.dataSourceResponse = dataList; this.render(); } render() { this.handlePreRender(this.dataSourceResponse); return super.render(this.dataSourceResponse); } //Override for rendering. abstract handlePreRender(dataSourceResponse); //Essential link(scope, elem, attrs, ctrl) { var data; var panel = ctrl.panel; var pageCount = 0; var formaters = []; function renderPanel() { } ctrl.events.on('render', function (renderData) { data = renderData || data; if (data) { renderPanel(); } ctrl.renderingCompleted(); }); } } export { BasePanelCtrl };<|fim▁end|>
}; } onInitPanelActions(actions) {
<|file_name|>markdown_checklist.py<|end_file_name|><|fim▁begin|>#Library avaiable here: https://github.com/FND/markdown-checklist import re from markdown.extensions import Extension<|fim▁hole|> def makeExtension(configs=[]): return ChecklistExtension(configs=configs) class ChecklistExtension(Extension): def extendMarkdown(self, md, md_globals): md.postprocessors.add('checklist', ChecklistPostprocessor(md), '>raw_html') class ChecklistPostprocessor(Postprocessor): """ adds checklist class to list element """ pattern = re.compile(r'<li>\[([ Xx])\]') def run(self, html): html = re.sub(self.pattern, self._convert_checkbox, html) before = '<ul>\n<li><input type="checkbox"' after = before.replace('<ul>', '<ul class="checklist">') return html.replace(before, after) def _convert_checkbox(self, match): state = match.group(1) checked = ' checked' if state != ' ' else '' return '<li><input type="checkbox" disabled%s>' % checked<|fim▁end|>
from markdown.preprocessors import Preprocessor from markdown.postprocessors import Postprocessor
<|file_name|>test_scan.py<|end_file_name|><|fim▁begin|># Copyright 2016 Rudrajit Tapadar # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at<|fim▁hole|># Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from base import TestPscan import errno import mock from StringIO import StringIO import sys class TestScan(TestPscan): @mock.patch('socket.socket.connect') def test_tcp_port_open(self, mock_connect): hosts = "127.0.0.1" ports = "22" mock_connect.return_value = None scanner = self.get_scanner_obj(hosts, ports) scanner.tcp() h = self.get_host_obj(hosts, [22]) h[0].ports[0].status = "Open" self.assertPortsEqual(scanner.hosts[0].ports, h[0].ports) @mock.patch('socket.socket.connect') def test_tcp_port_closed(self, mock_connect): hosts = "127.0.0.1" ports = "22" mock_connect.side_effect = IOError() scanner = self.get_scanner_obj(hosts, ports) scanner.tcp() h = self.get_host_obj(hosts, [22]) h[0].ports[0].status = "Closed" self.assertPortsEqual(scanner.hosts[0].ports, h[0].ports) @mock.patch('socket.socket.connect') def test_tcp_port_range(self, mock_connect): hosts = "127.0.0.1" ports = "21-22" mock_connect.return_value = None mock_connect.side_effect = [IOError(), None] scanner = self.get_scanner_obj(hosts, ports) scanner.tcp() h = self.get_host_obj(hosts, [21, 22]) h[0].ports[0].status = "Closed" h[0].ports[1].status = "Open" self.assertPortsEqual(scanner.hosts[0].ports, h[0].ports) @mock.patch('socket.socket.connect') def test_show_open_port(self, mock_connect): hosts = "127.0.0.1" ports = "5672" mock_connect.return_value = None scanner = self.get_scanner_obj(hosts, ports) scanner.tcp() s = sys.stdout o = StringIO() sys.stdout = o output = ( "Showing results for target: 127.0.0.1\n" "+------+----------+-------+---------+\n" "| Port | Protocol | State | Service |\n" "+------+----------+-------+---------+\n" "| 5672 | TCP | Open | amqp |\n" "+------+----------+-------+---------+" ) scanner.show() self.assertEqual(o.getvalue().strip(), output) sys.stdout = s @mock.patch('socket.socket.connect') def test_show_closed_port(self, mock_connect): hosts = "127.0.0.1" ports = "5673" mock_connect.side_effect = IOError() scanner = self.get_scanner_obj(hosts, ports) scanner.tcp() s = sys.stdout o = StringIO() sys.stdout = o output = ( "Showing results for target: 127.0.0.1\n" "+------+----------+--------+---------+\n" "| Port | Protocol | State | Service |\n" "+------+----------+--------+---------+\n" "| 5673 | TCP | Closed | unknown |\n" "+------+----------+--------+---------+" ) scanner.show() self.assertEqual(o.getvalue().strip(), output) sys.stdout = s @mock.patch('socket.socket.connect') def test_show_closed_port_range(self, mock_connect): hosts = "127.0.0.1" ports = "5673-5674" mock_connect.side_effect = IOError(errno.ECONNREFUSED) scanner = self.get_scanner_obj(hosts, ports) scanner.tcp() s = sys.stdout o = StringIO() sys.stdout = o output = ( "Showing results for target: 127.0.0.1\n" "All 2 scanned ports are closed on the target." ) scanner.show() self.assertEqual(o.getvalue().strip(), output) sys.stdout = s @mock.patch('socket.socket.connect') def test_show_partially_open_port_range(self, mock_connect): hosts = "127.0.0.1" ports = "5671-5672" mock_connect.return_value = None mock_connect.side_effect = [IOError(), None] scanner = self.get_scanner_obj(hosts, ports) scanner.tcp() s = sys.stdout o = StringIO() sys.stdout = o output = ( "Showing results for target: 127.0.0.1\n" "+------+----------+-------+---------+\n" "| Port | Protocol | State | Service |\n" "+------+----------+-------+---------+\n" "| 5672 | TCP | Open | amqp |\n" "+------+----------+-------+---------+" ) scanner.show() self.assertEqual(o.getvalue().strip(), output) @mock.patch('socket.socket.connect') def test_udp_port_open(self, mock_connect): hosts = "127.0.0.1" ports = "53" mock_connect.return_value = None scanner = self.get_scanner_obj(hosts, ports) scanner.udp() #h = self.get_host_obj(hosts, [22]) #h[0].ports[0].status = "Open" #self.assertPortsEqual(scanner.hosts[0].ports, # h[0].ports)<|fim▁end|>
# # http://www.apache.org/licenses/LICENSE-2.0 #
<|file_name|>settings.py<|end_file_name|><|fim▁begin|># """Django Actions Log settings file.""" # from __future__ import unicode_literals from django.conf import settings from django.utils.translation import ugettext_lazy as _ CREATE = 100 SUCCESS = 110 ACTIVATE = 130 AUTH = 150 VIEW = 180 UPDATE = 200 SUSPEND = 250 UNSUSPEND = 260 DELETE = 300 TERMINATE = 500 FAILED = 999 ERROR = 1000 LOG_ACTION_CHOICES_DEFAULT = [ (CREATE, _("create")),<|fim▁hole|> (UPDATE, _("update")), (SUSPEND, _("suspend")), (UNSUSPEND, _("unsuspend")), (DELETE, _("delete")), (TERMINATE, _("terminate")), (FAILED, _("failed")), (ERROR, _("error")), ] AL_LOG_ACTION_SETTINGS = getattr( settings, 'AL_LOG_ACTION_CHOICES', LOG_ACTION_CHOICES_DEFAULT ) LOG_ACTION_CHOICES = [ (value[0], value[1]) for value in AL_LOG_ACTION_SETTINGS ]<|fim▁end|>
(SUCCESS, _("success")), (ACTIVATE, _("activate")), (AUTH, _("authorize")), (VIEW, _("view")),
<|file_name|>globs.rs<|end_file_name|><|fim▁begin|>// Copyright 2017 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(decl_macro)] mod foo { pub fn f() {} } mod bar { pub fn g() {} } macro m($($t:tt)*) { $($t)* use foo::*; f(); g(); //~ ERROR cannot find function `g` in this scope } fn main() { m! { use bar::*; g(); f(); //~ ERROR cannot find function `f` in this scope } } n!(f); macro n($i:ident) { mod foo { pub fn $i() -> u32 { 0 } pub fn f() {} mod test { use super::*; fn g() { let _: u32 = $i(); let _: () = f(); } } macro n($j:ident) { mod test { use super::*;<|fim▁hole|> } } } macro n_with_super($j:ident) { mod test { use super::*; fn g() { let _: u32 = $i(); let _: () = f(); super::$j(); } } } n!(f); //~ ERROR cannot find function `f` in this scope n_with_super!(f); mod test2 { super::n! { f //~ ERROR cannot find function `f` in this scope } super::n_with_super! { f } } } }<|fim▁end|>
fn g() { let _: u32 = $i(); let _: () = f(); $j();
<|file_name|>go_thrift_gen.py<|end_file_name|><|fim▁begin|># coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os import re import subprocess from pants.backend.codegen.subsystems.thrift_defaults import ThriftDefaults from pants.base.build_environment import get_buildroot from pants.base.exceptions import TaskError from pants.base.workunit import WorkUnitLabel from pants.binaries.thrift_binary import ThriftBinary from pants.task.simple_codegen_task import SimpleCodegenTask from pants.util.dirutil import safe_mkdir from pants.util.memo import memoized_property from twitter.common.collections import OrderedSet from pants.contrib.go.targets.go_thrift_library import GoThriftGenLibrary, GoThriftLibrary class GoThriftGen(SimpleCodegenTask): @classmethod def register_options(cls, register): super(GoThriftGen, cls).register_options(register) register('--strict', default=True, fingerprint=True, type=bool, help='Run thrift compiler with strict warnings.') register('--gen-options', advanced=True, fingerprint=True, help='Use these apache thrift go gen options.') register('--thrift-import', advanced=True, help='Use this thrift-import gen option to thrift.') register('--thrift-import-target', advanced=True, help='Use this thrift import on symbolic defs.') @classmethod def subsystem_dependencies(cls): return (super(GoThriftGen, cls).subsystem_dependencies() + (ThriftDefaults, ThriftBinary.Factory.scoped(cls))) @memoized_property def _thrift_binary(self): thrift_binary = ThriftBinary.Factory.scoped_instance(self).create() return thrift_binary.path @memoized_property def _deps(self): thrift_import_target = self.get_options().thrift_import_target thrift_imports = self.context.resolve(thrift_import_target) return thrift_imports @memoized_property def _service_deps(self): service_deps = self.get_options().get('service_deps') return list(self.resolve_deps(service_deps)) if service_deps else self._deps SERVICE_PARSER = re.compile(r'^\s*service\s+(?:[^\s{]+)') NAMESPACE_PARSER = re.compile(r'^\s*namespace go\s+([^\s]+)', re.MULTILINE) def _declares_service(self, source): with open(source) as thrift: return any(line for line in thrift if self.SERVICE_PARSER.search(line)) def _get_go_namespace(self, source): with open(source) as thrift: namespace = self.NAMESPACE_PARSER.search(thrift.read()) if not namespace: raise TaskError('Thrift file {} must contain "namespace go "', source) return namespace.group(1) def synthetic_target_extra_dependencies(self, target, target_workdir): for source in target.sources_relative_to_buildroot(): if self._declares_service(os.path.join(get_buildroot(), source)): return self._service_deps return self._deps def synthetic_target_type(self, target): return GoThriftGenLibrary def is_gentarget(self, target): return isinstance(target, GoThriftLibrary) @memoized_property def _thrift_cmd(self): cmd = [self._thrift_binary] thrift_import = 'thrift_import={}'.format(self.get_options().thrift_import) gen_options = self.get_options().gen_options if gen_options: gen_options += ',' + thrift_import else: gen_options = thrift_import cmd.extend(('--gen', 'go:{}'.format(gen_options))) if self.get_options().strict: cmd.append('-strict') if self.get_options().level == 'debug': cmd.append('-verbose') return cmd def _generate_thrift(self, target, target_workdir): target_cmd = self._thrift_cmd[:] bases = OrderedSet(tgt.target_base for tgt in target.closure() if self.is_gentarget(tgt)) for base in bases: target_cmd.extend(('-I', base)) target_cmd.extend(('-o', target_workdir)) <|fim▁hole|> source = all_sources[0] target_cmd.append(os.path.join(get_buildroot(), source)) with self.context.new_workunit(name=source, labels=[WorkUnitLabel.TOOL], cmd=' '.join(target_cmd)) as workunit: result = subprocess.call(target_cmd, stdout=workunit.output('stdout'), stderr=workunit.output('stderr')) if result != 0: raise TaskError('{} ... exited non-zero ({})'.format(self._thrift_binary, result)) gen_dir = os.path.join(target_workdir, 'gen-go') src_dir = os.path.join(target_workdir, 'src') safe_mkdir(src_dir) go_dir = os.path.join(target_workdir, 'src', 'go') os.rename(gen_dir, go_dir) @classmethod def product_types(cls): return ['go'] def execute_codegen(self, target, target_workdir): self._generate_thrift(target, target_workdir) @property def _copy_target_attributes(self): """Override `_copy_target_attributes` to exclude `provides`.""" return [a for a in super(GoThriftGen, self)._copy_target_attributes if a != 'provides'] def synthetic_target_dir(self, target, target_workdir): all_sources = list(target.sources_relative_to_buildroot()) source = all_sources[0] namespace = self._get_go_namespace(source) return os.path.join(target_workdir, 'src', 'go', namespace.replace(".", os.path.sep))<|fim▁end|>
all_sources = list(target.sources_relative_to_buildroot()) if len(all_sources) != 1: raise TaskError('go_thrift_library only supports a single .thrift source file for {}.', target)
<|file_name|>contestType.js<|end_file_name|><|fim▁begin|>export default class ContestType { constructor(options) { let _id = null; let _name = null; let _berryFlavor = null; let _color = null; Object.defineProperty(this, 'id', { enumarable: true, get() { return _id; } }); Object.defineProperty(this, 'name', { enumarable: true, get() { return _name; } }); Object.defineProperty(this, 'berryFlavor', { enumerable: true, get() { return _berryFlavor; } }); Object.defineProperty(this, 'color', {<|fim▁hole|> return _color; } }); _id = parseInt(options.id) || null; _name = options.name || "No name"; _berryFlavor = options.berryFlavor || "No flavor"; _color = options.color || "No color"; Object.seal(this); } }<|fim▁end|>
enumerable: true, get() {
<|file_name|>main.js<|end_file_name|><|fim▁begin|>'use strict'; var Foundationify = (function () { // Initalize product image gallery function on product pages function initProductImages() { // Define the scope var $productImages = $('#product-images', 'body.product'); // Select the thumbnails var $thumbs = $('ul img', $productImages); if ($thumbs.length) { // Select the large image var $largeImage = $('img', $productImages).first(); // Change the large image src and alt attributes on click<|fim▁hole|> if ($largeImage.attr('src') === $(this).parent('a').attr('href')) { return; } // Change the cursor to the loading cursor $('body').css('cursor', 'progress'); // Change the src and alt attributes of the large image $largeImage.attr('src', $(this).parent('a').attr('href')) .attr('alt', $(this).attr('alt')); }); // Return the loading cursor to default after the large image has loaded $largeImage.on('load', function () { $('body').css('cursor', 'auto'); }); } } return { init: function () { initProductImages(); } }; }()); $(document).ready(function () { Foundationify.init(); });<|fim▁end|>
$thumbs.on('click', function (e) { e.preventDefault(); // Skip if thumb matches large
<|file_name|>object_safety.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. <|fim▁hole|>//! to an object. In general, traits may only be converted to an //! object if all of their methods meet certain criteria. In particular, //! they must: //! //! - have a suitable receiver from which we can extract a vtable; //! - not reference the erased type `Self` except for in this receiver; //! - not have generic type parameters use super::supertraits; use super::elaborate_predicates; use middle::subst::{self, SelfSpace, TypeSpace}; use middle::traits; use middle::ty::{self, ToPolyTraitRef, Ty}; use std::rc::Rc; use syntax::ast; #[derive(Debug)] pub enum ObjectSafetyViolation<'tcx> { /// Self : Sized declared on the trait SizedSelf, /// Supertrait reference references `Self` an in illegal location /// (e.g. `trait Foo : Bar<Self>`) SupertraitSelf, /// Method has something illegal Method(Rc<ty::Method<'tcx>>, MethodViolationCode), } /// Reasons a method might not be object-safe. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub enum MethodViolationCode { /// e.g., `fn foo()` StaticMethod, /// e.g., `fn foo(&self, x: Self)` or `fn foo(&self) -> Self` ReferencesSelf, /// e.g., `fn foo<A>()` Generic, } pub fn is_object_safe<'tcx>(tcx: &ty::ctxt<'tcx>, trait_def_id: ast::DefId) -> bool { // Because we query yes/no results frequently, we keep a cache: let def = tcx.lookup_trait_def(trait_def_id); let result = def.object_safety().unwrap_or_else(|| { let result = object_safety_violations(tcx, trait_def_id).is_empty(); // Record just a yes/no result in the cache; this is what is // queried most frequently. Note that this may overwrite a // previous result, but always with the same thing. def.set_object_safety(result); result }); debug!("is_object_safe({:?}) = {}", trait_def_id, result); result } pub fn object_safety_violations<'tcx>(tcx: &ty::ctxt<'tcx>, trait_def_id: ast::DefId) -> Vec<ObjectSafetyViolation<'tcx>> { traits::supertrait_def_ids(tcx, trait_def_id) .flat_map(|def_id| object_safety_violations_for_trait(tcx, def_id)) .collect() } fn object_safety_violations_for_trait<'tcx>(tcx: &ty::ctxt<'tcx>, trait_def_id: ast::DefId) -> Vec<ObjectSafetyViolation<'tcx>> { // Check methods for violations. let mut violations: Vec<_> = tcx.trait_items(trait_def_id).iter() .flat_map(|item| { match *item { ty::MethodTraitItem(ref m) => { object_safety_violation_for_method(tcx, trait_def_id, &**m) .map(|code| ObjectSafetyViolation::Method(m.clone(), code)) .into_iter() } _ => None.into_iter(), } }) .collect(); // Check the trait itself. if trait_has_sized_self(tcx, trait_def_id) { violations.push(ObjectSafetyViolation::SizedSelf); } if supertraits_reference_self(tcx, trait_def_id) { violations.push(ObjectSafetyViolation::SupertraitSelf); } debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}", trait_def_id, violations); violations } fn supertraits_reference_self<'tcx>(tcx: &ty::ctxt<'tcx>, trait_def_id: ast::DefId) -> bool { let trait_def = tcx.lookup_trait_def(trait_def_id); let trait_ref = trait_def.trait_ref.clone(); let trait_ref = trait_ref.to_poly_trait_ref(); let predicates = tcx.lookup_super_predicates(trait_def_id); predicates .predicates .into_iter() .map(|predicate| predicate.subst_supertrait(tcx, &trait_ref)) .any(|predicate| { match predicate { ty::Predicate::Trait(ref data) => { // In the case of a trait predicate, we can skip the "self" type. data.0.trait_ref.substs.types.get_slice(TypeSpace) .iter() .cloned() .any(is_self) } ty::Predicate::Projection(..) | ty::Predicate::WellFormed(..) | ty::Predicate::ObjectSafe(..) | ty::Predicate::TypeOutlives(..) | ty::Predicate::RegionOutlives(..) | ty::Predicate::Equate(..) => { false } } }) } fn trait_has_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>, trait_def_id: ast::DefId) -> bool { let trait_def = tcx.lookup_trait_def(trait_def_id); let trait_predicates = tcx.lookup_predicates(trait_def_id); generics_require_sized_self(tcx, &trait_def.generics, &trait_predicates) } fn generics_require_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>, generics: &ty::Generics<'tcx>, predicates: &ty::GenericPredicates<'tcx>) -> bool { let sized_def_id = match tcx.lang_items.sized_trait() { Some(def_id) => def_id, None => { return false; /* No Sized trait, can't require it! */ } }; // Search for a predicate like `Self : Sized` amongst the trait bounds. let free_substs = tcx.construct_free_substs(generics, ast::DUMMY_NODE_ID); let predicates = predicates.instantiate(tcx, &free_substs).predicates.into_vec(); elaborate_predicates(tcx, predicates) .any(|predicate| { match predicate { ty::Predicate::Trait(ref trait_pred) if trait_pred.def_id() == sized_def_id => { is_self(trait_pred.0.self_ty()) } ty::Predicate::Projection(..) | ty::Predicate::Trait(..) | ty::Predicate::Equate(..) | ty::Predicate::RegionOutlives(..) | ty::Predicate::WellFormed(..) | ty::Predicate::ObjectSafe(..) | ty::Predicate::TypeOutlives(..) => { false } } }) } /// Returns `Some(_)` if this method makes the containing trait not object safe. fn object_safety_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>, trait_def_id: ast::DefId, method: &ty::Method<'tcx>) -> Option<MethodViolationCode> { // Any method that has a `Self : Sized` requisite is otherwise // exempt from the regulations. if generics_require_sized_self(tcx, &method.generics, &method.predicates) { return None; } virtual_call_violation_for_method(tcx, trait_def_id, method) } /// We say a method is *vtable safe* if it can be invoked on a trait /// object. Note that object-safe traits can have some /// non-vtable-safe methods, so long as they require `Self:Sized` or /// otherwise ensure that they cannot be used when `Self=Trait`. pub fn is_vtable_safe_method<'tcx>(tcx: &ty::ctxt<'tcx>, trait_def_id: ast::DefId, method: &ty::Method<'tcx>) -> bool { virtual_call_violation_for_method(tcx, trait_def_id, method).is_none() } /// Returns `Some(_)` if this method cannot be called on a trait /// object; this does not necessarily imply that the enclosing trait /// is not object safe, because the method might have a where clause /// `Self:Sized`. fn virtual_call_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>, trait_def_id: ast::DefId, method: &ty::Method<'tcx>) -> Option<MethodViolationCode> { // The method's first parameter must be something that derefs (or // autorefs) to `&self`. For now, we only accept `self`, `&self` // and `Box<Self>`. match method.explicit_self { ty::StaticExplicitSelfCategory => { return Some(MethodViolationCode::StaticMethod); } ty::ByValueExplicitSelfCategory | ty::ByReferenceExplicitSelfCategory(..) | ty::ByBoxExplicitSelfCategory => { } } // The `Self` type is erased, so it should not appear in list of // arguments or return type apart from the receiver. let ref sig = method.fty.sig; for &input_ty in &sig.0.inputs[1..] { if contains_illegal_self_type_reference(tcx, trait_def_id, input_ty) { return Some(MethodViolationCode::ReferencesSelf); } } if let ty::FnConverging(result_type) = sig.0.output { if contains_illegal_self_type_reference(tcx, trait_def_id, result_type) { return Some(MethodViolationCode::ReferencesSelf); } } // We can't monomorphize things like `fn foo<A>(...)`. if !method.generics.types.is_empty_in(subst::FnSpace) { return Some(MethodViolationCode::Generic); } None } fn contains_illegal_self_type_reference<'tcx>(tcx: &ty::ctxt<'tcx>, trait_def_id: ast::DefId, ty: Ty<'tcx>) -> bool { // This is somewhat subtle. In general, we want to forbid // references to `Self` in the argument and return types, // since the value of `Self` is erased. However, there is one // exception: it is ok to reference `Self` in order to access // an associated type of the current trait, since we retain // the value of those associated types in the object type // itself. // // ```rust // trait SuperTrait { // type X; // } // // trait Trait : SuperTrait { // type Y; // fn foo(&self, x: Self) // bad // fn foo(&self) -> Self // bad // fn foo(&self) -> Option<Self> // bad // fn foo(&self) -> Self::Y // OK, desugars to next example // fn foo(&self) -> <Self as Trait>::Y // OK // fn foo(&self) -> Self::X // OK, desugars to next example // fn foo(&self) -> <Self as SuperTrait>::X // OK // } // ``` // // However, it is not as simple as allowing `Self` in a projected // type, because there are illegal ways to use `Self` as well: // // ```rust // trait Trait : SuperTrait { // ... // fn foo(&self) -> <Self as SomeOtherTrait>::X; // } // ``` // // Here we will not have the type of `X` recorded in the // object type, and we cannot resolve `Self as SomeOtherTrait` // without knowing what `Self` is. let mut supertraits: Option<Vec<ty::PolyTraitRef<'tcx>>> = None; let mut error = false; ty.maybe_walk(|ty| { match ty.sty { ty::TyParam(ref param_ty) => { if param_ty.space == SelfSpace { error = true; } false // no contained types to walk } ty::TyProjection(ref data) => { // This is a projected type `<Foo as SomeTrait>::X`. // Compute supertraits of current trait lazily. if supertraits.is_none() { let trait_def = tcx.lookup_trait_def(trait_def_id); let trait_ref = ty::Binder(trait_def.trait_ref.clone()); supertraits = Some(traits::supertraits(tcx, trait_ref).collect()); } // Determine whether the trait reference `Foo as // SomeTrait` is in fact a supertrait of the // current trait. In that case, this type is // legal, because the type `X` will be specified // in the object type. Note that we can just use // direct equality here because all of these types // are part of the formal parameter listing, and // hence there should be no inference variables. let projection_trait_ref = ty::Binder(data.trait_ref.clone()); let is_supertrait_of_current_trait = supertraits.as_ref().unwrap().contains(&projection_trait_ref); if is_supertrait_of_current_trait { false // do not walk contained types, do not report error, do collect $200 } else { true // DO walk contained types, POSSIBLY reporting an error } } _ => true, // walk contained types, if any } }); error } fn is_self<'tcx>(ty: Ty<'tcx>) -> bool { match ty.sty { ty::TyParam(ref data) => data.space == subst::SelfSpace, _ => false, } }<|fim▁end|>
//! "Object safety" refers to the ability for a trait to be converted
<|file_name|>mapslackline.js<|end_file_name|><|fim▁begin|>var map; var infoWindow; <|fim▁hole|>// A variável markersData guarda a informação necessária a cada marcador // Para utilizar este código basta alterar a informação contida nesta variável var markersData = [ { lat: -3.741262, lng: -38.539389, nome: "Campus do Pici - Universidade Federal do Ceará", endereco:"Av. Mister Hull, s/n", telefone: "(85) 3366-9500" // não colocar virgula no último item de cada maracdor }, { lat: -3.780833, lng: -38.469656, nome: "Colosso Lake Lounge", endereco:"Rua Hermenegildo Sá Cavalcante, s/n", telefone: "(85) 98160-0088" // não colocar virgula no último item de cada maracdor } // não colocar vírgula no último marcador ]; function initialize() { var mapOptions = { center: new google.maps.LatLng(40.601203,-8.668173), zoom: 9, mapTypeId: 'roadmap', }; map = new google.maps.Map(document.getElementById('map-slackline'), mapOptions); // cria a nova Info Window com referência à variável infowindow // o conteúdo da Info Window será atribuído mais tarde infoWindow = new google.maps.InfoWindow(); // evento que fecha a infoWindow com click no mapa google.maps.event.addListener(map, 'click', function() { infoWindow.close(); }); // Chamada para a função que vai percorrer a informação // contida na variável markersData e criar os marcadores a mostrar no mapa displayMarkers(); } google.maps.event.addDomListener(window, 'load', initialize); // Esta função vai percorrer a informação contida na variável markersData // e cria os marcadores através da função createMarker function displayMarkers(){ // esta variável vai definir a área de mapa a abranger e o nível do zoom // de acordo com as posições dos marcadores var bounds = new google.maps.LatLngBounds(); // Loop que vai estruturar a informação contida em markersData // para que a função createMarker possa criar os marcadores for (var i = 0; i < markersData.length; i++){ var latlng = new google.maps.LatLng(markersData[i].lat, markersData[i].lng); var nome = markersData[i].nome; var endereco = markersData[i].endereco; var telefone = markersData[i].telefone; createMarker(latlng, nome, endereco, telefone); // Os valores de latitude e longitude do marcador são adicionados à // variável bounds bounds.extend(latlng); } // Depois de criados todos os marcadores // a API através da sua função fitBounds vai redefinir o nível do zoom // e consequentemente a área do mapa abrangida. map.fitBounds(bounds); } // Função que cria os marcadores e define o conteúdo de cada Info Window. function createMarker(latlng, nome, endereco, telefone){ var marker = new google.maps.Marker({ map: map, position: latlng, title: nome }); // Evento que dá instrução à API para estar alerta ao click no marcador. // Define o conteúdo e abre a Info Window. google.maps.event.addListener(marker, 'click', function() { // Variável que define a estrutura do HTML a inserir na Info Window. var iwContent = '<div id="iw_container">' + '<div class="iw_title">' + nome + '</div>' + '<div class="iw_content">' + endereco + '<br />' + telefone + '<br />'; // O conteúdo da variável iwContent é inserido na Info Window. infoWindow.setContent(iwContent); // A Info Window é aberta. infoWindow.open(map, marker); }); }<|fim▁end|>
<|file_name|>comment.py<|end_file_name|><|fim▁begin|># # livef1 # # f1comment.py - classes to store the live F1 comments # # Copyright (c) 2014 Marc Bertens <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # # Special thanks to the live-f1 project 'https://launchpad.net/live-f1' # * Scott James Remnant # * Dave Pusey # # For showing the way of program logic. # import logging import time import datetime __version__ = "0.1" __applic__ = "Live F1 Web" __author__ = "Marc Bertens" class F1Text( object ): def __init__( self, ts = 0, c = '', t = '' ): self.timestamp = ts self.clock = c self.text = t return def reset( self ): self.timestamp = 0<|fim▁hole|>class F1Commentary( object ): def __init__( self, log ): self.lines = [] self.log = log return def reset( self ): self.lines = [] return def gethtml( self, div_tag_name ): output = "" for elem in self.lines: if elem.clock: sep = "-" else: sep = "" #endif output = "<tr valign='top'><td>%s</td><td>%s</td><td>%s</td></tr>" % ( elem.clock, sep, elem.text ) + output return """<div class="%s"><table>%s</table></div>""" % ( div_tag_name, output ) def append( self, new ): #self.log.info( "Commentary.time : %i" % ( new.timestamp ) ) #self.log.info( "Commentary.text : %s" % ( new.text ) ) if not new.clock: secs = new.timestamp % 60 mins = new.timestamp // 60 hours = 0 if ( mins > 60 ): hours = mins // 60 mins = mins % 60 # endif # add time stamp new.clock = "%02i:%02i" % ( hours, mins ) self.lines.append( F1Text( new.timestamp, new.clock, new.text ) ) return def dump( self ): for elem in self.lines: self.log.info( "Commentary : %s" % ( elem.text ) ) # next return comment = None def GetCommentary(): global comment if comment == None: comment = F1Commentary( logging.getLogger( 'live-f1' ) ) return comment # end def<|fim▁end|>
self.clock = "" self.text = "" return