Columns:
  file_name  string, 3 to 137 chars
  prefix     string, 0 to 918k chars
  suffix     string, 0 to 962k chars
  middle     string, 0 to 812k chars
file_name: 422.go

prefix:
package main

import "fmt"

type Vertex struct {
	Lat, Long float64
}

var m map[string]Vertex

func

suffix:
() {
	m = make(map[string]Vertex)
	m["Bell Labs"] = Vertex{40.68433, -74.39967}
	fmt.Println(m["Bell Labs"])
}

middle:
main
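Each row is a fill-in-the-middle split of one source file: the middle field belongs between prefix and suffix, as the 422.go row above shows (prefix ends at "func", middle supplies "main", suffix opens with "() {"). A minimal reassembly sketch, assuming the three columns are plain strings; the Row type and field names are illustrative, not part of the dataset:

package main

import "fmt"

// Row mirrors the dataset columns (names assumed from the header above).
type Row struct {
	FileName string
	Prefix   string
	Suffix   string
	Middle   string
}

// assemble restores the original file: prefix, then middle, then suffix.
func assemble(r Row) string {
	return r.Prefix + r.Middle + r.Suffix
}

func main() {
	// Abbreviated values taken from the 422.go row above.
	r := Row{
		FileName: "422.go",
		Prefix:   "package main\n\nimport \"fmt\"\n\n/* ... */\n\nfunc ",
		Middle:   "main",
		Suffix:   "() {\n\t/* ... */\n}\n",
	}
	fmt.Println(assemble(r)) // prints the reconstructed Go source
}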
ld_hmis_report.py
from django.utils.translation import ugettext as _ from corehq.apps.locations.permissions import location_safe from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, NumericColumn from corehq.apps.reports.filters.select import MonthFilter, YearFilter from corehq.apps.reports.standard import MonthYearMixin from corehq.apps.reports.standard.cases.basic import CaseListReport from custom.common.filters import RestrictedAsyncLocationFilter from custom.m4change.reports import validate_report_parameters, get_location_hierarchy_by_id from custom.m4change.reports.reports import M4ChangeReport from custom.m4change.reports.sql_data import LdHmisCaseSqlData def _get_row(row_data, form_data, key): data = form_data.get(key) rows = dict([(row_key, data.get(row_key, 0)) for row_key in row_data]) for key in rows: if rows.get(key) == None: rows[key] = 0 return rows @location_safe class LdHmisReport(MonthYearMixin, CaseListReport, M4ChangeReport): ajax_pagination = False asynchronous = True exportable = True emailable = False name = "Facility L&D HMIS Report" slug = "facility_ld_hmis_report" default_rows = 25 base_template = "m4change/report.html" report_template_path = "m4change/report_content.html" fields = [ RestrictedAsyncLocationFilter, MonthFilter, YearFilter ] @classmethod def get_report_data(cls, config): validate_report_parameters(["domain", "location_id", "datespan"], config) domain = config["domain"] location_id = config["location_id"] user = config["user"] sql_data = LdHmisCaseSqlData(domain=domain, datespan=config["datespan"]).data locations = get_location_hierarchy_by_id(location_id, domain, user) row_data = LdHmisReport.get_initial_row_data() for location_id in locations: key = (domain, location_id) if key in sql_data: report_rows = _get_row(row_data, sql_data, key) for key in report_rows: row_data.get(key)["value"] += report_rows.get(key) return sorted([(key, row_data[key]) for key in row_data], key=lambda t: t[1].get("hmis_code")) @classmethod def get_initial_row_data(cls): return { "deliveries_total": { "hmis_code": 19, "label": _("Deliveries - Total"), "value": 0 }, "deliveries_svd_total": { "hmis_code": 20, "label": _("Deliveries - SVD"), "value": 0 }, "deliveries_assisted_total": { "hmis_code": 21, "label": _("Deliveries - Assisted"), "value": 0 }, "deliveries_caesarean_section_total": { "hmis_code": 22, "label": _("Deliveries caesarean section"), "value": 0 }, "deliveries_complications_total": { "hmis_code": 23, "label": _("Deliveries - Complications"), "value": 0 }, 'deliveries_preterm_total': { "hmis_code": 24, "label": _("Deliveries - Preterm"), "value": 0 }, 'deliveries_hiv_positive_women_total': { "hmis_code": 25, "label": _("Deliveries - HIV positive women"), "value": 0 }, 'live_birth_hiv_positive_women_total': { "hmis_code": 26, "label": _("LiveBirth - HIV positive women"), "value": 0 }, 'deliveries_hiv_positive_booked_women_total': { "hmis_code": 27, "label": _("Deliveries - HIV positive booked women"), "value": 0 }, 'deliveries_hiv_positive_unbooked_women_total': { "hmis_code": 28, "label": _("Deliveries - HIV positive unbooked women"), "value": 0 }, 'deliveries_monitored_using_partograph_total': {
}, 'tt1_total': { "hmis_code": 31, "label": _("TT1"), "value": 0 }, 'tt2_total': { "hmis_code": 32, "label": _("TT2"), "value": 0 }, 'live_births_male_female_total': { "hmis_code": 36, "label": _("Live Births(Male, Female, < 2.5kg, >= 2.5k g)"), "value": 0 }, 'male_lt_2_5kg_total': { "hmis_code": 36.1, "label": _("Male, < 2.5kg"), "value": 0 }, 'male_gte_2_5kg_total': { "hmis_code": 36.2, "label": _("Male, >= 2.5kg"), "value": 0 }, 'female_lt_2_5kg_total': { "hmis_code": 36.3, "label": _("Female, < 2.5kg"), "value": 0 }, 'female_gte_2_5kg_total': { "hmis_code": 36.4, "label": _("Female, >= 2.5kg"), "value": 0 }, 'still_births_total': { "hmis_code": 37, "label": _("Still Births total"), "value": 0 }, 'fresh_still_births_total': { "hmis_code": 38.1, "label": _("Fresh Still Births"), "value": 0 }, 'other_still_births_total': { "hmis_code": 38.2, "label": _("Other still Births"), "value": 0 }, 'abortion_induced_total': { "hmis_code": 39.1, "label": _("Abortion Induced"), "value": 0 }, 'other_abortions_total': { "hmis_code": 39.2, "label": _("Other Abortions"), "value": 0 }, 'total_abortions_total': { "hmis_code": 40, "label": _("Total Abortions"), "value": 0 }, 'birth_asphyxia_total': { "hmis_code": 41, "label": _("Birth Asphyxia - Total"), "value": 0 }, 'birth_asphyxia_male_total': { "hmis_code": 41.1, "label": _("Birth Asphyxia - Male"), "value": 0 }, 'birth_asphyxia_female_total': { "hmis_code": 41.2, "label": _("Birth Asphyxia - Female"), "value": 0 }, 'neonatal_sepsis_total': { "hmis_code": 42, "label": _("Neonatal Sepsis - Total"), "value": 0 }, 'neonatal_sepsis_male_total': { "hmis_code": 42.1, "label": _("Neonatal Sepsis - Male"), "value": 0 }, 'neonatal_sepsis_female_total': { "hmis_code": 42.2, "label": _("Neonatal Sepsis - Female"), "value": 0 }, 'neonatal_tetanus_total': { "hmis_code": 43, "label": _("Neonatal Tetanus - Total"), "value": 0 }, 'neonatal_tetanus_male_total': { "hmis_code": 43.1, "label": _("Neonatal Tetanus - Male"), "value": 0 }, 'neonatal_tetanus_female_total': { "hmis_code": 43.2, "label": _("Neonatal Tetanus - Female"), "value": 0 }, 'neonatal_jaundice_total': { "hmis_code": 44, "label": _("Neonatal Jaundice - Total"), "value": 0 }, 'neonatal_jaundice_male_total': { "hmis_code": 44.1, "label": _("Neonatal Jaundice - Male"), "value": 0 }, 'neonatal_jaundice_female_total': { "hmis_code": 44.2, "label": _("Neonatal Jaundice - Female"), "value": 0 }, 'low_birth_weight_babies_in_kmc_total': { "hmis_code": 45, "label": _("Low birth weight babies placed in KMC - Total"), "value": 0 }, 'low_birth_weight_babies_in_kmc_male_total': { "hmis_code": 45.1, "label": _("Low birth weight babies placed in KMC - Male"), "value": 0 }, 'low_birth_weight_babies_in_kmc_female_total': { "hmis_code": 45.2, "label": _("Low birth weight babies placed in KMC - Female"), "value": 0 } } @property def headers(self): headers = DataTablesHeader(NumericColumn(_("HMIS code")), DataTablesColumn(_("Data Point")), NumericColumn(_("Total"))) return headers @property def rows(self): row_data = LdHmisReport.get_report_data({ "location_id": self.request.GET.get("location_id", None), "datespan": self.datespan, "domain": str(self.domain), "user": self.request.couch_user }) for row in row_data: yield [ self.table_cell(row[1].get("hmis_code")), self.table_cell(row[1].get("label")), self.table_cell(row[1].get("value")) ] @property def rendered_report_title(self): return self.name
"hmis_code": 29, "label": _("Deliveries - Monitored using Partograph"), "value": 0 }, 'deliveries_skilled_birth_attendant_total': { "hmis_code": 30, "label": _("Deliveries taken by skilled birth attendant"), "value": 0
runner.rs
use crate::parser::statement::Statement; use crate::engine::ts_value::TSValue; use crate::lexer::token_kinds::arithmetic_operator::ArithmeticOperator; use crate::engine::environment::environment_record::EnvironmentRecord; use crate::engine::environment::environment_type::EnvironmentType; use crate::parser::value::PrimitiveValue; use crate::engine::environment::run_errors::RunError; use crate::engine::environment::environment_record_binding::EnvironmentRecordBinding; use crate::engine::expressions::function::Function; type RunResult = Result<TSValue, RunError>; pub struct Runner {} impl Runner { pub fn new() -> Self { Self {} } pub fn start(&mut self, statement: Statement) { let global_scope = &mut EnvironmentRecord::new(EnvironmentType::Global); if let Statement::Program(global_statements) = statement { for statement in global_statements { match self.run(statement, global_scope) { Ok(val) => { if val != TSValue::Undefined { println!("{}",val) } } Err(_) => {} } } } } fn run(&mut self, statement: Statement, scope: &mut EnvironmentRecord) -> RunResult { match statement { Statement::FunctionDeclaration(ref name, ref args, ref body) => { let ts_function = TSValue::Function( Function::new(body.clone(), args.clone()) ); let binding = EnvironmentRecordBinding { value: ts_function }; scope.records.insert(name.clone(),binding ); return Ok(TSValue::Undefined); }, Statement::Primitive(PrimitiveValue::Boolean(b)) => { Ok(TSValue::Boolean(b)) }, Statement::Primitive(PrimitiveValue::String(s)) => { Ok(TSValue::String(s)) }, Statement::ConstDeclaration(ref name, ref statements) => { let last_const_statement = statements .clone() .pop() .ok_or(RunError::Message("Variable reference error"))?; let const_declaration = self.run(last_const_statement, scope)?; let binding = EnvironmentRecordBinding { value: const_declaration }; scope.records.insert(name.clone(), binding); Ok(TSValue::Undefined) }, Statement::Call(ref name, ref args) => { let call_binding = scope.records.get(name).ok_or(RunError::Message("Call error - there is no function with this name"))?; let val = call_binding.value.clone(); if let TSValue::Function(val) = val
else { Err(RunError::Message("Call error")) } } Statement::VariableRef(ref name) => { let test = scope.records.get(name).and_then(|v| Option::from(v.value.clone())).ok_or(RunError::Message("Variable reference error"))?; Ok(test) } Statement::Primitive(PrimitiveValue::Num(n)) => Ok(TSValue::Num(n)), Statement::ArithmeticOperation(op, first_val, second_val) => { let first_val = self.run(*first_val, scope).or(Err(RunError::Message("Wrong first arithmetic value")))?; let second_val = self.run(*second_val, scope).or(Err(RunError::Message("Wrong second arithmetic value")))?; let calculated_val = self.run_arithmetic_operation(op, first_val, second_val); Ok(calculated_val) } Statement::Return(v) => { self.run(*v, scope) } v => { println!("not found {}", v); Err(RunError::Message("Not found statement to run")) } } } fn run_arithmetic_operation(&self, op: ArithmeticOperator, first_val: TSValue, second_val: TSValue) -> TSValue { return match op { ArithmeticOperator::PLUS => first_val + second_val, ArithmeticOperator::MINUS => first_val - second_val, ArithmeticOperator::MULTIPLICATION => first_val * second_val, ArithmeticOperator::DIVISION => first_val / second_val }; } }
{ let func_expression = *val.clone().expression; let func_args = val.clone().args; if let Statement::Block(func_expression) = func_expression { let func_scope = &mut EnvironmentRecord::new(EnvironmentType::Function); for (count, arg) in func_args.iter().enumerate() { if let Statement::TypedParameter(name, _) = arg { let passed_arg_value = match args.get(count) { Some(arg_statement) => self.run(arg_statement.clone(), scope)?, None => return Err(RunError::Message("Call error")) }; func_scope.records.insert(name.clone(), EnvironmentRecordBinding { value: passed_arg_value }); } } for statement in func_expression { let function_value = self.run(statement.clone(), func_scope)?; if let Statement::Return(_) = statement { return Ok(function_value); } } } Ok(TSValue::Undefined) }
api.go
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package polly import ( "io" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" ) const opDeleteLexicon = "DeleteLexicon" // DeleteLexiconRequest generates a "aws/request.Request" representing the // client's request for the DeleteLexicon operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DeleteLexicon for more information on using the DeleteLexicon // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DeleteLexiconRequest method. // req, resp := client.DeleteLexiconRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/polly-2016-06-10/DeleteLexicon func (c *Polly) DeleteLexiconRequest(input *DeleteLexiconInput) (req *request.Request, output *DeleteLexiconOutput) { op := &request.Operation{ Name: opDeleteLexicon, HTTPMethod: "DELETE", HTTPPath: "/v1/lexicons/{LexiconName}", } if input == nil { input = &DeleteLexiconInput{} } output = &DeleteLexiconOutput{} req = c.newRequest(op, input, output) return } // DeleteLexicon API operation for Amazon Polly. // // Deletes the specified pronunciation lexicon stored in an AWS Region. A lexicon // which has been deleted is not available for speech synthesis, nor is it possible // to retrieve it using either the GetLexicon or ListLexicon APIs. // // For more information, see Managing Lexicons (http://docs.aws.amazon.com/polly/latest/dg/managing-lexicons.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Polly's // API operation DeleteLexicon for usage and error information. // // Returned Error Codes: // * ErrCodeLexiconNotFoundException "LexiconNotFoundException" // Amazon Polly can't find the specified lexicon. This could be caused by a // lexicon that is missing, its name is misspelled or specifying a lexicon that // is in a different region. // // Verify that the lexicon exists, is in the region (see ListLexicons) and that // you spelled its name is spelled correctly. Then try again. // // * ErrCodeServiceFailureException "ServiceFailureException" // An unknown condition has caused a service failure. // // See also, https://docs.aws.amazon.com/goto/WebAPI/polly-2016-06-10/DeleteLexicon func (c *Polly) DeleteLexicon(input *DeleteLexiconInput) (*DeleteLexiconOutput, error) { req, out := c.DeleteLexiconRequest(input) return out, req.Send() } // DeleteLexiconWithContext is the same as DeleteLexicon with the addition of // the ability to pass a context and additional request options. // // See DeleteLexicon for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Polly) DeleteLexiconWithContext(ctx aws.Context, input *DeleteLexiconInput, opts ...request.Option) (*DeleteLexiconOutput, error) { req, out := c.DeleteLexiconRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDescribeVoices = "DescribeVoices" // DescribeVoicesRequest generates a "aws/request.Request" representing the // client's request for the DescribeVoices operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DescribeVoices for more information on using the DescribeVoices // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DescribeVoicesRequest method. // req, resp := client.DescribeVoicesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/polly-2016-06-10/DescribeVoices func (c *Polly) DescribeVoicesRequest(input *DescribeVoicesInput) (req *request.Request, output *DescribeVoicesOutput) { op := &request.Operation{ Name: opDescribeVoices, HTTPMethod: "GET", HTTPPath: "/v1/voices", } if input == nil { input = &DescribeVoicesInput{} } output = &DescribeVoicesOutput{} req = c.newRequest(op, input, output) return } // DescribeVoices API operation for Amazon Polly. // // Returns the list of voices that are available for use when requesting speech // synthesis. Each voice speaks a specified language, is either male or female, // and is identified by an ID, which is the ASCII version of the voice name. // // When synthesizing speech ( SynthesizeSpeech ), you provide the voice ID for // the voice you want from the list of voices returned by DescribeVoices. // // For example, you want your news reader application to read news in a specific // language, but giving a user the option to choose the voice. Using the DescribeVoices // operation you can provide the user with a list of available voices to select // from. // // You can optionally specify a language code to filter the available voices. // For example, if you specify en-US, the operation returns a list of all available // US English voices. // // This operation requires permissions to perform the polly:DescribeVoices action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Polly's // API operation DescribeVoices for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidNextTokenException "InvalidNextTokenException" // The NextToken is invalid. Verify that it's spelled correctly, and then try // again. // // * ErrCodeServiceFailureException "ServiceFailureException" // An unknown condition has caused a service failure. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/polly-2016-06-10/DescribeVoices func (c *Polly) DescribeVoices(input *DescribeVoicesInput) (*DescribeVoicesOutput, error) { req, out := c.DescribeVoicesRequest(input) return out, req.Send() } // DescribeVoicesWithContext is the same as DescribeVoices with the addition of // the ability to pass a context and additional request options. // // See DescribeVoices for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Polly) DescribeVoicesWithContext(ctx aws.Context, input *DescribeVoicesInput, opts ...request.Option) (*DescribeVoicesOutput, error) { req, out := c.DescribeVoicesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opGetLexicon = "GetLexicon" // GetLexiconRequest generates a "aws/request.Request" representing the // client's request for the GetLexicon operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See GetLexicon for more information on using the GetLexicon // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the GetLexiconRequest method. // req, resp := client.GetLexiconRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/polly-2016-06-10/GetLexicon func (c *Polly) GetLexiconRequest(input *GetLexiconInput) (req *request.Request, output *GetLexiconOutput) { op := &request.Operation{ Name: opGetLexicon, HTTPMethod: "GET", HTTPPath: "/v1/lexicons/{LexiconName}", } if input == nil { input = &GetLexiconInput{} } output = &GetLexiconOutput{} req = c.newRequest(op, input, output) return } // GetLexicon API operation for Amazon Polly. // // Returns the content of the specified pronunciation lexicon stored in an AWS // Region. For more information, see Managing Lexicons (http://docs.aws.amazon.com/polly/latest/dg/managing-lexicons.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Polly's // API operation GetLexicon for usage and error information. // // Returned Error Codes: // * ErrCodeLexiconNotFoundException "LexiconNotFoundException" // Amazon Polly can't find the specified lexicon. This could be caused by a // lexicon that is missing, its name is misspelled or specifying a lexicon that // is in a different region. // // Verify that the lexicon exists, is in the region (see ListLexicons) and that // you spelled its name is spelled correctly. Then try again. // // * ErrCodeServiceFailureException "ServiceFailureException" // An unknown condition has caused a service failure. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/polly-2016-06-10/GetLexicon func (c *Polly) GetLexicon(input *GetLexiconInput) (*GetLexiconOutput, error) { req, out := c.GetLexiconRequest(input) return out, req.Send() } // GetLexiconWithContext is the same as GetLexicon with the addition of // the ability to pass a context and additional request options. // // See GetLexicon for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Polly) GetLexiconWithContext(ctx aws.Context, input *GetLexiconInput, opts ...request.Option) (*GetLexiconOutput, error) { req, out := c.GetLexiconRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opListLexicons = "ListLexicons" // ListLexiconsRequest generates a "aws/request.Request" representing the // client's request for the ListLexicons operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See ListLexicons for more information on using the ListLexicons // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the ListLexiconsRequest method. // req, resp := client.ListLexiconsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/polly-2016-06-10/ListLexicons func (c *Polly) ListLexiconsRequest(input *ListLexiconsInput) (req *request.Request, output *ListLexiconsOutput) { op := &request.Operation{ Name: opListLexicons, HTTPMethod: "GET", HTTPPath: "/v1/lexicons", } if input == nil { input = &ListLexiconsInput{} } output = &ListLexiconsOutput{} req = c.newRequest(op, input, output) return } // ListLexicons API operation for Amazon Polly. // // Returns a list of pronunciation lexicons stored in an AWS Region. For more // information, see Managing Lexicons (http://docs.aws.amazon.com/polly/latest/dg/managing-lexicons.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Polly's // API operation ListLexicons for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidNextTokenException "InvalidNextTokenException" // The NextToken is invalid. Verify that it's spelled correctly, and then try // again. // // * ErrCodeServiceFailureException "ServiceFailureException" // An unknown condition has caused a service failure. // // See also, https://docs.aws.amazon.com/goto/WebAPI/polly-2016-06-10/ListLexicons func (c *Polly) ListLexicons(input *ListLexiconsInput) (*ListLexiconsOutput, error) { req, out := c.ListLexiconsRequest(input) return out, req.Send() } // ListLexiconsWithContext is the same as ListLexicons with the addition of // the ability to pass a context and additional request options. 
// // See ListLexicons for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Polly) ListLexiconsWithContext(ctx aws.Context, input *ListLexiconsInput, opts ...request.Option) (*ListLexiconsOutput, error) { req, out := c.ListLexiconsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opPutLexicon = "PutLexicon" // PutLexiconRequest generates a "aws/request.Request" representing the // client's request for the PutLexicon operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See PutLexicon for more information on using the PutLexicon // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the PutLexiconRequest method. // req, resp := client.PutLexiconRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/polly-2016-06-10/PutLexicon func (c *Polly) PutLexiconRequest(input *PutLexiconInput) (req *request.Request, output *PutLexiconOutput) { op := &request.Operation{ Name: opPutLexicon, HTTPMethod: "PUT", HTTPPath: "/v1/lexicons/{LexiconName}", } if input == nil { input = &PutLexiconInput{} } output = &PutLexiconOutput{} req = c.newRequest(op, input, output) return } // PutLexicon API operation for Amazon Polly. // // Stores a pronunciation lexicon in an AWS Region. If a lexicon with the same // name already exists in the region, it is overwritten by the new lexicon. // Lexicon operations have eventual consistency, therefore, it might take some // time before the lexicon is available to the SynthesizeSpeech operation. // // For more information, see Managing Lexicons (http://docs.aws.amazon.com/polly/latest/dg/managing-lexicons.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Polly's // API operation PutLexicon for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidLexiconException "InvalidLexiconException" // Amazon Polly can't find the specified lexicon. Verify that the lexicon's // name is spelled correctly, and then try again. // // * ErrCodeUnsupportedPlsAlphabetException "UnsupportedPlsAlphabetException" // The alphabet specified by the lexicon is not a supported alphabet. Valid // values are x-sampa and ipa. // // * ErrCodeUnsupportedPlsLanguageException "UnsupportedPlsLanguageException" // The language specified in the lexicon is unsupported. For a list of supported // languages, see Lexicon Attributes (http://docs.aws.amazon.com/polly/latest/dg/API_LexiconAttributes.html). // // * ErrCodeLexiconSizeExceededException "LexiconSizeExceededException" // The maximum size of the specified lexicon would be exceeded by this operation. 
// // * ErrCodeMaxLexemeLengthExceededException "MaxLexemeLengthExceededException" // The maximum size of the lexeme would be exceeded by this operation. // // * ErrCodeMaxLexiconsNumberExceededException "MaxLexiconsNumberExceededException" // The maximum number of lexicons would be exceeded by this operation. // // * ErrCodeServiceFailureException "ServiceFailureException" // An unknown condition has caused a service failure. // // See also, https://docs.aws.amazon.com/goto/WebAPI/polly-2016-06-10/PutLexicon func (c *Polly) PutLexicon(input *PutLexiconInput) (*PutLexiconOutput, error) { req, out := c.PutLexiconRequest(input) return out, req.Send() } // PutLexiconWithContext is the same as PutLexicon with the addition of // the ability to pass a context and additional request options. // // See PutLexicon for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Polly) PutLexiconWithContext(ctx aws.Context, input *PutLexiconInput, opts ...request.Option) (*PutLexiconOutput, error) { req, out := c.PutLexiconRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opSynthesizeSpeech = "SynthesizeSpeech" // SynthesizeSpeechRequest generates a "aws/request.Request" representing the // client's request for the SynthesizeSpeech operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See SynthesizeSpeech for more information on using the SynthesizeSpeech // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the SynthesizeSpeechRequest method. // req, resp := client.SynthesizeSpeechRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/polly-2016-06-10/SynthesizeSpeech func (c *Polly) SynthesizeSpeechRequest(input *SynthesizeSpeechInput) (req *request.Request, output *SynthesizeSpeechOutput) { op := &request.Operation{ Name: opSynthesizeSpeech, HTTPMethod: "POST", HTTPPath: "/v1/speech", } if input == nil { input = &SynthesizeSpeechInput{} } output = &SynthesizeSpeechOutput{} req = c.newRequest(op, input, output) return } // SynthesizeSpeech API operation for Amazon Polly. // // Synthesizes UTF-8 input, plain text or SSML, to a stream of bytes. SSML input // must be valid, well-formed SSML. Some alphabets might not be available with // all the voices (for example, Cyrillic might not be read at all by English // voices) unless phoneme mapping is used. For more information, see How it // Works (http://docs.aws.amazon.com/polly/latest/dg/how-text-to-speech-works.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Polly's // API operation SynthesizeSpeech for usage and error information. 
// // Returned Error Codes: // * ErrCodeTextLengthExceededException "TextLengthExceededException" // The value of the "Text" parameter is longer than the accepted limits. The // limit for input text is a maximum of 3000 characters total, of which no more // than 1500 can be billed characters. SSML tags are not counted as billed characters. // // * ErrCodeInvalidSampleRateException "InvalidSampleRateException" // The specified sample rate is not valid. // // * ErrCodeInvalidSsmlException "InvalidSsmlException" // The SSML you provided is invalid. Verify the SSML syntax, spelling of tags // and values, and then try again. // // * ErrCodeLexiconNotFoundException "LexiconNotFoundException" // Amazon Polly can't find the specified lexicon. This could be caused by a // lexicon that is missing, its name is misspelled or specifying a lexicon that // is in a different region. // // Verify that the lexicon exists, is in the region (see ListLexicons) and that // you spelled its name is spelled correctly. Then try again. // // * ErrCodeServiceFailureException "ServiceFailureException" // An unknown condition has caused a service failure. // // * ErrCodeMarksNotSupportedForFormatException "MarksNotSupportedForFormatException" // Speech marks are not supported for the OutputFormat selected. Speech marks // are only available for content in json format. // // * ErrCodeSsmlMarksNotSupportedForTextTypeException "SsmlMarksNotSupportedForTextTypeException" // SSML speech marks are not supported for plain text-type input. // // See also, https://docs.aws.amazon.com/goto/WebAPI/polly-2016-06-10/SynthesizeSpeech func (c *Polly) SynthesizeSpeech(input *SynthesizeSpeechInput) (*SynthesizeSpeechOutput, error) { req, out := c.SynthesizeSpeechRequest(input) return out, req.Send() } // SynthesizeSpeechWithContext is the same as SynthesizeSpeech with the addition of // the ability to pass a context and additional request options. // // See SynthesizeSpeech for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Polly) SynthesizeSpeechWithContext(ctx aws.Context, input *SynthesizeSpeechInput, opts ...request.Option) (*SynthesizeSpeechOutput, error) { req, out := c.SynthesizeSpeechRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } type DeleteLexiconInput struct { _ struct{} `type:"structure"` // The name of the lexicon to delete. Must be an existing lexicon in the region. // // Name is a required field Name *string `location:"uri" locationName:"LexiconName" type:"string" required:"true"` } // String returns the string representation func (s DeleteLexiconInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteLexiconInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *DeleteLexiconInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DeleteLexiconInput"} if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetName sets the Name field's value. 
func (s *DeleteLexiconInput) SetName(v string) *DeleteLexiconInput { s.Name = &v return s } type DeleteLexiconOutput struct { _ struct{} `type:"structure"` } // String returns the string representation func (s DeleteLexiconOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteLexiconOutput) GoString() string { return s.String() } type DescribeVoicesInput struct { _ struct{} `type:"structure"` // The language identification tag (ISO 639 code for the language name-ISO 3166 // country code) for filtering the list of voices returned. If you don't specify // this optional parameter, all available voices are returned. LanguageCode *string `location:"querystring" locationName:"LanguageCode" type:"string" enum:"LanguageCode"` // An opaque pagination token returned from the previous DescribeVoices operation. // If present, this indicates where to continue the listing. NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` } // String returns the string representation func (s DescribeVoicesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeVoicesInput) GoString() string { return s.String() } // SetLanguageCode sets the LanguageCode field's value. func (s *DescribeVoicesInput) SetLanguageCode(v string) *DescribeVoicesInput { s.LanguageCode = &v return s } // SetNextToken sets the NextToken field's value. func (s *DescribeVoicesInput) SetNextToken(v string) *DescribeVoicesInput { s.NextToken = &v return s } type DescribeVoicesOutput struct { _ struct{} `type:"structure"` // The pagination token to use in the next request to continue the listing of // voices. NextToken is returned only if the response is truncated. NextToken *string `type:"string"` // A list of voices with their properties. Voices []*Voice `type:"list"` } // String returns the string representation func (s DescribeVoicesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeVoicesOutput) GoString() string { return s.String() } // SetNextToken sets the NextToken field's value. func (s *DescribeVoicesOutput) SetNextToken(v string) *DescribeVoicesOutput { s.NextToken = &v return s } // SetVoices sets the Voices field's value. func (s *DescribeVoicesOutput) SetVoices(v []*Voice) *DescribeVoicesOutput { s.Voices = v return s } type GetLexiconInput struct { _ struct{} `type:"structure"` // Name of the lexicon. // // Name is a required field Name *string `location:"uri" locationName:"LexiconName" type:"string" required:"true"` } // String returns the string representation func (s GetLexiconInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s GetLexiconInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *GetLexiconInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "GetLexiconInput"} if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetName sets the Name field's value. func (s *GetLexiconInput) SetName(v string) *GetLexiconInput { s.Name = &v return s } type GetLexiconOutput struct { _ struct{} `type:"structure"` // Lexicon object that provides name and the string content of the lexicon. 
Lexicon *Lexicon `type:"structure"` // Metadata of the lexicon, including phonetic alphabetic used, language code, // lexicon ARN, number of lexemes defined in the lexicon, and size of lexicon // in bytes. LexiconAttributes *LexiconAttributes `type:"structure"` } // String returns the string representation func (s GetLexiconOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s GetLexiconOutput) GoString() string { return s.String() } // SetLexicon sets the Lexicon field's value. func (s *GetLexiconOutput) SetLexicon(v *Lexicon) *GetLexiconOutput { s.Lexicon = v return s } // SetLexiconAttributes sets the LexiconAttributes field's value. func (s *GetLexiconOutput) SetLexiconAttributes(v *LexiconAttributes) *GetLexiconOutput { s.LexiconAttributes = v return s } // Provides lexicon name and lexicon content in string format. For more information, // see Pronunciation Lexicon Specification (PLS) Version 1.0 (https://www.w3.org/TR/pronunciation-lexicon/). type Lexicon struct { _ struct{} `type:"structure"` // Lexicon content in string format. The content of a lexicon must be in PLS // format. Content *string `type:"string"` // Name of the lexicon. Name *string `type:"string"` } // String returns the string representation func (s Lexicon) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Lexicon) GoString() string { return s.String() } // SetContent sets the Content field's value. func (s *Lexicon) SetContent(v string) *Lexicon { s.Content = &v return s } // SetName sets the Name field's value. func (s *Lexicon) SetName(v string) *Lexicon { s.Name = &v return s } // Contains metadata describing the lexicon such as the number of lexemes, language // code, and so on. For more information, see Managing Lexicons (http://docs.aws.amazon.com/polly/latest/dg/managing-lexicons.html). type LexiconAttributes struct { _ struct{} `type:"structure"` // Phonetic alphabet used in the lexicon. Valid values are ipa and x-sampa. Alphabet *string `type:"string"` // Language code that the lexicon applies to. A lexicon with a language code // such as "en" would be applied to all English languages (en-GB, en-US, en-AUS, // en-WLS, and so on. LanguageCode *string `type:"string" enum:"LanguageCode"` // Date lexicon was last modified (a timestamp value). LastModified *time.Time `type:"timestamp" timestampFormat:"unix"` // Number of lexemes in the lexicon. LexemesCount *int64 `type:"integer"` // Amazon Resource Name (ARN) of the lexicon. LexiconArn *string `type:"string"` // Total size of the lexicon, in characters. Size *int64 `type:"integer"` } // String returns the string representation func (s LexiconAttributes) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s LexiconAttributes) GoString() string { return s.String() } // SetAlphabet sets the Alphabet field's value. func (s *LexiconAttributes) SetAlphabet(v string) *LexiconAttributes { s.Alphabet = &v return s } // SetLanguageCode sets the LanguageCode field's value. func (s *LexiconAttributes) SetLanguageCode(v string) *LexiconAttributes { s.LanguageCode = &v return s } // SetLastModified sets the LastModified field's value. func (s *LexiconAttributes) SetLastModified(v time.Time) *LexiconAttributes { s.LastModified = &v return s } // SetLexemesCount sets the LexemesCount field's value. 
func (s *LexiconAttributes) SetLexemesCount(v int64) *LexiconAttributes { s.LexemesCount = &v return s } // SetLexiconArn sets the LexiconArn field's value. func (s *LexiconAttributes) SetLexiconArn(v string) *LexiconAttributes { s.LexiconArn = &v return s } // SetSize sets the Size field's value. func (s *LexiconAttributes) SetSize(v int64) *LexiconAttributes { s.Size = &v return s } // Describes the content of the lexicon. type LexiconDescription struct { _ struct{} `type:"structure"` // Provides lexicon metadata. Attributes *LexiconAttributes `type:"structure"` // Name of the lexicon. Name *string `type:"string"` } // String returns the string representation func (s LexiconDescription) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s LexiconDescription) GoString() string { return s.String() } // SetAttributes sets the Attributes field's value. func (s *LexiconDescription) SetAttributes(v *LexiconAttributes) *LexiconDescription { s.Attributes = v return s } // SetName sets the Name field's value. func (s *LexiconDescription) SetName(v string) *LexiconDescription { s.Name = &v return s } type ListLexiconsInput struct { _ struct{} `type:"structure"` // An opaque pagination token returned from previous ListLexicons operation. // If present, indicates where to continue the list of lexicons. NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` } // String returns the string representation func (s ListLexiconsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListLexiconsInput) GoString() string { return s.String() } // SetNextToken sets the NextToken field's value. func (s *ListLexiconsInput) SetNextToken(v string) *ListLexiconsInput { s.NextToken = &v return s } type ListLexiconsOutput struct { _ struct{} `type:"structure"` // A list of lexicon names and attributes. Lexicons []*LexiconDescription `type:"list"` // The pagination token to use in the next request to continue the listing of // lexicons. NextToken is returned only if the response is truncated. NextToken *string `type:"string"` } // String returns the string representation func (s ListLexiconsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListLexiconsOutput) GoString() string { return s.String() } // SetLexicons sets the Lexicons field's value. func (s *ListLexiconsOutput) SetLexicons(v []*LexiconDescription) *ListLexiconsOutput { s.Lexicons = v return s } // SetNextToken sets the NextToken field's value. func (s *ListLexiconsOutput) SetNextToken(v string) *ListLexiconsOutput { s.NextToken = &v return s } type PutLexiconInput struct { _ struct{} `type:"structure"` // Content of the PLS lexicon as string data. // // Content is a required field Content *string `type:"string" required:"true"` // Name of the lexicon. The name must follow the regular express format [0-9A-Za-z]{1,20}. // That is, the name is a case-sensitive alphanumeric string up to 20 characters // long. // // Name is a required field Name *string `location:"uri" locationName:"LexiconName" type:"string" required:"true"` } // String returns the string representation func (s PutLexiconInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PutLexiconInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *PutLexiconInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "PutLexiconInput"} if s.Content == nil { invalidParams.Add(request.NewErrParamRequired("Content")) } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetContent sets the Content field's value. func (s *PutLexiconInput) SetContent(v string) *PutLexiconInput { s.Content = &v return s } // SetName sets the Name field's value. func (s *PutLexiconInput) SetName(v string) *PutLexiconInput { s.Name = &v return s } type PutLexiconOutput struct { _ struct{} `type:"structure"` } // String returns the string representation func (s PutLexiconOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PutLexiconOutput) GoString() string { return s.String() } type SynthesizeSpeechInput struct { _ struct{} `type:"structure"` // List of one or more pronunciation lexicon names you want the service to apply // during synthesis. Lexicons are applied only if the language of the lexicon // is the same as the language of the voice. For information about storing lexicons, // see PutLexicon (http://docs.aws.amazon.com/polly/latest/dg/API_PutLexicon.html). LexiconNames []*string `type:"list"` // The format in which the returned output will be encoded. For audio stream, // this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json. // // OutputFormat is a required field OutputFormat *string `type:"string" required:"true" enum:"OutputFormat"` // The audio frequency specified in Hz. // // The valid values for mp3 and ogg_vorbis are "8000", "16000", and "22050". // The default value is "22050". // // Valid values for pcm are "8000" and "16000" The default value is "16000". SampleRate *string `type:"string"` // The type of speech marks returned for the input text. SpeechMarkTypes []*string `type:"list"` // Input text to synthesize. If you specify ssml as the TextType, follow the // SSML format for the input text. // // Text is a required field Text *string `type:"string" required:"true"` // Specifies whether the input text is plain text or SSML. The default value // is plain text. For more information, see Using SSML (http://docs.aws.amazon.com/polly/latest/dg/ssml.html). TextType *string `type:"string" enum:"TextType"` // Voice ID to use for the synthesis. You can get a list of available voice // IDs by calling the DescribeVoices (http://docs.aws.amazon.com/polly/latest/dg/API_DescribeVoices.html) // operation. // // VoiceId is a required field VoiceId *string `type:"string" required:"true" enum:"VoiceId"` } // String returns the string representation func (s SynthesizeSpeechInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s SynthesizeSpeechInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *SynthesizeSpeechInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "SynthesizeSpeechInput"} if s.OutputFormat == nil { invalidParams.Add(request.NewErrParamRequired("OutputFormat")) } if s.Text == nil
if s.VoiceId == nil { invalidParams.Add(request.NewErrParamRequired("VoiceId")) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetLexiconNames sets the LexiconNames field's value. func (s *SynthesizeSpeechInput) SetLexiconNames(v []*string) *SynthesizeSpeechInput { s.LexiconNames = v return s } // SetOutputFormat sets the OutputFormat field's value. func (s *SynthesizeSpeechInput) SetOutputFormat(v string) *SynthesizeSpeechInput { s.OutputFormat = &v return s } // SetSampleRate sets the SampleRate field's value. func (s *SynthesizeSpeechInput) SetSampleRate(v string) *SynthesizeSpeechInput { s.SampleRate = &v return s } // SetSpeechMarkTypes sets the SpeechMarkTypes field's value. func (s *SynthesizeSpeechInput) SetSpeechMarkTypes(v []*string) *SynthesizeSpeechInput { s.SpeechMarkTypes = v return s } // SetText sets the Text field's value. func (s *SynthesizeSpeechInput) SetText(v string) *SynthesizeSpeechInput { s.Text = &v return s } // SetTextType sets the TextType field's value. func (s *SynthesizeSpeechInput) SetTextType(v string) *SynthesizeSpeechInput { s.TextType = &v return s } // SetVoiceId sets the VoiceId field's value. func (s *SynthesizeSpeechInput) SetVoiceId(v string) *SynthesizeSpeechInput { s.VoiceId = &v return s } type SynthesizeSpeechOutput struct { _ struct{} `type:"structure" payload:"AudioStream"` // Stream containing the synthesized speech. AudioStream io.ReadCloser `type:"blob"` // Specifies the type audio stream. This should reflect the OutputFormat parameter // in your request. // // * If you request mp3 as the OutputFormat, the ContentType returned is // audio/mpeg. // // * If you request ogg_vorbis as the OutputFormat, the ContentType returned // is audio/ogg. // // * If you request pcm as the OutputFormat, the ContentType returned is // audio/pcm in a signed 16-bit, 1 channel (mono), little-endian format. // // // * If you request json as the OutputFormat, the ContentType returned is // audio/json. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` // Number of characters synthesized. RequestCharacters *int64 `location:"header" locationName:"x-amzn-RequestCharacters" type:"integer"` } // String returns the string representation func (s SynthesizeSpeechOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s SynthesizeSpeechOutput) GoString() string { return s.String() } // SetAudioStream sets the AudioStream field's value. func (s *SynthesizeSpeechOutput) SetAudioStream(v io.ReadCloser) *SynthesizeSpeechOutput { s.AudioStream = v return s } // SetContentType sets the ContentType field's value. func (s *SynthesizeSpeechOutput) SetContentType(v string) *SynthesizeSpeechOutput { s.ContentType = &v return s } // SetRequestCharacters sets the RequestCharacters field's value. func (s *SynthesizeSpeechOutput) SetRequestCharacters(v int64) *SynthesizeSpeechOutput { s.RequestCharacters = &v return s } // Description of the voice. type Voice struct { _ struct{} `type:"structure"` // Gender of the voice. Gender *string `type:"string" enum:"Gender"` // Amazon Polly assigned voice ID. This is the ID that you specify when calling // the SynthesizeSpeech operation. Id *string `type:"string" enum:"VoiceId"` // Language code of the voice. LanguageCode *string `type:"string" enum:"LanguageCode"` // Human readable name of the language in English. LanguageName *string `type:"string"` // Name of the voice (for example, Salli, Kendra, etc.). 
This provides a human // readable voice name that you might display in your application. Name *string `type:"string"` } // String returns the string representation func (s Voice) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Voice) GoString() string { return s.String() } // SetGender sets the Gender field's value. func (s *Voice) SetGender(v string) *Voice { s.Gender = &v return s } // SetId sets the Id field's value. func (s *Voice) SetId(v string) *Voice { s.Id = &v return s } // SetLanguageCode sets the LanguageCode field's value. func (s *Voice) SetLanguageCode(v string) *Voice { s.LanguageCode = &v return s } // SetLanguageName sets the LanguageName field's value. func (s *Voice) SetLanguageName(v string) *Voice { s.LanguageName = &v return s } // SetName sets the Name field's value. func (s *Voice) SetName(v string) *Voice { s.Name = &v return s } const ( // GenderFemale is a Gender enum value GenderFemale = "Female" // GenderMale is a Gender enum value GenderMale = "Male" ) const ( // LanguageCodeCyGb is a LanguageCode enum value LanguageCodeCyGb = "cy-GB" // LanguageCodeDaDk is a LanguageCode enum value LanguageCodeDaDk = "da-DK" // LanguageCodeDeDe is a LanguageCode enum value LanguageCodeDeDe = "de-DE" // LanguageCodeEnAu is a LanguageCode enum value LanguageCodeEnAu = "en-AU" // LanguageCodeEnGb is a LanguageCode enum value LanguageCodeEnGb = "en-GB" // LanguageCodeEnGbWls is a LanguageCode enum value LanguageCodeEnGbWls = "en-GB-WLS" // LanguageCodeEnIn is a LanguageCode enum value LanguageCodeEnIn = "en-IN" // LanguageCodeEnUs is a LanguageCode enum value LanguageCodeEnUs = "en-US" // LanguageCodeEsEs is a LanguageCode enum value LanguageCodeEsEs = "es-ES" // LanguageCodeEsUs is a LanguageCode enum value LanguageCodeEsUs = "es-US" // LanguageCodeFrCa is a LanguageCode enum value LanguageCodeFrCa = "fr-CA" // LanguageCodeFrFr is a LanguageCode enum value LanguageCodeFrFr = "fr-FR" // LanguageCodeIsIs is a LanguageCode enum value LanguageCodeIsIs = "is-IS" // LanguageCodeItIt is a LanguageCode enum value LanguageCodeItIt = "it-IT" // LanguageCodeKoKr is a LanguageCode enum value LanguageCodeKoKr = "ko-KR" // LanguageCodeJaJp is a LanguageCode enum value LanguageCodeJaJp = "ja-JP" // LanguageCodeNbNo is a LanguageCode enum value LanguageCodeNbNo = "nb-NO" // LanguageCodeNlNl is a LanguageCode enum value LanguageCodeNlNl = "nl-NL" // LanguageCodePlPl is a LanguageCode enum value LanguageCodePlPl = "pl-PL" // LanguageCodePtBr is a LanguageCode enum value LanguageCodePtBr = "pt-BR" // LanguageCodePtPt is a LanguageCode enum value LanguageCodePtPt = "pt-PT" // LanguageCodeRoRo is a LanguageCode enum value LanguageCodeRoRo = "ro-RO" // LanguageCodeRuRu is a LanguageCode enum value LanguageCodeRuRu = "ru-RU" // LanguageCodeSvSe is a LanguageCode enum value LanguageCodeSvSe = "sv-SE" // LanguageCodeTrTr is a LanguageCode enum value LanguageCodeTrTr = "tr-TR" ) const ( // OutputFormatJson is a OutputFormat enum value OutputFormatJson = "json" // OutputFormatMp3 is a OutputFormat enum value OutputFormatMp3 = "mp3" // OutputFormatOggVorbis is a OutputFormat enum value OutputFormatOggVorbis = "ogg_vorbis" // OutputFormatPcm is a OutputFormat enum value OutputFormatPcm = "pcm" ) const ( // SpeechMarkTypeSentence is a SpeechMarkType enum value SpeechMarkTypeSentence = "sentence" // SpeechMarkTypeSsml is a SpeechMarkType enum value SpeechMarkTypeSsml = "ssml" // SpeechMarkTypeViseme is a SpeechMarkType enum value SpeechMarkTypeViseme = 
"viseme" // SpeechMarkTypeWord is a SpeechMarkType enum value SpeechMarkTypeWord = "word" ) const ( // TextTypeSsml is a TextType enum value TextTypeSsml = "ssml" // TextTypeText is a TextType enum value TextTypeText = "text" ) const ( // VoiceIdGeraint is a VoiceId enum value VoiceIdGeraint = "Geraint" // VoiceIdGwyneth is a VoiceId enum value VoiceIdGwyneth = "Gwyneth" // VoiceIdMads is a VoiceId enum value VoiceIdMads = "Mads" // VoiceIdNaja is a VoiceId enum value VoiceIdNaja = "Naja" // VoiceIdHans is a VoiceId enum value VoiceIdHans = "Hans" // VoiceIdMarlene is a VoiceId enum value VoiceIdMarlene = "Marlene" // VoiceIdNicole is a VoiceId enum value VoiceIdNicole = "Nicole" // VoiceIdRussell is a VoiceId enum value VoiceIdRussell = "Russell" // VoiceIdAmy is a VoiceId enum value VoiceIdAmy = "Amy" // VoiceIdBrian is a VoiceId enum value VoiceIdBrian = "Brian" // VoiceIdEmma is a VoiceId enum value VoiceIdEmma = "Emma" // VoiceIdRaveena is a VoiceId enum value VoiceIdRaveena = "Raveena" // VoiceIdIvy is a VoiceId enum value VoiceIdIvy = "Ivy" // VoiceIdJoanna is a VoiceId enum value VoiceIdJoanna = "Joanna" // VoiceIdJoey is a VoiceId enum value VoiceIdJoey = "Joey" // VoiceIdJustin is a VoiceId enum value VoiceIdJustin = "Justin" // VoiceIdKendra is a VoiceId enum value VoiceIdKendra = "Kendra" // VoiceIdKimberly is a VoiceId enum value VoiceIdKimberly = "Kimberly" // VoiceIdMatthew is a VoiceId enum value VoiceIdMatthew = "Matthew" // VoiceIdSalli is a VoiceId enum value VoiceIdSalli = "Salli" // VoiceIdConchita is a VoiceId enum value VoiceIdConchita = "Conchita" // VoiceIdEnrique is a VoiceId enum value VoiceIdEnrique = "Enrique" // VoiceIdMiguel is a VoiceId enum value VoiceIdMiguel = "Miguel" // VoiceIdPenelope is a VoiceId enum value VoiceIdPenelope = "Penelope" // VoiceIdChantal is a VoiceId enum value VoiceIdChantal = "Chantal" // VoiceIdCeline is a VoiceId enum value VoiceIdCeline = "Celine" // VoiceIdMathieu is a VoiceId enum value VoiceIdMathieu = "Mathieu" // VoiceIdDora is a VoiceId enum value VoiceIdDora = "Dora" // VoiceIdKarl is a VoiceId enum value VoiceIdKarl = "Karl" // VoiceIdCarla is a VoiceId enum value VoiceIdCarla = "Carla" // VoiceIdGiorgio is a VoiceId enum value VoiceIdGiorgio = "Giorgio" // VoiceIdMizuki is a VoiceId enum value VoiceIdMizuki = "Mizuki" // VoiceIdLiv is a VoiceId enum value VoiceIdLiv = "Liv" // VoiceIdLotte is a VoiceId enum value VoiceIdLotte = "Lotte" // VoiceIdRuben is a VoiceId enum value VoiceIdRuben = "Ruben" // VoiceIdEwa is a VoiceId enum value VoiceIdEwa = "Ewa" // VoiceIdJacek is a VoiceId enum value VoiceIdJacek = "Jacek" // VoiceIdJan is a VoiceId enum value VoiceIdJan = "Jan" // VoiceIdMaja is a VoiceId enum value VoiceIdMaja = "Maja" // VoiceIdRicardo is a VoiceId enum value VoiceIdRicardo = "Ricardo" // VoiceIdVitoria is a VoiceId enum value VoiceIdVitoria = "Vitoria" // VoiceIdCristiano is a VoiceId enum value VoiceIdCristiano = "Cristiano" // VoiceIdInes is a VoiceId enum value VoiceIdInes = "Ines" // VoiceIdCarmen is a VoiceId enum value VoiceIdCarmen = "Carmen" // VoiceIdMaxim is a VoiceId enum value VoiceIdMaxim = "Maxim" // VoiceIdTatyana is a VoiceId enum value VoiceIdTatyana = "Tatyana" // VoiceIdAstrid is a VoiceId enum value VoiceIdAstrid = "Astrid" // VoiceIdFiliz is a VoiceId enum value VoiceIdFiliz = "Filiz" // VoiceIdVicki is a VoiceId enum value VoiceIdVicki = "Vicki" // VoiceIdTakumi is a VoiceId enum value VoiceIdTakumi = "Takumi" // VoiceIdSeoyeon is a VoiceId enum value VoiceIdSeoyeon = "Seoyeon" // 
VoiceIdAditi is a VoiceId enum value VoiceIdAditi = "Aditi" )
{ invalidParams.Add(request.NewErrParamRequired("Text")) }
separator_tool_item.rs
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use crate::Align; use crate::Bin; use crate::Buildable; use crate::Container; use crate::ResizeMode; use crate::ToolItem; use crate::Widget; use glib::object::Cast; use glib::object::IsA; use glib::signal::connect_raw; use glib::signal::SignalHandlerId; use glib::translate::*; use glib::StaticType; use glib::ToValue; use std::boxed::Box as Box_; use std::fmt; use std::mem::transmute; glib::wrapper! { #[doc(alias = "GtkSeparatorToolItem")] pub struct SeparatorToolItem(Object<ffi::GtkSeparatorToolItem, ffi::GtkSeparatorToolItemClass>) @extends ToolItem, Bin, Container, Widget, @implements Buildable; match fn { type_ => || ffi::gtk_separator_tool_item_get_type(), } } impl SeparatorToolItem { #[doc(alias = "gtk_separator_tool_item_new")] pub fn new() -> SeparatorToolItem { assert_initialized_main_thread!(); unsafe { ToolItem::from_glib_none(ffi::gtk_separator_tool_item_new()).unsafe_cast() } } // rustdoc-stripper-ignore-next /// Creates a new builder-pattern struct instance to construct [`SeparatorToolItem`] objects. /// /// This method returns an instance of [`SeparatorToolItemBuilder`] which can be used to create [`SeparatorToolItem`] objects. pub fn builder() -> SeparatorToolItemBuilder { SeparatorToolItemBuilder::default() } } impl Default for SeparatorToolItem { fn default() -> Self { Self::new() } } #[derive(Clone, Default)] // rustdoc-stripper-ignore-next /// A [builder-pattern] type to construct [`SeparatorToolItem`] objects. /// /// [builder-pattern]: https://doc.rust-lang.org/1.0.0/style/ownership/builders.html pub struct SeparatorToolItemBuilder { draw: Option<bool>, is_important: Option<bool>, visible_horizontal: Option<bool>, visible_vertical: Option<bool>, border_width: Option<u32>, child: Option<Widget>, resize_mode: Option<ResizeMode>, app_paintable: Option<bool>, can_default: Option<bool>, can_focus: Option<bool>, events: Option<gdk::EventMask>, expand: Option<bool>, #[cfg(any(feature = "v3_20", feature = "dox"))] #[cfg_attr(feature = "dox", doc(cfg(feature = "v3_20")))] focus_on_click: Option<bool>, halign: Option<Align>, has_default: Option<bool>, has_focus: Option<bool>, has_tooltip: Option<bool>, height_request: Option<i32>, hexpand: Option<bool>, hexpand_set: Option<bool>, is_focus: Option<bool>, margin: Option<i32>, margin_bottom: Option<i32>, margin_end: Option<i32>, margin_start: Option<i32>, margin_top: Option<i32>, name: Option<String>, no_show_all: Option<bool>, opacity: Option<f64>, parent: Option<Container>, receives_default: Option<bool>, sensitive: Option<bool>, tooltip_markup: Option<String>, tooltip_text: Option<String>, valign: Option<Align>, vexpand: Option<bool>, vexpand_set: Option<bool>, visible: Option<bool>, width_request: Option<i32>, } impl SeparatorToolItemBuilder { // rustdoc-stripper-ignore-next /// Create a new [`SeparatorToolItemBuilder`]. pub fn new() -> Self { Self::default() } // rustdoc-stripper-ignore-next /// Build the [`SeparatorToolItem`]. 
pub fn build(self) -> SeparatorToolItem { let mut properties: Vec<(&str, &dyn ToValue)> = vec![]; if let Some(ref draw) = self.draw { properties.push(("draw", draw)); } if let Some(ref is_important) = self.is_important { properties.push(("is-important", is_important)); } if let Some(ref visible_horizontal) = self.visible_horizontal { properties.push(("visible-horizontal", visible_horizontal)); } if let Some(ref visible_vertical) = self.visible_vertical { properties.push(("visible-vertical", visible_vertical)); } if let Some(ref border_width) = self.border_width { properties.push(("border-width", border_width)); } if let Some(ref child) = self.child { properties.push(("child", child)); } if let Some(ref resize_mode) = self.resize_mode { properties.push(("resize-mode", resize_mode)); } if let Some(ref app_paintable) = self.app_paintable { properties.push(("app-paintable", app_paintable)); } if let Some(ref can_default) = self.can_default { properties.push(("can-default", can_default)); } if let Some(ref can_focus) = self.can_focus { properties.push(("can-focus", can_focus)); } if let Some(ref events) = self.events { properties.push(("events", events)); } if let Some(ref expand) = self.expand { properties.push(("expand", expand)); } #[cfg(any(feature = "v3_20", feature = "dox"))] if let Some(ref focus_on_click) = self.focus_on_click { properties.push(("focus-on-click", focus_on_click)); } if let Some(ref halign) = self.halign { properties.push(("halign", halign)); } if let Some(ref has_default) = self.has_default { properties.push(("has-default", has_default)); } if let Some(ref has_focus) = self.has_focus { properties.push(("has-focus", has_focus)); } if let Some(ref has_tooltip) = self.has_tooltip { properties.push(("has-tooltip", has_tooltip)); } if let Some(ref height_request) = self.height_request { properties.push(("height-request", height_request)); } if let Some(ref hexpand) = self.hexpand { properties.push(("hexpand", hexpand)); } if let Some(ref hexpand_set) = self.hexpand_set { properties.push(("hexpand-set", hexpand_set)); } if let Some(ref is_focus) = self.is_focus { properties.push(("is-focus", is_focus)); } if let Some(ref margin) = self.margin { properties.push(("margin", margin)); } if let Some(ref margin_bottom) = self.margin_bottom { properties.push(("margin-bottom", margin_bottom)); } if let Some(ref margin_end) = self.margin_end { properties.push(("margin-end", margin_end)); } if let Some(ref margin_start) = self.margin_start { properties.push(("margin-start", margin_start)); } if let Some(ref margin_top) = self.margin_top { properties.push(("margin-top", margin_top)); } if let Some(ref name) = self.name { properties.push(("name", name)); } if let Some(ref no_show_all) = self.no_show_all { properties.push(("no-show-all", no_show_all)); } if let Some(ref opacity) = self.opacity { properties.push(("opacity", opacity)); } if let Some(ref parent) = self.parent { properties.push(("parent", parent)); } if let Some(ref receives_default) = self.receives_default { properties.push(("receives-default", receives_default)); } if let Some(ref sensitive) = self.sensitive { properties.push(("sensitive", sensitive)); } if let Some(ref tooltip_markup) = self.tooltip_markup { properties.push(("tooltip-markup", tooltip_markup)); } if let Some(ref tooltip_text) = self.tooltip_text { properties.push(("tooltip-text", tooltip_text)); } if let Some(ref valign) = self.valign { properties.push(("valign", valign)); } if let Some(ref vexpand) = self.vexpand { properties.push(("vexpand", vexpand)); } 
if let Some(ref vexpand_set) = self.vexpand_set { properties.push(("vexpand-set", vexpand_set)); } if let Some(ref visible) = self.visible { properties.push(("visible", visible)); } if let Some(ref width_request) = self.width_request { properties.push(("width-request", width_request)); } glib::Object::new::<SeparatorToolItem>(&properties) .expect("Failed to create an instance of SeparatorToolItem") } pub fn draw(mut self, draw: bool) -> Self { self.draw = Some(draw); self } pub fn is_important(mut self, is_important: bool) -> Self { self.is_important = Some(is_important); self } pub fn visible_horizontal(mut self, visible_horizontal: bool) -> Self { self.visible_horizontal = Some(visible_horizontal); self } pub fn visible_vertical(mut self, visible_vertical: bool) -> Self { self.visible_vertical = Some(visible_vertical); self } pub fn border_width(mut self, border_width: u32) -> Self { self.border_width = Some(border_width); self } pub fn child(mut self, child: &impl IsA<Widget>) -> Self { self.child = Some(child.clone().upcast()); self } pub fn resize_mode(mut self, resize_mode: ResizeMode) -> Self { self.resize_mode = Some(resize_mode); self } pub fn app_paintable(mut self, app_paintable: bool) -> Self { self.app_paintable = Some(app_paintable); self } pub fn can_default(mut self, can_default: bool) -> Self { self.can_default = Some(can_default); self } pub fn can_focus(mut self, can_focus: bool) -> Self { self.can_focus = Some(can_focus); self } pub fn events(mut self, events: gdk::EventMask) -> Self { self.events = Some(events); self } pub fn expand(mut self, expand: bool) -> Self { self.expand = Some(expand); self } #[cfg(any(feature = "v3_20", feature = "dox"))] #[cfg_attr(feature = "dox", doc(cfg(feature = "v3_20")))] pub fn focus_on_click(mut self, focus_on_click: bool) -> Self { self.focus_on_click = Some(focus_on_click); self } pub fn halign(mut self, halign: Align) -> Self { self.halign = Some(halign); self } pub fn has_default(mut self, has_default: bool) -> Self { self.has_default = Some(has_default); self } pub fn has_focus(mut self, has_focus: bool) -> Self { self.has_focus = Some(has_focus); self } pub fn has_tooltip(mut self, has_tooltip: bool) -> Self { self.has_tooltip = Some(has_tooltip); self } pub fn height_request(mut self, height_request: i32) -> Self { self.height_request = Some(height_request); self } pub fn hexpand(mut self, hexpand: bool) -> Self { self.hexpand = Some(hexpand); self } pub fn hexpand_set(mut self, hexpand_set: bool) -> Self { self.hexpand_set = Some(hexpand_set); self } pub fn is_focus(mut self, is_focus: bool) -> Self { self.is_focus = Some(is_focus); self } pub fn margin(mut self, margin: i32) -> Self { self.margin = Some(margin); self } pub fn margin_bottom(mut self, margin_bottom: i32) -> Self { self.margin_bottom = Some(margin_bottom); self } pub fn margin_end(mut self, margin_end: i32) -> Self { self.margin_end = Some(margin_end); self } pub fn margin_start(mut self, margin_start: i32) -> Self
pub fn margin_top(mut self, margin_top: i32) -> Self { self.margin_top = Some(margin_top); self } pub fn name(mut self, name: &str) -> Self { self.name = Some(name.to_string()); self } pub fn no_show_all(mut self, no_show_all: bool) -> Self { self.no_show_all = Some(no_show_all); self } pub fn opacity(mut self, opacity: f64) -> Self { self.opacity = Some(opacity); self } pub fn parent(mut self, parent: &impl IsA<Container>) -> Self { self.parent = Some(parent.clone().upcast()); self } pub fn receives_default(mut self, receives_default: bool) -> Self { self.receives_default = Some(receives_default); self } pub fn sensitive(mut self, sensitive: bool) -> Self { self.sensitive = Some(sensitive); self } pub fn tooltip_markup(mut self, tooltip_markup: &str) -> Self { self.tooltip_markup = Some(tooltip_markup.to_string()); self } pub fn tooltip_text(mut self, tooltip_text: &str) -> Self { self.tooltip_text = Some(tooltip_text.to_string()); self } pub fn valign(mut self, valign: Align) -> Self { self.valign = Some(valign); self } pub fn vexpand(mut self, vexpand: bool) -> Self { self.vexpand = Some(vexpand); self } pub fn vexpand_set(mut self, vexpand_set: bool) -> Self { self.vexpand_set = Some(vexpand_set); self } pub fn visible(mut self, visible: bool) -> Self { self.visible = Some(visible); self } pub fn width_request(mut self, width_request: i32) -> Self { self.width_request = Some(width_request); self } } impl SeparatorToolItem { pub const NONE: Option<&'static SeparatorToolItem> = None; } pub trait SeparatorToolItemExt: 'static { #[doc(alias = "gtk_separator_tool_item_get_draw")] #[doc(alias = "get_draw")] fn draws(&self) -> bool; #[doc(alias = "gtk_separator_tool_item_set_draw")] fn set_draw(&self, draw: bool); #[doc(alias = "draw")] fn connect_draw_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; } impl<O: IsA<SeparatorToolItem>> SeparatorToolItemExt for O { fn draws(&self) -> bool { unsafe { from_glib(ffi::gtk_separator_tool_item_get_draw( self.as_ref().to_glib_none().0, )) } } fn set_draw(&self, draw: bool) { unsafe { ffi::gtk_separator_tool_item_set_draw(self.as_ref().to_glib_none().0, draw.into_glib()); } } fn connect_draw_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_draw_trampoline< P: IsA<SeparatorToolItem>, F: Fn(&P) + 'static, >( this: *mut ffi::GtkSeparatorToolItem, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(SeparatorToolItem::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::draw\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_draw_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } } impl fmt::Display for SeparatorToolItem { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("SeparatorToolItem") } }
{ self.margin_start = Some(margin_start); self }
debugger_module.ts
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ import {CommonModule} from '@angular/common'; import {NgModule} from '@angular/core'; import {EffectsModule} from '@ngrx/effects'; import {StoreModule} from '@ngrx/store'; import {CoreModule} from '../../../webapp/core/core_module'; import {PluginRegistryModule} from '../../../webapp/plugins/plugin_registry_module'; import {Tfdbg2ServerDataSourceModule} from './data_source/tfdbg2_data_source_module'; import {DebuggerComponent} from './debugger_component'; import {DebuggerContainer} from './debugger_container'; import {DebuggerEffects} from './effects'; import {reducers} from './store/debugger_reducers'; import {DEBUGGER_FEATURE_KEY} from './store/debugger_types'; import {PLUGIN_ID} from './types'; import {AlertsModule} from './views/alerts/alerts_module'; import {GraphModule} from './views/graph/graph_module'; import {GraphExecutionsModule} from './views/graph_executions/graph_executions_module'; import {InactiveModule} from './views/inactive/inactive_module'; import {SourceFilesModule} from './views/source_files/source_files_module'; import {StackTraceModule} from './views/stack_trace/stack_trace_module'; import {TimelineModule} from './views/timeline/timeline_module'; @NgModule({ declarations: [DebuggerComponent, DebuggerContainer], imports: [ AlertsModule, CommonModule, CoreModule, GraphExecutionsModule, GraphModule, InactiveModule, SourceFilesModule, StackTraceModule, Tfdbg2ServerDataSourceModule, TimelineModule, StoreModule.forFeature(DEBUGGER_FEATURE_KEY, reducers), EffectsModule.forFeature([DebuggerEffects]), PluginRegistryModule.forPlugin(PLUGIN_ID, DebuggerContainer), ], exports: [DebuggerContainer], entryComponents: [DebuggerContainer], }) export class
{}
DebuggerModule
test43_tf_official.py
from models import Linear3 from core.Optimizers import sgd, bgd from core.Functions import one_hot_f import numpy as np from tensorflow import keras from core.Dataloader import batch_iterator def
(model, test_inputs, test_labels): num_of_sample = test_inputs.shape[0] cnt_correct, cnt_tot = 0, 0 for i in range(num_of_sample): test_input = test_inputs[i:i + 1] test_label = test_labels[i] res = model.forward_prop(test_input) if np.argmax(res) == np.argmax(test_label): cnt_correct += 1 cnt_tot += 1 return cnt_correct / cnt_tot fashion_mnist = keras.datasets.fashion_mnist (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data() train_images = np.expand_dims(train_images / 255, axis=-1) test_images = np.expand_dims(test_images / 255, axis=-1) train_labels = one_hot_f(train_labels, num_classes=10) test_labels = one_hot_f(test_labels, num_classes=10) Linear3.compile() Linear3.cuda() train_iterator = batch_iterator(batch_sz=256) optimizer = bgd(0.01) optimizer.fit(Linear3, train_images, train_labels, train_iterator, epoch=50) Linear3.save('Linear3_cuda')
test
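A minimal NumPy sketch of the argmax-based accuracy rule used by the test function in the row above; the arrays here are hypothetical stand-ins, and the model, optimizer and Dataloader from the row are not needed:

import numpy as np

# Hypothetical stand-ins: 4 one-hot labels (classes 3, 1, 7, 7) and 4 score rows.
labels = np.eye(10)[[3, 1, 7, 7]]
scores = np.random.rand(4, 10)
scores[np.arange(4), [3, 1, 7, 2]] += 10.0  # force predicted classes 3, 1, 7, 2

# Same rule as the test loop: a sample is correct when the argmax of the
# prediction matches the argmax of its one-hot label.
correct = int((np.argmax(scores, axis=1) == np.argmax(labels, axis=1)).sum())
print(correct / len(labels))  # 0.75 with the forced predictions above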
Intern.js
const Employee = require('./Employee'); class
extends Employee { constructor(name, email, id, school) { super(name, email, id); this.school = school; } getSchool() { const school = this.school; return school; } getRole() { const intern = 'Intern'; return intern; } }; module.exports = Intern
Intern
functions.go
package flow import ( "gotomate/log" ) // End signals on the finished channel that the fiber has finished func End(finished chan bool) { log.Plain("| Fiber Finished |") finished <- true } // Start signals on the finished channel that the fiber has started func Start(finished chan bool)
{ log.Plain("| Fiber Start |") finished <- true }
utils.rs
use std::env; use super::algorithm::StrPat; use std::io::prelude::*; use termion::{ color, style }; use rand::random; const DEFAULT_N: usize = 10; const DEFAULT_STRING_LEN: usize = 200; const DEFAULT_PATTERN_LEN: usize = 20; pub fn generate_string_of_size(size: usize) -> String { (0..size).map(|_| (0x61u8 + (random::<f32>() * 22.0) as u8) as char).collect() } pub fn generate_data(string_len: usize, pattern_len: usize) -> StrPat { StrPat::new(generate_string_of_size(string_len), generate_string_of_size(pattern_len)) } pub fn read_data() -> StrPat
rn.len(); let text = &data.string; for pos in result { println!("{}{}{}{}{}{}", &text[0..*pos], color::Bg(color::Green), style::Bold, &text[*pos..(*pos + patlen)], style::Reset, &text[(*pos + patlen)..]); } } pub fn get_n() -> usize { let n = env::var("N"); match n { Ok(n) => n.trim().parse().expect("Parse string to usize"), Err(_) => DEFAULT_N, } } pub fn get_string_len() -> usize { let string_len = env::var("STRING_LEN"); match string_len { Ok(string_len) => string_len.trim().parse().expect("Parse string to usize"), Err(_) => DEFAULT_STRING_LEN, } } pub fn get_pattern_len() -> usize { let pattern_len = env::var("PATTERN_LEN"); match pattern_len { Ok(pattern_len) => pattern_len.trim().parse().expect("Parse string to usize"), Err(_) => DEFAULT_PATTERN_LEN, } }
{ let mut string = String::new(); print!("Enter the string to search within: "); std::io::stdout().flush().expect("Can't flush stdout"); std::io::stdin().read_line(&mut string).expect("Read string"); string.pop(); let mut pattern = String::new(); print!("Enter the substring to search for: "); std::io::stdout().flush().expect("Can't flush stdout"); std::io::stdin().read_line(&mut pattern).expect("Read string"); pattern.pop(); StrPat::new(string, pattern) } pub fn show_result(result: &[usize], data: &StrPat) { let patlen = data.patte
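A loose Python analogue (an assumed toy version, not a translation of the Rust) of the test-data generation and match-position collection in utils.rs above; note the original samples only the first 22 lowercase letters (0x61 plus a random offset below 22):

import random
import string

ALPHABET = string.ascii_lowercase[:22]  # mirrors 0x61u8 + [0, 22) in the Rust code

def generate_string_of_size(size):
    return "".join(random.choice(ALPHABET) for _ in range(size))

def find_all(text, pattern):
    # Collect every starting index of `pattern` in `text`, overlaps included.
    positions, start = [], text.find(pattern)
    while start != -1:
        positions.append(start)
        start = text.find(pattern, start + 1)
    return positions

text = generate_string_of_size(200)
pattern = text[50:55]  # guarantee at least one occurrence
print(find_all(text, pattern))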
service.go
/* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. */ package k8s import ( "context" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" ) func NewKubernetesService(name string, namespace string) *corev1.Service { return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, }, Spec: corev1.ServiceSpec{ Selector: map[string]string{ "app": name, }, Ports: []corev1.ServicePort{ { Port: 9080, }, }, }, } } func CreateKubernetesService(ctx context.Context, client client.Client, key types.NamespacedName, service *corev1.Service) error
{ err := client.Get(ctx, key, service) if err != nil { err = client.Create(ctx, service) } return err }
seckey.rs
#![cfg_attr(feature = "cargo-clippy", allow(blacklisted_name))] #![cfg(feature = "use_std")] extern crate seckey; use seckey::SecKey; #[test] fn seckey_read_then_read() { let secpass = SecKey::new(1).unwrap(); let rpass1 = secpass.read(); let rpass2 = secpass.read(); assert_eq!(1, *rpass1); assert_eq!(1, *rpass2); drop(rpass1); assert_eq!(1, *rpass2); } #[test] fn seckey_drop_test() { static mut X: usize = 0; #[derive(Debug)] struct Bar(usize); #[derive(Debug)] struct Baz<T>(T); impl Drop for Bar { fn drop(&mut self) { unsafe { X += 1; assert_eq!( self.0, if X == 2 { 3 } else { X } ); } } } { let bar = Bar(1); let bar2 = SecKey::new(bar).unwrap(); drop(bar2); } assert_eq!(unsafe { X }, 1); { let bar = Bar(3); let bar3 = unsafe { SecKey::from_ptr(&bar).unwrap() }; drop(bar); drop(bar3); } assert_eq!(unsafe { X }, 3); { let baz = Baz(Bar(4)); let baz2 = SecKey::new(baz).unwrap(); drop(baz2); } assert_eq!(unsafe { X }, 4); } #[test] fn test_seckey_ref()
{ pub struct Bar(u32); impl Drop for Bar { fn drop(&mut self) { assert_eq!(self.0, 0x42); self.0 += 1; } } let mut bar = Bar(0x42); { let _sk = SecKey::new(&mut bar).ok().unwrap(); } assert_eq!(bar.0, 0x42); }
router.js
import React, { Fragment } from 'react' import {BrowserRouter, Route, Routes} from "react-router-dom"; import ContentPage from '../components/content'; import { PrivateRoute } from '../components/privateRouter'; import Sidebar from '../components/sidebar'; import Categories from '../pages/categories'; import { Dashboard } from '../pages/dashboard'; import Home from '../pages/home'; import Logout from '../pages/login/logout'; import NotFound from '../pages/NotFound';
import Users from '../pages/users'; const Router = (props) => { return ( <BrowserRouter> <Sidebar theme={props.theme}/> <ContentPage theme={props.theme} content = { <Routes> <Fragment> <Route path="/" index element={ <Home /> } /> <Route path="/products" element={ <Products /> } /> <Route path="/products/:id" element={ <ViewProduct /> } /> <Route path="/categories" element={ <Categories /> } /> <Route exact path="/users" element={ <PrivateRoute /> }> <Route exact path="/users" element={ <Users /> } /> </Route> <Route exact path="/dashboard" element={ <PrivateRoute /> }> <Route exact path="/dashboard" element={ <Dashboard /> } /> </Route> <Route path="/logout" element={ <Logout /> } /> <Route path="*" element={ <NotFound /> } /> </Fragment> </Routes> } /> </BrowserRouter> ) } export default Router;
import Products from '../pages/products'; import { ViewProduct } from '../pages/products/ViewProduct';
stringClass.spec.js
describe('String class additional methods implementation', function() { 'use strict'; // English words that do not contain vowels // http://wordslisted.com/words/no-vowels/ // --- hasVowels method test --- describe('hasVowels method tests', function() { it('should return a boolean value', function() { expect(typeof 'Eugene Mutai'.hasVowels()).toBe('boolean'); }); it('Should returns true if the string contains vowels', function() { // should return true, which is a truthy value expect('Eugene'.hasVowels()).toBe(true); expect('Eugene Mutai'.hasVowels()).toBe(true); expect('1000 ryhmes'.hasVowels()).toBe(true); }); it('Should returns false if the string does not contains vowels', function() { // should return false, which is a falsy value expect(typeof 'Rythmn'.hasVowels()).toBe('boolean'); expect('flybys'.hasVowels()).toBeFalsy(); expect('CRYPTS'.hasVowels()).toBe(false); }); }); // --- toUpper method test --- describe('toUpper method tests', function() { it('should return a string value', function() { expect(typeof 'Ron weasley'.toUpper()).toBe('string'); }); it('should returns the String in question but with all characters in uppercases where applicable', function() { // should return a string all the words are all uppercase expect('Harry Potter'.toUpper()).toBe('HARRY POTTER'); expect('severus Snape'.toUpper()).toBe('SEVERUS SNAPE'); }); it('spaces/symbolic characters should be preserved/kept', function() { // matches any non-character or non-digit character. expect('Fluer Dela\'Cour (Co-operation)'.toUpper()).toMatch(/[^\w\s]{1,}/); }); }); // --- toLower method test --- describe('toLower method tests', function() { it('should return a string value', function() { expect(typeof 'ron WEASley'.toLower()).toBe('string'); }); it('should returns the String in question but with all characters in lowercases where applicable', function() { // should return a string all the words are all uppercase expect('HARRY POTTER'.toLower()).toBe('harry potter'); expect('Severus Snape'.toLower()).toBe('severus snape'); }); it('spaces/symbolic characters should be preserved/kept', function() { // matches any non-character or non-digit character. expect('Fluer Dela\'Cour (Co-operation)'.toLower()).toMatch(/[^\w\s]{1,}/); }); }); // --- ucFirst method test --- describe('ucFirst method tests', function() { it('should return a string value', function() { expect(typeof 'lamboghini'.ucFirst()).toBe('string'); }); it('should returns the String in question but changes the first character in uppercase', function() { // should return string with the 1st character uppercased, lowercasing the rest expect('lamboghini'.ucFirst()).toBe('Lamboghini'); expect('Lykan'.ucFirst()).toBe('Lykan'); expect('look AT That Car'.ucFirst()).toBe('Look at that car'); expect('WORTH 100,000'.ucFirst()).toBe('Worth 100,000'); }); }); // --- isQuestion method test --- describe('isQuestion method tests', function() { it('should return a boolean value', function() { expect(typeof 'Are you a developer?'.isQuestion()).toBe('boolean'); expect(typeof 'yes I am!!'.isQuestion()).toBe('boolean'); }); it('should return true if the string is a question (ending with a question mark)', function() { expect('Are you a developer?'.isQuestion()).toBe(true); expect('yes I am, checkout my git-repo.'.isQuestion()).toBe(false); expect('Huh? 
I did not know it\'s true!'.isQuestion()).toBe(false); }); }); // --- words method method test --- describe('words method tests', function() { it('should return instance of an array which is a type of object', function() { // demo sentense var wordsInSentence = 'The quick brown fox jumps over the lazy dog'; expect(Object.prototype.toString.call(wordsInSentence.words())).toBe('[object Array]'); }); it('Should returns a list of the words in the string, as an Array from string provided', function() { expect('Are you a developer?'.words()).toEqual(['Are', 'you', 'a', 'developer']); expect('He must have known!?! I want to leave you.'.words()).toEqual(['He', 'must', 'have', 'known', 'I', 'want', 'to', 'leave', 'you']); expect('You should test test test TESTS!!'.words()).toEqual(['You', 'should', 'test', 'TESTS']); }); }); // --- wordCount method test --- describe('wordCount method test', function() { // demo sentense var wordsInSentence = 'The quick brown fox jumps over the lazy dog'; it('should return a number value', function() { expect(typeof wordsInSentence.wordCount()).toBe('number'); }); it('should returns the number of words a string', function() { expect(wordsInSentence.wordCount()).toBe(9); expect('You should test test test TESTS!!'.wordCount()).toBe(4); }); // Will use a jasmine spy to carry out this test it('it must make use of the words method declared/and extended the string class', function() { // We are creating a spy which will pretend to be the words method of the String class spyOn(String.prototype, 'words').and.returnValue(['You', 'should', 'test', 'TESTS']); // Now call the wordCount function which uses the String.prototype.words() method // Which has to be called, before acquiring the number of words in a sentense wordsInSentence.wordCount(); expect(String.prototype.words).toHaveBeenCalled(); }); }); // --- toCurrency method test --- describe('toCurrency method tests', function() { it('should return a string value', function() { expect(typeof '11111.11'.toCurrency()).toBe('string'); }); it('should return the string unmorphed if has alphanumeric values', function() { expect('these three words'.toCurrency()).toBe('these three words'); expect('100 million'.toCurrency()).toBe('100 million'); }); it('Should returns a currency representation of the String', function() { expect('11111.11'.toCurrency()).toBe('11,111.11'); expect('110'.toCurrency()).toBe('110'); expect('9.08'.toCurrency()).toBe('9.08'); expect('1989.08'.toCurrency()).toBe('1,989.08'); expect('14500'.toCurrency()).toBe('14,500'); expect('14500.055'.toCurrency()).toBe('14,500.055'); expect('152000'.toCurrency()).toBe('152,000'); expect('1000000'.toCurrency()).toBe('1,000,000');
}); }); // --- fromCurrency method test --- describe('fromCurrency method tests', function() { it('should return a number value', function() { expect(typeof '11111.11'.fromCurrency()).toBe('number'); }); it('should return the string unmorphed if has alphanumeric values', function() { expect('these three words'.fromCurrency()).toBe('these three words'); expect('100 million'.fromCurrency()).toBe('100 million'); }); it('Should returns a number representation of the Currency String', function() { expect('11,111.11'.fromCurrency()).toBe(11111.11); expect('1,989.08'.fromCurrency()).toBe(1989.08); expect('1,000,000'.fromCurrency()).toBe(1000000); expect('1,234,567,890'.fromCurrency()).toBe(1234567890); expect('1,234,567,890.12345'.fromCurrency()).toBe(1234567890.12345); }); }); });
expect('1234567890'.toCurrency()).toBe('1,234,567,890'); expect('1234567890.12345'.toCurrency()).toBe('1,234,567,890.12345');
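A hedged Python sketch of the digit-grouping behaviour the toCurrency/fromCurrency specs above describe; the JavaScript implementation under test is not shown in this row, so the regex below is only one plausible way to satisfy those expectations:

import re

def to_currency(value):
    # Non-numeric strings pass through unchanged, as the spec requires.
    if not re.fullmatch(r"\d+(\.\d+)?", value):
        return value
    integer, _, fraction = value.partition(".")
    grouped = re.sub(r"(?<=\d)(?=(\d{3})+$)", ",", integer)
    return grouped + ("." + fraction if fraction else "")

def from_currency(value):
    if not re.fullmatch(r"[\d,]+(\.\d+)?", value):
        return value
    return float(value.replace(",", ""))

assert to_currency("1989.08") == "1,989.08"
assert to_currency("14500.055") == "14,500.055"
assert to_currency("110") == "110"
assert from_currency("1,234,567,890.12345") == 1234567890.12345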
jinja_file.py
from __future__ import unicode_literals from frappe.model.document import Document import frappe from frappe.utils import flt,today from frappe import _ import decimal import json from datetime import datetime, timedelta @frappe.whitelist() def get_delivery_note_data(doc): data = frappe.db.sql("""select item_code,qty,number_of_pieces,wt_range from `tabDelivery Note Item` where parent ="%s" order by item_code"""%(doc.name)) data_aggregate = frappe.db.sql("""select item_code,sum(qty) as sumgrosswt,sum(number_of_pieces) as sum_qty from `tabDelivery Note Item` where parent ="%s" group by item_code"""%(doc.name)) table_height=0 sendata={} total_bundle=0 total_weight=0 data_to_send = "" last_data_aggregate_count =0 for i in data_aggregate: last_data_aggregate_count += 1 header = add_data("""<table style ='width :200px'><tbody><tr class='cls_003' style='border: 1px solid black;'><th colspan ='4' style='text-align:center' ><strong >%s</strong></th></tr>"""%(i[0]),table_height) table_height += 1 header += add_data("""<tr><td><strong>NO</strong></td><td><strong>Wt Range</strong></td><td><strong>Qty</strong></td><td><strong>Gross Wt</strong></td></tr>""",table_height) table_height += 1 count=1 for j in data: if j[0] == i[0]: header += add_data("""<tr><td>%s</td><td align="right">%s</td><td align="right">%s</td><td align="right">%s</td></tr>"""%(count,'{:.3f}'.format(round(j[3], 3)),j[2],'{:.3f}'.format(round(j[1], 3))),table_height) table_height += 1 count+=1 header += add_data("""<tr><td><strong>%s</strong></td><td align="left"><strong>%s</strong></td><td align="right"><strong>%s</strong></td><td align="right"><strong>%s</strong></td></tr></tbody></table>"""%(count-1,"Bun",'{:.0f}'.format(round(i[2], 0)),'{:.3f}'.format(round(i[1], 3))),table_height) table_height += 1 if last_data_aggregate_count == len(data_aggregate): header += add_data("""</div><p align='justify'>&nbsp;</p></div>""",table_height) else: header += add_data("""<p align='justify'>&nbsp;</p>""",table_height) table_height += 1 data_to_send += header total_bundle += count-1 total_weight += i[1] headertable= """<table class = 'headertable'><tr><th>%s</th><th align="left"><strong>%s</strong></th><th>%s</th><th align="left"><strong>%s</strong></th></tr></table>"""%('Total Bundles',total_bundle,'Total Weight','{:.3f}'.format(round(total_weight, 3))) divtable = data_to_send sendata['divtable']=divtable sendata['headertable']=headertable return sendata def add_data(data , num): if num%40 == 0:
else: return data
if ((num // 40)) % 4 == 0 or num == 0: if num ==0: return """<div class='row'> <div class='column' style='margin-left:50px' >""" + data else: return """</tbody></table></div></div> <p >&nbsp;</p><div class='row'> <div class='column' style='margin-left:50px' ><table style ='width :200px'><tbody>""" + data else: return """</table></tbody></div><div class='column' style='margin-left:60px'><table style ='width :200px'><tbody>""" + data
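The add_data helper above breaks the rendered tables into columns and pages purely from a running row counter; a small sketch of that arithmetic (40 rows per column and 4 columns per page, as implied by its num % 40 and (num // 40) % 4 checks), using a hypothetical layout_position helper:

ROWS_PER_COLUMN = 40
COLUMNS_PER_PAGE = 4

def layout_position(num):
    # (page, column, row) for the num-th rendered table row, following the
    # same divisions that drive add_data's modulo checks.
    column_index = num // ROWS_PER_COLUMN
    page = column_index // COLUMNS_PER_PAGE
    column = column_index % COLUMNS_PER_PAGE
    row = num % ROWS_PER_COLUMN
    return page, column, row

assert layout_position(0) == (0, 0, 0)
assert layout_position(40) == (0, 1, 0)   # first row of the second column
assert layout_position(160) == (1, 0, 0)  # first row of a new page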
mod.rs
mod parser; mod token; //use std::collections::HashMap; use std::io::{self, BufRead}; use std::str::FromStr; use crate::isa::Op; // XXX Notes: // * maybe register and immediate literals (and variables, in the future) should // have their own symbols at the start? maybe $ for immediates, @ for // registers, and # for variables. // * should there be assembler directives? if so, they could use % at the start. pub fn parse(inp: impl BufRead) -> Result<Vec<Op>, String> { let lines = inp.lines() .collect::<io::Result<Vec<_>>>() .map_err(|e| e.to_string())?; // Pass 1: turn text into internal representation. let code = lines.iter() // get everything before the comment marker (semicolon) and trim the // remaining whitespace .map(|line| line.split(';').nth(0).unwrap().trim()) // keep track of line numbers. MUST go before .filter() .enumerate() // keep non-empty lines .filter(|(_, line)| !line.is_empty()) // try encoding line, report any error with line number .map(|(n, line)| match Op::from_str(line) { Ok(op) => Ok((n, op)), Err(e) => Err(format!("failed to parse line {}, {}", n + 1, e)), }) // simplify into a result and try! it .collect::<Result<Vec<_>, String>>()?; // Pass 2: resolve labels. if cfg!(feature = "labels") { // find instructions with labels let mut jumps: Vec<_> = code.iter() // filter `Op`s by those that have labels .filter(|(_, op)| op.get_label().is_some() ) .map(|(l, op)| (l, op.clone()) ) // keep track of instruction addresses .enumerate() .collect(); jumps.sort_by_key(|(_, (_, op))| op.get_label().unwrap().clone() ); // TODO: figure this out. consult the compiler messages. //let mut label_table = HashMap::new(); for chunk in jumps.chunks(2) { if chunk.len() < 2 || (chunk[0].1).1.get_label() != (chunk[1].1).1.get_label() { return Err(format!( "unpaired label ':{}' (line {})", (chunk[0].1).1.get_label().unwrap(), (chunk[0].1).0 )); }
} } Ok(code.into_iter().map(|(_, op)| op).collect()) }
let _offset = chunk[1].0 - chunk[0].0 - 1;
cluster.go
package constants const ( DeletingIPs = "deleting_ips" Instances = "instances" WorkingIPs = "working_ips" ExpectInstanceNumber = "expect_instance_number" HasNoneIP = "-" HasNoneInstance = "-" Interval = 60 Delay = 5 Retry = 3 BatchMax = 100 DefaultUsername = "root" )
ClusterStatusEnable = "ENABLE" ClusterStatusDisable = "DISABLE" )
const (
input.js
function() { return async () => () => this; }
negotiate.go
package server import ( "bytes" "encoding" "encoding/json" "encoding/xml" "fmt" "io" "net/http" "strings" "sync" "github.com/gin-gonic/gin" "github.com/gin-gonic/gin/binding" "github.com/gin-gonic/gin/render" "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" ) const prettyMarkdownTemplate = "# %s\n\n```%s\n%s\n```\n" type pretty struct { Markdown Title string Format string Data interface{} bufMu sync.Mutex buf io.Reader } func newPretty(c *gin.Context, code int, data interface{}) *pretty { format := c.NegotiateFormat( binding.MIMEJSON, binding.MIMEYAML, ) logrus.WithField("format", format).Trace("pretty") p := pretty{ Title: http.StatusText(code), Format: format, Data: data, } p.Markdown.input = &p return &p } func (s *Server) negotiate(c *gin.Context, code int, data interface{}) { format := c.NegotiateFormat( binding.MIMEJSON, binding.MIMEHTML, binding.MIMEXML, binding.MIMEXML2, binding.MIMEYAML, ) renderer := getRenderer(format, data, c, code) c.Render(code, renderer) } func getRenderer(format string, data interface{}, c *gin.Context, code int) render.Render { logrus.WithField("format", format).Trace("getRenderer") switch format { case binding.MIMEJSON: return render.IndentedJSON{Data: data} case binding.MIMEHTML: return newPretty(c, code, data) case binding.MIMEXML, binding.MIMEXML2: return render.XML{Data: data} case binding.MIMEYAML: return render.YAML{Data: data} default: logrus.WithField("format", format).Warn("unknown format") return render.JSON{Data: data} } } func (p *pretty) Read(buf []byte) (int, error) { p.bufMu.Lock() defer p.bufMu.Unlock() if p.buf == nil { r, err := p.render() if err != nil { logrus.WithError(err).Trace() return 0, err } p.buf = strings.NewReader(r) } return p.buf.Read(buf) } func (p *pretty) render() (string, error) { logrus.Trace("pretty render") var format string var formatted []byte var err error switch p.Format { case binding.MIMEJSON: format = "json" formatted, err = json.MarshalIndent(p.Data, "", " ") case binding.MIMEXML, binding.MIMEXML2: format = "" formatted, err = xml.MarshalIndent(p.Data, "", " ") case binding.MIMEYAML: format = "yaml" formatted, err = yaml.Marshal(p.Data) default: logrus.WithField("format", p.Format).Warn("unknown format") format = "text" formatted, err = marshalText(p.Data) } if err != nil { return "", err } return fmt.Sprintf(prettyMarkdownTemplate, p.Title, format, string(formatted)), nil } func marshalText(data interface{}) ([]byte, error)
{ if m, ok := data.(encoding.TextMarshaler); ok { return m.MarshalText() } var buf bytes.Buffer _, err := fmt.Fprintf(&buf, "%v", data) return buf.Bytes(), err }
test_team.py
import random from unittest import mock from django.conf import settings from posthog.models import EventDefinition, Organization, PluginConfig, PropertyDefinition, Team, User from posthog.plugins.test.mock import mocked_plugin_requests_get from .base import BaseTest class
(BaseTest): def test_team_has_expected_defaults(self): team: Team = Team.objects.create(name="New Team", organization=self.organization) self.assertEqual(team.timezone, "UTC") self.assertEqual(team.data_attributes, ["data-attr"]) def test_create_team_with_test_account_filters(self): team = Team.objects.create_with_data(organization=self.organization) self.assertEqual( team.test_account_filters, [ {"key": "email", "value": "@posthog.com", "operator": "not_icontains", "type": "person"}, { "key": "$host", "operator": "is_not", "value": ["localhost:8000", "localhost:5000", "127.0.0.1:8000", "127.0.0.1:3000", "localhost:3000"], }, ], ) # test generic emails user = User.objects.create(email="[email protected]") organization = Organization.objects.create() organization.members.set([user]) team = Team.objects.create_with_data(organization=organization) self.assertEqual( team.test_account_filters, [ { "key": "$host", "operator": "is_not", "value": ["localhost:8000", "localhost:5000", "127.0.0.1:8000", "127.0.0.1:3000", "localhost:3000"], }, ], ) @mock.patch("requests.get", side_effect=mocked_plugin_requests_get) def test_preinstalled_are_autoenabled(self, mock_get): with self.settings( MULTI_TENANCY=False, PLUGINS_PREINSTALLED_URLS=["https://github.com/PostHog/helloworldplugin/"] ): _, _, new_team = Organization.objects.bootstrap( self.user, plugins_access_level=Organization.PluginsAccessLevel.INSTALL ) self.assertEqual(PluginConfig.objects.filter(team=new_team, enabled=True).count(), 1) self.assertEqual(PluginConfig.objects.filter(team=new_team, enabled=True).get().plugin.name, "helloworldplugin") self.assertEqual(mock_get.call_count, 2)
TestTeam
google_api_repository.py
import requests import json def remote_named_entity_recognition(document, ner_api_secret): assert ner_api_secret and ner_api_secret != 'PLEASE_ADD_YOUR_OWN_GOOGLE_API_KEY_HERE', "Please add your Google API Key for Named Entity Recognition" payload = { "document": { "type": "PLAIN_TEXT", "content": document, "language": "en" # we need to set the language manually, as the google language detection sometimes fails due to e.g. Dutch names }, "encodingType": 'UTF8' } parameters = { 'key': ner_api_secret } url = 'https://language.googleapis.com/v1beta2/documents:analyzeEntities' response = requests.post(url, json=payload, params=parameters) if response.status_code != 200: print("ERROR!!! HTTP: {}. for request '{}'".format(response.status_code, document)) print(response.text)
if __name__ == "__main__": result = remote_named_entity_recognition("Find job id and date of hire for those employees who was hired between November 5th, 2007 and July 5th, 2009.") if result: print(result)
return None else: print("HTTP: {}. for request '{}'".format(response.status_code, document)) return json.loads(response.text)
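A short, hedged usage sketch for remote_named_entity_recognition above; the API key and the entity fields printed here are placeholders, since the row only shows that the function returns a parsed JSON dict on success and None on an HTTP error:

NER_API_SECRET = "my-real-google-api-key"  # placeholder; must differ from the guarded default

result = remote_named_entity_recognition(
    "Find the hire date for employees hired after 2007.",
    NER_API_SECRET,
)

if result is None:
    print("Entity analysis failed; see the logged HTTP error above.")
else:
    # 'entities' is the field this endpoint is documented to return, but its
    # exact shape is treated as an assumption here.
    for entity in result.get("entities", []):
        print(entity.get("name"), entity.get("type"))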
default_runtime.rs
// Copyright 2020 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT use super::gas_block_store::GasBlockStore; use super::gas_tracker::{price_list_by_epoch, GasCharge, GasTracker, PriceList}; use super::{CircSupplyCalc, LookbackStateGetter, Rand}; use actor::{ account, actorv0, actorv2::{self, ActorDowncast}, actorv3, actorv4, ActorVersion, }; use address::{Address, Protocol}; use blocks::BlockHeader; use byteorder::{BigEndian, WriteBytesExt}; use cid::{Cid, Code::Blake2b256}; use clock::ChainEpoch; use crypto::{DomainSeparationTag, Signature}; use fil_types::{ verifier::ProofVerifier, DefaultNetworkParams, NetworkParams, NetworkVersion, Randomness, }; use fil_types::{PieceInfo, RegisteredSealProof, SealVerifyInfo, WindowPoStVerifyInfo}; use forest_encoding::{blake2b_256, to_vec, Cbor}; use ipld_blockstore::BlockStore; use log::debug; use message::{Message, UnsignedMessage}; use num_bigint::BigInt; use num_traits::Zero; use rayon::prelude::*; use runtime::{ compute_unsealed_sector_cid, ActorCode, ConsensusFault, ConsensusFaultType, MessageInfo, Runtime, Syscalls, }; use state_tree::StateTree; use std::cell::RefCell; use std::collections::{HashMap, HashSet}; use std::error::Error as StdError; use std::marker::PhantomData; use std::rc::Rc; use vm::{ actor_error, ActorError, ActorState, ExitCode, MethodNum, Serialized, TokenAmount, EMPTY_ARR_CID, METHOD_SEND, }; lazy_static! { static ref NUM_CPUS: usize = num_cpus::get(); } /// Max runtime call depth const MAX_CALL_DEPTH: u64 = 4096; // This is just used for gas tracing, intentionally 0 and could be removed. const ACTOR_EXEC_GAS: GasCharge = GasCharge { name: "OnActorExec", compute_gas: 0, storage_gas: 0, }; #[derive(Debug, Clone)] struct VMMsg { caller: Address, receiver: Address, value_received: TokenAmount, } impl MessageInfo for VMMsg { fn caller(&self) -> &Address { assert!( matches!(self.caller.protocol(), Protocol::ID), "runtime message caller was not resolved to ID address" ); &self.caller } fn receiver(&self) -> &Address { // * Can't assert that receiver is an ID address here because it was not being done // * pre NetworkVersion3. Can maybe add in assertion later &self.receiver } fn value_received(&self) -> &TokenAmount { &self.value_received } } /// Implementation of the Runtime trait. pub struct DefaultRuntime<'db, 'vm, BS, R, C, LB, V, P = DefaultNetworkParams> { version: NetworkVersion, state: &'vm mut StateTree<'db, BS>, store: GasBlockStore<'db, BS>, gas_tracker: Rc<RefCell<GasTracker>>, vm_msg: VMMsg, epoch: ChainEpoch, origin: Address, origin_nonce: u64, depth: u64, num_actors_created: u64, price_list: PriceList, rand: &'vm R, caller_validated: bool, allow_internal: bool, registered_actors: &'vm HashSet<Cid>, circ_supply_calc: &'vm C, lb_state: &'vm LB, base_fee: TokenAmount, verifier: PhantomData<V>, params: PhantomData<P>, }
BS: BlockStore, V: ProofVerifier, P: NetworkParams, R: Rand, C: CircSupplyCalc, LB: LookbackStateGetter<'db, BS>, { /// Constructs a new Runtime #[allow(clippy::too_many_arguments)] pub fn new( version: NetworkVersion, state: &'vm mut StateTree<'db, BS>, store: &'db BS, gas_used: i64, base_fee: TokenAmount, message: &UnsignedMessage, epoch: ChainEpoch, origin: Address, origin_nonce: u64, num_actors_created: u64, depth: u64, rand: &'vm R, registered_actors: &'vm HashSet<Cid>, circ_supply_calc: &'vm C, lb_state: &'vm LB, ) -> Result<Self, ActorError> { let price_list = price_list_by_epoch(epoch); let gas_tracker = Rc::new(RefCell::new(GasTracker::new(message.gas_limit(), gas_used))); let gas_block_store = GasBlockStore { price_list: price_list.clone(), gas: Rc::clone(&gas_tracker), store, }; let caller_id = state .lookup_id(&message.from()) .map_err(|e| e.downcast_fatal("failed to lookup id"))? .ok_or_else(|| { actor_error!(SysErrInvalidReceiver, "resolve msg from address failed") })?; let receiver = if version <= NetworkVersion::V3 { *message.to() } else { state .lookup_id(&message.to()) .map_err(|e| e.downcast_fatal("failed to lookup id"))? // * Go implementation changes this to undef address. To avoid using optional // * value here, the non-id address is kept here (should never be used) .unwrap_or(*message.to()) }; let vm_msg = VMMsg { caller: caller_id, receiver, value_received: message.value().clone(), }; Ok(DefaultRuntime { version, state, store: gas_block_store, gas_tracker, vm_msg, epoch, origin, origin_nonce, depth, num_actors_created, price_list, rand, registered_actors, circ_supply_calc, lb_state, base_fee, allow_internal: true, caller_validated: false, params: PhantomData, verifier: PhantomData, }) } /// Adds to amount of used. /// * Will borrow gas tracker RefCell, do not call if any reference to this exists pub fn charge_gas(&mut self, gas: GasCharge) -> Result<(), ActorError> { self.gas_tracker.borrow_mut().charge_gas(gas) } /// Returns gas used by runtime. /// * Will borrow gas tracker RefCell, do not call if a mutable reference exists pub fn gas_used(&self) -> i64 { self.gas_tracker.borrow().gas_used() } fn gas_available(&self) -> i64 { self.gas_tracker.borrow().gas_available() } /// Returns the price list for gas charges within the runtime. pub fn price_list(&self) -> &PriceList { &self.price_list } /// Get the balance of a particular Actor from their Address. fn get_balance(&self, addr: &Address) -> Result<BigInt, ActorError> { Ok(self .state .get_actor(&addr) .map_err(|e| e.downcast_fatal("failed to get actor in get balance"))? .map(|act| act.balance) .unwrap_or_default()) } /// Update the state Cid of the Message receiver. fn state_commit(&mut self, old_h: &Cid, new_h: Cid) -> Result<(), ActorError> { let to_addr = *self.message().receiver(); let mut actor = self .state .get_actor(&to_addr) .map_err(|e| e.downcast_fatal("failed to get actor to commit state"))? .ok_or_else(|| actor_error!(fatal("failed to get actor to commit state")))?; if &actor.state != old_h { return Err(actor_error!(fatal( "failed to update, inconsistent base reference" ))); } actor.state = new_h; self.state .set_actor(&to_addr, actor) .map_err(|e| e.downcast_fatal("failed to set actor in state_commit"))?; Ok(()) } fn abort_if_already_validated(&mut self) -> Result<(), ActorError> { if self.caller_validated { Err(actor_error!(SysErrIllegalActor; "Method must validate caller identity exactly once")) } else { self.caller_validated = true; Ok(()) } } /// Helper function for inserting into blockstore. 
fn put<T>(&self, obj: &T) -> Result<Cid, ActorError> where T: Cbor, { self.store .put(obj, Blake2b256) .map_err(|e| e.downcast_fatal("failed to put cbor object")) } /// Helper function for getting deserializable objects from blockstore. fn get<T>(&self, cid: &Cid) -> Result<Option<T>, ActorError> where T: Cbor, { self.store .get(cid) .map_err(|e| e.downcast_fatal("failed to get cbor object")) } fn internal_send( &mut self, from: Address, to: Address, method: MethodNum, value: TokenAmount, params: Serialized, ) -> Result<Serialized, ActorError> { let msg = UnsignedMessage { from, to, method_num: method, value, params, gas_limit: self.gas_available(), version: Default::default(), sequence: Default::default(), gas_fee_cap: Default::default(), gas_premium: Default::default(), }; // snapshot state tree self.state .snapshot() .map_err(|e| actor_error!(fatal("failed to create snapshot: {}", e)))?; let send_res = self.send(&msg, None); let ret = send_res.map_err(|e| { if let Err(e) = self.state.revert_to_snapshot() { actor_error!(fatal("failed to revert snapshot: {}", e)) } else { e } }); if let Err(e) = self.state.clear_snapshot() { actor_error!(fatal("failed to clear snapshot: {}", e)); } ret } /// Shared logic between the DefaultRuntime and the Interpreter. /// It invokes methods on different Actors based on the Message. /// This function is somewhat equivalent to the go implementation's vm send. pub fn send( &mut self, msg: &UnsignedMessage, gas_cost: Option<GasCharge>, ) -> Result<Serialized, ActorError> { // Since it is unsafe to share a mutable reference to the state tree by copying // the runtime, all variables must be copied and reset at the end of the transition. // This logic is the equivalent to the go implementation creating a new runtime with // shared values. // All other fields will be updated from the execution. let prev_val = self.caller_validated; let prev_depth = self.depth; let prev_msg = self.vm_msg.clone(); let res = self.execute_send(msg, gas_cost); // Reset values back to their values before the call self.vm_msg = prev_msg; self.caller_validated = prev_val; self.depth = prev_depth; res } /// Helper function to handle all of the execution logic folded into single result. /// This is necessary to follow to follow the same control flow of the go implementation /// cleanly without doing anything memory unsafe. fn execute_send( &mut self, msg: &UnsignedMessage, gas_cost: Option<GasCharge>, ) -> Result<Serialized, ActorError> { // * Following logic would be called in the go runtime initialization. // * Since We reuse the runtime, all of these things need to happen on each call self.caller_validated = false; self.depth += 1; if self.depth > MAX_CALL_DEPTH && self.network_version() >= NetworkVersion::V6 { return Err(actor_error!( SysErrForbidden, "message execution exceeds call depth" )); } let caller = self.resolve_address(msg.from())?.ok_or_else(|| { actor_error!( SysErrInvalidReceiver, "resolving from address in internal send failed" ) })?; let receiver = if self.network_version() <= NetworkVersion::V3 { msg.to } else if let Some(resolved) = self.resolve_address(msg.to())? { resolved } else { msg.to }; self.vm_msg = VMMsg { caller, receiver, value_received: msg.value().clone(), }; // * End of logic that is performed on go runtime initialization if let Some(cost) = gas_cost { self.charge_gas(cost)?; } let to_actor = match self .state .get_actor(msg.to()) .map_err(|e| e.downcast_fatal("failed to get actor"))? 
{ Some(act) => act, None => { // Try to create actor if not exist let (to_actor, id_addr) = self.try_create_account_actor(msg.to())?; if self.network_version() > NetworkVersion::V3 { // Update the receiver to the created ID address self.vm_msg.receiver = id_addr; } to_actor } }; self.charge_gas( self.price_list() .on_method_invocation(msg.value(), msg.method_num()), )?; if !msg.value().is_zero() { transfer(self.state, &msg.from(), &msg.to(), &msg.value()) .map_err(|e| e.wrap("failed to transfer funds"))?; } if msg.method_num() != METHOD_SEND { self.charge_gas(ACTOR_EXEC_GAS)?; return self.invoke(to_actor.code, msg.method_num(), msg.params(), msg.to()); } Ok(Serialized::default()) } /// Calls actor code with method and parameters. fn invoke( &mut self, code: Cid, method_num: MethodNum, params: &Serialized, to: &Address, ) -> Result<Serialized, ActorError> { let ret = if let Some(ret) = { match actor::ActorVersion::from(self.network_version()) { ActorVersion::V0 => actorv0::invoke_code(&code, self, method_num, params), ActorVersion::V2 => actorv2::invoke_code(&code, self, method_num, params), ActorVersion::V3 => actorv3::invoke_code(&code, self, method_num, params), ActorVersion::V4 => actorv4::invoke_code(&code, self, method_num, params), } } { ret } else if code == *actorv2::CHAOS_ACTOR_CODE_ID && self.registered_actors.contains(&code) { actorv2::chaos::Actor::invoke_method(self, method_num, params) } else { Err(actor_error!( SysErrIllegalActor, "no code for actor at address {}", to )) }?; if !self.caller_validated { Err( actor_error!(SysErrIllegalActor; "Caller must be validated during method execution"), ) } else { Ok(ret) } } /// creates account actors from only BLS/SECP256K1 addresses. pub fn try_create_account_actor( &mut self, addr: &Address, ) -> Result<(ActorState, Address), ActorError> { self.charge_gas(self.price_list().on_create_actor())?; let addr_id = self .state .register_new_address(addr) .map_err(|e| e.downcast_fatal("failed to register new address"))?; let version = ActorVersion::from(self.network_version()); let act = make_actor(addr, version)?; self.state .set_actor(&addr_id, act) .map_err(|e| e.downcast_fatal("failed to set actor"))?; let p = Serialized::serialize(&addr).map_err(|e| { actor_error!(fatal( "couldn't serialize params for actor construction: {}", e )) })?; self.internal_send( **actor::system::ADDRESS, addr_id, account::Method::Constructor as u64, TokenAmount::from(0), p, ) .map_err(|e| e.wrap("failed to invoke account constructor"))?; let act = self .state .get_actor(&addr_id) .map_err(|e| e.downcast_fatal("failed to get actor"))? .ok_or_else(|| actor_error!(fatal("failed to retrieve created actor state")))?; Ok((act, addr_id)) } fn verify_block_signature(&self, bh: &BlockHeader) -> Result<(), Box<dyn StdError>> { let worker_addr = self.worker_key_at_lookback(bh.epoch())?; bh.check_block_signature(&worker_addr)?; Ok(()) } fn worker_key_at_lookback(&self, height: ChainEpoch) -> Result<Address, Box<dyn StdError>> { if self.network_version() >= NetworkVersion::V7 && height < self.epoch - actor::CHAIN_FINALITY { return Err(format!( "cannot get worker key (current epoch: {}, height: {})", self.epoch, height ) .into()); } let lb_state = self.lb_state.state_lookback(height)?; let actor = lb_state // * @austinabell: Yes, this is intentional (should be updated with v3 actors though) .get_actor(self.vm_msg.receiver())? 
.ok_or_else(|| format!("actor not found {:?}", self.vm_msg.receiver()))?; let ms = actor::miner::State::load(&self.store, &actor)?; let worker = ms.info(&self.store)?.worker; resolve_to_key_addr(&self.state, &self.store, &worker) } } impl<'bs, BS, R, CS, LB, V, P> Runtime<GasBlockStore<'bs, BS>> for DefaultRuntime<'bs, '_, BS, R, CS, LB, V, P> where BS: BlockStore, V: ProofVerifier, P: NetworkParams, R: Rand, CS: CircSupplyCalc, LB: LookbackStateGetter<'bs, BS>, { fn network_version(&self) -> NetworkVersion { self.version } fn message(&self) -> &dyn MessageInfo { &self.vm_msg } fn curr_epoch(&self) -> ChainEpoch { self.epoch } fn validate_immediate_caller_accept_any(&mut self) -> Result<(), ActorError> { self.abort_if_already_validated() } fn validate_immediate_caller_is<'db, I>(&mut self, addresses: I) -> Result<(), ActorError> where I: IntoIterator<Item = &'db Address>, { self.abort_if_already_validated()?; let imm = self.message().caller(); // Check if theres is at least one match if !addresses.into_iter().any(|a| a == imm) { return Err(actor_error!(SysErrForbidden; "caller {} is not one of supported", self.message().caller() )); } Ok(()) } fn validate_immediate_caller_type<'db, I>(&mut self, types: I) -> Result<(), ActorError> where I: IntoIterator<Item = &'db Cid>, { self.abort_if_already_validated()?; let caller_cid = self .get_actor_code_cid(self.message().caller())? .ok_or_else(|| actor_error!(fatal("failed to lookup code cid for caller")))?; if !types.into_iter().any(|c| *c == caller_cid) { return Err(actor_error!(SysErrForbidden; "caller cid type {} not one of supported", caller_cid)); } Ok(()) } fn current_balance(&self) -> Result<TokenAmount, ActorError> { self.get_balance(self.message().receiver()) } fn resolve_address(&self, address: &Address) -> Result<Option<Address>, ActorError> { self.state .lookup_id(&address) .map_err(|e| e.downcast_fatal("failed to look up id")) } fn get_actor_code_cid(&self, addr: &Address) -> Result<Option<Cid>, ActorError> { Ok(self .state .get_actor(&addr) .map_err(|e| e.downcast_fatal("failed to get actor"))? .map(|act| act.code)) } fn get_randomness_from_tickets( &self, personalization: DomainSeparationTag, rand_epoch: ChainEpoch, entropy: &[u8], ) -> Result<Randomness, ActorError> { let r = if rand_epoch > networks::UPGRADE_PLACEHOLDER_HEIGHT { self.rand .get_chain_randomness_looking_forward(personalization, rand_epoch, entropy) .map_err(|e| e.downcast_fatal("could not get randomness"))? } else { self.rand .get_chain_randomness(personalization, rand_epoch, entropy) .map_err(|e| e.downcast_fatal("could not get randomness"))? }; Ok(Randomness(r)) } fn get_randomness_from_beacon( &self, personalization: DomainSeparationTag, rand_epoch: ChainEpoch, entropy: &[u8], ) -> Result<Randomness, ActorError> { let r = if rand_epoch > networks::UPGRADE_PLACEHOLDER_HEIGHT { self.rand .get_beacon_randomness_looking_forward(personalization, rand_epoch, entropy) .map_err(|e| e.downcast_fatal("could not get randomness"))? } else { self.rand .get_beacon_randomness(personalization, rand_epoch, entropy) .map_err(|e| e.downcast_fatal("could not get randomness"))? }; Ok(Randomness(r)) } fn create<C: Cbor>(&mut self, obj: &C) -> Result<(), ActorError> { let c = self.put(obj)?; self.state_commit(&EMPTY_ARR_CID, c) } fn state<C: Cbor>(&self) -> Result<C, ActorError> { let actor = self .state .get_actor(self.message().receiver()) .map_err(|e| { e.downcast_default( ExitCode::SysErrIllegalArgument, "failed to get actor for Readonly state", ) })? 
.ok_or_else( || actor_error!(SysErrIllegalArgument; "Actor readonly state does not exist"), )?; self.get(&actor.state)?.ok_or_else(|| { actor_error!(fatal( "State does not exist for actor state cid: {}", actor.state )) }) } fn transaction<C, RT, F>(&mut self, f: F) -> Result<RT, ActorError> where C: Cbor, F: FnOnce(&mut C, &mut Self) -> Result<RT, ActorError>, { // get actor let act = self .state .get_actor(self.message().receiver()) .map_err(|e| { e.downcast_default( ExitCode::SysErrIllegalActor, "failed to get actor for transaction", ) })? .ok_or_else(|| { actor_error!(SysErrIllegalActor; "actor state for transaction doesn't exist") })?; // get state for actor based on generic C let mut state: C = self .get(&act.state)? .ok_or_else(|| actor_error!(fatal("Actor state does not exist: {}", act.state)))?; // Update the state self.allow_internal = false; let r = f(&mut state, self); self.allow_internal = true; // Return error after allow_internal is reset let r = r?; let c = self.put(&state)?; // Committing that change self.state_commit(&act.state, c)?; Ok(r) } fn store(&self) -> &GasBlockStore<'bs, BS> { &self.store } fn send( &mut self, to: Address, method: MethodNum, params: Serialized, value: TokenAmount, ) -> Result<Serialized, ActorError> { if !self.allow_internal { return Err(actor_error!(SysErrIllegalActor; "runtime.send() is not allowed")); } let ret = self .internal_send(*self.message().receiver(), to, method, value, params) .map_err(|e| { debug!( "internal send failed: (to: {}) (method: {}) {}", to, method, e ); e })?; Ok(ret) } fn new_actor_address(&mut self) -> Result<Address, ActorError> { // ! Go implementation doesn't handle the error for some reason here and will panic let oa = resolve_to_key_addr(self.state, self.store.store, &self.origin) .map_err(|e| e.downcast_fatal("failed to resolve key addr"))?; let mut b = to_vec(&oa).map_err(|e| { actor_error!(fatal( "Could not serialize address in new_actor_address: {}", e )) })?; b.write_u64::<BigEndian>(self.origin_nonce) .map_err(|e| actor_error!(fatal("Writing nonce address into a buffer: {}", e)))?; b.write_u64::<BigEndian>(self.num_actors_created) .map_err(|e| { actor_error!(fatal( "Writing number of actors created into a buffer: {}", e )) })?; let addr = Address::new_actor(&b); self.num_actors_created += 1; Ok(addr) } fn create_actor(&mut self, code_id: Cid, address: &Address) -> Result<(), ActorError> { // * Lotus does undef address check here, should be impossible to hit. // * if diff with `SysErrIllegalArgument` check here if !actor::is_builtin_actor(&code_id) { return Err(actor_error!(SysErrIllegalArgument; "Can only create built-in actors.")); } if actor::is_singleton_actor(&code_id) { return Err(actor_error!(SysErrIllegalArgument; "Can only have one instance of singleton actors.")); } if let Ok(Some(_)) = self.state.get_actor(address) { return Err(actor_error!(SysErrIllegalArgument; "Actor address already exists")); } self.charge_gas(self.price_list.on_create_actor())?; self.state .set_actor( &address, ActorState::new(code_id, *EMPTY_ARR_CID, 0.into(), 0), ) .map_err(|e| e.downcast_fatal("creating actor entry")) } /// DeleteActor deletes the executing actor from the state tree, transferring /// any balance to beneficiary. /// Aborts if the beneficiary does not exist. /// May only be called by the actor itself. 
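    /// From network version V7 onward, a non-zero balance can only be transferred to a
    /// beneficiary that resolves to an ID address different from the executing actor.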
fn delete_actor(&mut self, beneficiary: &Address) -> Result<(), ActorError> { self.charge_gas(self.price_list.on_delete_actor())?; let receiver = *self.message().receiver(); let balance = self .state .get_actor(&receiver) .map_err(|e| e.downcast_fatal(format!("failed to get actor {}", receiver)))? .ok_or_else(|| actor_error!(SysErrIllegalActor; "failed to load actor in delete actor")) .map(|act| act.balance)?; if balance != 0.into() { if self.version >= NetworkVersion::V7 { let beneficiary_id = self.resolve_address(&beneficiary)?.ok_or_else(|| { actor_error!(SysErrIllegalArgument, "beneficiary doesn't exist") })?; if &beneficiary_id == self.message().receiver() { return Err(actor_error!( SysErrIllegalArgument, "benefactor cannot be beneficiary" )); } } // Transfer the executing actor's balance to the beneficiary transfer(self.state, &receiver, beneficiary, &balance) .map_err(|e| e.wrap("failed to transfer balance to beneficiary actor"))?; } // Delete the executing actor self.state .delete_actor(&receiver) .map_err(|e| e.downcast_fatal("failed to delete actor")) } fn total_fil_circ_supply(&self) -> Result<TokenAmount, ActorError> { self.circ_supply_calc .get_supply(self.epoch, self.state) .map_err(|e| actor_error!(ErrIllegalState, "failed to get total circ supply: {}", e)) } fn charge_gas(&mut self, name: &'static str, compute: i64) -> Result<(), ActorError> { self.charge_gas(GasCharge::new(name, compute, 0)) } fn base_fee(&self) -> &TokenAmount { &self.base_fee } } impl<'bs, BS, R, C, LB, V, P> Syscalls for DefaultRuntime<'bs, '_, BS, R, C, LB, V, P> where BS: BlockStore, V: ProofVerifier, P: NetworkParams, R: Rand, C: CircSupplyCalc, LB: LookbackStateGetter<'bs, BS>, { fn verify_signature( &self, signature: &Signature, signer: &Address, plaintext: &[u8], ) -> Result<(), Box<dyn StdError>> { self.gas_tracker.borrow_mut().charge_gas( self.price_list .on_verify_signature(signature.signature_type()), )?; // Resolve to key address before verifying signature. let signing_addr = resolve_to_key_addr(self.state, &self.store, signer)?; Ok(signature.verify(plaintext, &signing_addr)?) } fn hash_blake2b(&self, data: &[u8]) -> Result<[u8; 32], Box<dyn StdError>> { self.gas_tracker .borrow_mut() .charge_gas(self.price_list.on_hashing(data.len()))?; Ok(blake2b_256(data)) } fn compute_unsealed_sector_cid( &self, reg: RegisteredSealProof, pieces: &[PieceInfo], ) -> Result<Cid, Box<dyn StdError>> { self.gas_tracker .borrow_mut() .charge_gas(self.price_list.on_compute_unsealed_sector_cid(reg, pieces))?; compute_unsealed_sector_cid(reg, pieces) } fn verify_seal(&self, vi: &SealVerifyInfo) -> Result<(), Box<dyn StdError>> { self.gas_tracker .borrow_mut() .charge_gas(self.price_list.on_verify_seal(vi))?; V::verify_seal(vi) } fn verify_post(&self, vi: &WindowPoStVerifyInfo) -> Result<(), Box<dyn StdError>> { self.gas_tracker .borrow_mut() .charge_gas(self.price_list.on_verify_post(vi))?; V::verify_window_post(vi.randomness, &vi.proofs, &vi.challenged_sectors, vi.prover) } fn verify_consensus_fault( &self, h1: &[u8], h2: &[u8], extra: &[u8], ) -> Result<Option<ConsensusFault>, Box<dyn StdError>> { self.gas_tracker .borrow_mut() .charge_gas(self.price_list.on_verify_consensus_fault())?; // Note that block syntax is not validated. Any validly signed block will be accepted pursuant to the below conditions. // Whether or not it could ever have been accepted in a chain is not checked/does not matter here. 
// for that reason when checking block parent relationships, rather than instantiating a Tipset to do so // (which runs a syntactic check), we do it directly on the CIDs. // (0) cheap preliminary checks if h1 == h2 { return Err(format!( "no consensus fault: submitted blocks are the same: {:?}, {:?}", h1, h2 ) .into()); }; let bh_1 = BlockHeader::unmarshal_cbor(h1)?; let bh_2 = BlockHeader::unmarshal_cbor(h2)?; if bh_1.cid() == bh_2.cid() { return Err("no consensus fault: submitted blocks are the same".into()); } // (1) check conditions necessary to any consensus fault if bh_1.miner_address() != bh_2.miner_address() { return Err(format!( "no consensus fault: blocks not mined by same miner: {:?}, {:?}", bh_1.miner_address(), bh_2.miner_address() ) .into()); }; // block a must be earlier or equal to block b, epoch wise (ie at least as early in the chain). if bh_2.epoch() < bh_1.epoch() { return Err(format!( "first block must not be of higher height than second: {:?}, {:?}", bh_1.epoch(), bh_2.epoch() ) .into()); }; // (2) check for the consensus faults themselves let mut cf: Option<ConsensusFault> = None; // (a) double-fork mining fault if bh_1.epoch() == bh_2.epoch() { cf = Some(ConsensusFault { target: *bh_1.miner_address(), epoch: bh_2.epoch(), fault_type: ConsensusFaultType::DoubleForkMining, }) }; // (b) time-offset mining fault // strictly speaking no need to compare heights based on double fork mining check above, // but at same height this would be a different fault. if bh_1.parents() == bh_2.parents() && bh_1.epoch() != bh_2.epoch() { cf = Some(ConsensusFault { target: *bh_1.miner_address(), epoch: bh_2.epoch(), fault_type: ConsensusFaultType::TimeOffsetMining, }) }; // (c) parent-grinding fault // Here extra is the "witness", a third block that shows the connection between A and B as // A's sibling and B's parent. 
// Specifically, since A is of lower height, it must be that B was mined omitting A from its tipset if !extra.is_empty() { let bh_3 = BlockHeader::unmarshal_cbor(extra)?; if bh_1.parents() == bh_3.parents() && bh_1.epoch() == bh_3.epoch() && bh_2.parents().cids().contains(bh_3.cid()) && !bh_2.parents().cids().contains(bh_1.cid()) { cf = Some(ConsensusFault { target: *bh_1.miner_address(), epoch: bh_2.epoch(), fault_type: ConsensusFaultType::ParentGrinding, }) } }; // (3) return if no consensus fault if cf.is_some() { // (4) expensive final checks // check blocks are properly signed by their respective miner // note we do not need to check extra's: it is a parent to block b // which itself is signed, so it was willingly included by the miner self.verify_block_signature(&bh_1)?; self.verify_block_signature(&bh_2)?; } Ok(cf) } fn batch_verify_seals( &self, vis: &[(&Address, &Vec<SealVerifyInfo>)], ) -> Result<HashMap<Address, Vec<bool>>, Box<dyn StdError>> { // Gas charged for batch verify in actor let out = vis .par_iter() .with_min_len(vis.len() / *NUM_CPUS) .map(|(&addr, seals)| { let results = seals .par_iter() .map(|s| { if let Err(err) = V::verify_seal(s) { debug!( "seal verify in batch failed (miner: {}) (err: {})", addr, err ); false } else { true } }) .collect(); (addr, results) }) .collect(); Ok(out) } fn verify_aggregate_seals( &self, aggregate: &fil_types::AggregateSealVerifyProofAndInfos, ) -> Result<(), Box<dyn StdError>> { self.gas_tracker .borrow_mut() .charge_gas(self.price_list.on_verify_aggregate_seals(&aggregate))?; V::verify_aggregate_seals(aggregate) } } /// Transfers funds from one Actor to another Actor fn transfer<BS: BlockStore>( state: &mut StateTree<BS>, from: &Address, to: &Address, value: &TokenAmount, ) -> Result<(), ActorError> { if from == to { return Ok(()); } let from_id = state .lookup_id(from) .map_err(|e| e.downcast_fatal("failed to lookup from id for address"))? .ok_or_else(|| actor_error!(fatal("Failed to lookup from id for address {}", from)))?; let to_id = state .lookup_id(to) .map_err(|e| e.downcast_fatal("failed to lookup to id for address"))? .ok_or_else(|| actor_error!(fatal("Failed to lookup to id for address {}", to)))?; if from_id == to_id { return Ok(()); } if value < &0.into() { return Err(actor_error!(SysErrForbidden; "attempted to transfer negative transfer value {}", value)); } let mut f = state .get_actor(&from_id) .map_err(|e| e.downcast_fatal("failed to get actor"))? .ok_or_else(|| { actor_error!(fatal( "sender actor does not exist in state during transfer" )) })?; let mut t = state .get_actor(&to_id) .map_err(|e| e.downcast_fatal("failed to get actor: {}"))? .ok_or_else(|| { actor_error!(fatal( "receiver actor does not exist in state during transfer" )) })?; f.deduct_funds(&value).map_err(|e| { actor_error!(SysErrInsufficientFunds; "transfer failed when deducting funds ({}): {}", value, e) })?; t.deposit_funds(&value); state .set_actor(from, f) .map_err(|e| e.downcast_fatal("failed to set from actor"))?; state .set_actor(to, t) .map_err(|e| e.downcast_fatal("failed to set to actor"))?; Ok(()) } /// returns the public key type of address (`BLS`/`SECP256K1`) of an account actor /// identified by `addr`. 
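/// `BLS` and `SECP256K1` addresses are already key addresses and are returned unchanged;
/// other protocols are resolved by loading the referenced account actor's state.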
pub fn resolve_to_key_addr<'st, 'bs, BS, S>( st: &'st StateTree<'bs, S>, store: &'bs BS, addr: &Address, ) -> Result<Address, Box<dyn StdError>> where BS: BlockStore, S: BlockStore, { if addr.protocol() == Protocol::BLS || addr.protocol() == Protocol::Secp256k1 { return Ok(*addr); } let act = st .get_actor(&addr) .map_err(|e| e.downcast_wrap("Failed to get actor"))? .ok_or_else(|| format!("Failed to retrieve actor: {}", addr))?; let acc_st = account::State::load(store, &act)?; Ok(acc_st.pubkey_address()) } fn make_actor(addr: &Address, version: ActorVersion) -> Result<ActorState, ActorError> { match addr.protocol() { Protocol::BLS | Protocol::Secp256k1 => Ok(new_account_actor(version)), Protocol::ID => { Err(actor_error!(SysErrInvalidReceiver; "no actor with given id: {}", addr)) } Protocol::Actor => Err(actor_error!(SysErrInvalidReceiver; "no such actor: {}", addr)), } } fn new_account_actor(version: ActorVersion) -> ActorState { ActorState { code: match version { ActorVersion::V0 => *actorv0::ACCOUNT_ACTOR_CODE_ID, ActorVersion::V2 => *actorv2::ACCOUNT_ACTOR_CODE_ID, ActorVersion::V3 => *actorv3::ACCOUNT_ACTOR_CODE_ID, ActorVersion::V4 => *actorv4::ACCOUNT_ACTOR_CODE_ID, }, balance: TokenAmount::from(0), state: *EMPTY_ARR_CID, sequence: 0, } }
impl<'db, 'vm, BS, R, C, LB, V, P> DefaultRuntime<'db, 'vm, BS, R, C, LB, V, P> where
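// Editor's sketch (hedged, not part of this crate): a minimal, self-contained model of the
// guard rules enforced by the `transfer` helper above. The real function resolves both
// addresses to ID addresses in the StateTree and works with TokenAmount/ActorState; here
// the state is a plain HashMap keyed by already-resolved IDs and balances are i128. The
// names `MockState`, `TransferError`, and `transfer_guarded` are hypothetical and exist
// only to illustrate the rules: same-party transfers are no-ops, negative values are
// rejected, and funds are deducted from the sender before being deposited to the receiver.
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum TransferError {
    NegativeAmount,
    UnknownAccount,
    InsufficientFunds,
}

/// Stand-in for the state tree: resolved ID address -> balance.
struct MockState {
    balances: HashMap<u64, i128>,
}

impl MockState {
    fn transfer_guarded(&mut self, from: u64, to: u64, value: i128) -> Result<(), TransferError> {
        // Mirrors the early return in `transfer`: moving funds to yourself is a no-op.
        if from == to {
            return Ok(());
        }
        // Mirrors the SysErrForbidden check on negative transfer values.
        if value < 0 {
            return Err(TransferError::NegativeAmount);
        }
        let from_balance = *self.balances.get(&from).ok_or(TransferError::UnknownAccount)?;
        if from_balance < value {
            // A failing `deduct_funds` maps to SysErrInsufficientFunds in the real code.
            return Err(TransferError::InsufficientFunds);
        }
        *self.balances.get_mut(&from).unwrap() -= value;
        *self.balances.entry(to).or_insert(0) += value;
        Ok(())
    }
}

fn main() {
    let mut state = MockState {
        balances: HashMap::from([(1, 100), (2, 0)]),
    };
    assert_eq!(state.transfer_guarded(1, 1, 50), Ok(())); // same party: no-op
    assert_eq!(state.transfer_guarded(1, 2, -5), Err(TransferError::NegativeAmount));
    assert_eq!(state.transfer_guarded(1, 2, 60), Ok(()));
    assert_eq!(state.balances[&1], 40);
    assert_eq!(state.balances[&2], 60);
}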
add_taxonomies_to_paleoarxiv.py
import os import json import logging import sys from django.db import transaction from django.apps import apps from scripts import utils as script_utils from scripts.populate_preprint_providers import update_or_create from osf.models import PreprintProvider, Subject from website.app import init_app from website import settings logger = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) def add_subjects_to_paleorxiv(): paleoarix = PreprintProvider.objects.get(_id='paleorxiv') bepress_subject = Subject.objects.get(text='Paleontology', provider___id='osf') life_sciences = Subject.objects.get(text='Earth and Life Sciences', provider=paleoarix) ichnology = Subject(text='Ichnology', provider=paleoarix, parent=life_sciences, bepress_subject=bepress_subject) ichnology.save() taphonomy = Subject(text='Taphonomy', provider=paleoarix, parent=life_sciences, bepress_subject=bepress_subject) taphonomy.save() paleoarix.save() def main():
if __name__ == '__main__': main()
init_app(set_backends=True, routes=False) dry_run = '--dry' in sys.argv if not dry_run: script_utils.add_file_logger(logger, __file__) with transaction.atomic(): add_subjects_to_paleorxiv() if dry_run: raise RuntimeError('Dry run, transaction rolled back')
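# Editor's note (hedged): the pattern above runs the whole change inside a single
# transaction.atomic() block and raises at the end when '--dry' is passed so the
# transaction rolls back and nothing is persisted. Assumed invocation from the OSF
# repository root (the exact entry point may differ):
#     python -m scripts.add_taxonomies_to_paleoarxiv          # apply the change
#     python -m scripts.add_taxonomies_to_paleoarxiv --dry    # log only, roll back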
blockstore.rs
//! The `blockstore` module provides functions for parallel verification of the //! Proof of History ledger as well as iterative read, append write, and random //! access read to a persistent file-based ledger. use crate::{ ancestor_iterator::AncestorIterator, blockstore_db::{ columns as cf, AccessType, BlockstoreRecoveryMode, Column, Database, IteratorDirection, IteratorMode, LedgerColumn, Result, WriteBatch, }, blockstore_meta::*, entry::{create_ticks, Entry}, erasure::ErasureConfig, leader_schedule_cache::LeaderScheduleCache, next_slots_iterator::NextSlotsIterator, shred::{Result as ShredResult, Shred, Shredder}, }; pub use crate::{blockstore_db::BlockstoreError, blockstore_meta::SlotMeta}; use bincode::deserialize; use log::*; use rayon::{ iter::{IntoParallelRefIterator, ParallelIterator}, ThreadPool, }; use rocksdb::DBRawIterator; use solana_measure::measure::Measure; use solana_metrics::{datapoint_debug, datapoint_error}; use solana_rayon_threadlimit::get_thread_count; use solana_runtime::hardened_unpack::{unpack_genesis_archive, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE}; use solana_sdk::{ clock::{Slot, UnixTimestamp, DEFAULT_TICKS_PER_SECOND, MS_PER_TICK}, genesis_config::GenesisConfig, hash::Hash, pubkey::Pubkey, sanitize::Sanitize, signature::{Keypair, Signature, Signer}, timing::timestamp, transaction::Transaction, }; use solana_storage_proto::{StoredExtendedRewards, StoredTransactionStatusMeta}; use solana_transaction_status::{ ConfirmedBlock, ConfirmedTransaction, ConfirmedTransactionStatusWithSignature, Rewards, TransactionStatusMeta, TransactionWithStatusMeta, }; use std::{ borrow::Cow, cell::RefCell, cmp, collections::{HashMap, HashSet}, convert::TryInto, fs, io::{Error as IoError, ErrorKind}, path::{Path, PathBuf}, rc::Rc, sync::{ mpsc::{sync_channel, Receiver, SyncSender, TrySendError}, Arc, Mutex, RwLock, }, }; use thiserror::Error; use trees::{Tree, TreeWalk}; pub mod blockstore_purge; pub const BLOCKSTORE_DIRECTORY: &str = "rocksdb"; thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new() .num_threads(get_thread_count()) .thread_name(|ix| format!("blockstore_{}", ix)) .build() .unwrap())); thread_local!(static PAR_THREAD_POOL_ALL_CPUS: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new() .num_threads(num_cpus::get()) .thread_name(|ix| format!("blockstore_{}", ix)) .build() .unwrap())); pub const MAX_COMPLETED_SLOTS_IN_CHANNEL: usize = 100_000; pub const MAX_TURBINE_PROPAGATION_IN_MS: u64 = 100; pub const MAX_TURBINE_DELAY_IN_TICKS: u64 = MAX_TURBINE_PROPAGATION_IN_MS / MS_PER_TICK; // An upper bound on maximum number of data shreds we can handle in a slot // 32K shreds would allow ~320K peak TPS // (32K shreds per slot * 4 TX per shred * 2.5 slots per sec) pub const MAX_DATA_SHREDS_PER_SLOT: usize = 32_768; pub type CompletedSlotsReceiver = Receiver<Vec<u64>>; type CompletedRanges = Vec<(u32, u32)>; #[derive(Clone, Copy)] pub enum PurgeType { Exact, PrimaryIndex, } #[derive(Error, Debug)] pub enum InsertDataShredError { Exists, InvalidShred, BlockstoreError(#[from] BlockstoreError), } impl std::fmt::Display for InsertDataShredError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "insert data shred error") } } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct CompletedDataSetInfo { pub slot: Slot, pub start_index: u32, pub end_index: u32, } pub struct BlockstoreSignals { pub blockstore: Blockstore, pub ledger_signal_receiver: Receiver<bool>, pub completed_slots_receivers: 
[CompletedSlotsReceiver; 2], } // ledger window pub struct Blockstore { ledger_path: PathBuf, db: Arc<Database>, meta_cf: LedgerColumn<cf::SlotMeta>, dead_slots_cf: LedgerColumn<cf::DeadSlots>, duplicate_slots_cf: LedgerColumn<cf::DuplicateSlots>, erasure_meta_cf: LedgerColumn<cf::ErasureMeta>, orphans_cf: LedgerColumn<cf::Orphans>, index_cf: LedgerColumn<cf::Index>, data_shred_cf: LedgerColumn<cf::ShredData>, code_shred_cf: LedgerColumn<cf::ShredCode>, transaction_status_cf: LedgerColumn<cf::TransactionStatus>, address_signatures_cf: LedgerColumn<cf::AddressSignatures>, transaction_status_index_cf: LedgerColumn<cf::TransactionStatusIndex>, active_transaction_status_index: RwLock<u64>, rewards_cf: LedgerColumn<cf::Rewards>, blocktime_cf: LedgerColumn<cf::Blocktime>, perf_samples_cf: LedgerColumn<cf::PerfSamples>, last_root: Arc<RwLock<Slot>>, insert_shreds_lock: Arc<Mutex<()>>, pub new_shreds_signals: Vec<SyncSender<bool>>, pub completed_slots_senders: Vec<SyncSender<Vec<Slot>>>, pub lowest_cleanup_slot: Arc<RwLock<u64>>, no_compaction: bool, } pub struct IndexMetaWorkingSetEntry { index: Index, // true only if at least one shred for this Index was inserted since the time this // struct was created did_insert_occur: bool, } pub struct SlotMetaWorkingSetEntry { new_slot_meta: Rc<RefCell<SlotMeta>>, old_slot_meta: Option<SlotMeta>, // True only if at least one shred for this SlotMeta was inserted since the time this // struct was created. did_insert_occur: bool, } #[derive(Default)] pub struct BlockstoreInsertionMetrics { pub num_shreds: usize, pub insert_lock_elapsed: u64, pub insert_shreds_elapsed: u64, pub shred_recovery_elapsed: u64, pub chaining_elapsed: u64, pub commit_working_sets_elapsed: u64, pub write_batch_elapsed: u64, pub total_elapsed: u64, pub num_inserted: u64, pub num_repair: u64, pub num_recovered: usize, pub num_recovered_inserted: usize, pub num_recovered_failed_sig: usize, pub num_recovered_failed_invalid: usize, pub num_recovered_exists: usize, pub index_meta_time: u64, } impl SlotMetaWorkingSetEntry { fn new(new_slot_meta: Rc<RefCell<SlotMeta>>, old_slot_meta: Option<SlotMeta>) -> Self { Self { new_slot_meta, old_slot_meta, did_insert_occur: false, } } } impl BlockstoreInsertionMetrics { pub fn report_metrics(&self, metric_name: &'static str) { datapoint_info!( metric_name, ("num_shreds", self.num_shreds as i64, i64), ("total_elapsed", self.total_elapsed as i64, i64), ("insert_lock_elapsed", self.insert_lock_elapsed as i64, i64), ( "insert_shreds_elapsed", self.insert_shreds_elapsed as i64, i64 ), ( "shred_recovery_elapsed", self.shred_recovery_elapsed as i64, i64 ), ("chaining_elapsed", self.chaining_elapsed as i64, i64), ( "commit_working_sets_elapsed", self.commit_working_sets_elapsed as i64, i64 ), ("write_batch_elapsed", self.write_batch_elapsed as i64, i64), ("num_inserted", self.num_inserted as i64, i64), ("num_repair", self.num_repair as i64, i64), ("num_recovered", self.num_recovered as i64, i64), ( "num_recovered_inserted", self.num_recovered_inserted as i64, i64 ), ( "num_recovered_failed_sig", self.num_recovered_failed_sig as i64, i64 ), ( "num_recovered_failed_invalid", self.num_recovered_failed_invalid as i64, i64 ), ( "num_recovered_exists", self.num_recovered_exists as i64, i64 ), ); } } impl Blockstore { pub fn db(self) -> Arc<Database> { self.db } pub fn ledger_path(&self) -> &Path { &self.ledger_path } /// Opens a Ledger in directory, provides "infinite" window of shreds pub fn open(ledger_path: &Path) -> Result<Blockstore> { 
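        // Primary (read-write) access, no explicit recovery mode, and the open-file
        // limit check enforced.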
Self::do_open(ledger_path, AccessType::PrimaryOnly, None, true) } pub fn open_with_access_type( ledger_path: &Path, access_type: AccessType, recovery_mode: Option<BlockstoreRecoveryMode>, enforce_ulimit_nofile: bool, ) -> Result<Blockstore> { Self::do_open( ledger_path, access_type, recovery_mode, enforce_ulimit_nofile, ) } fn do_open( ledger_path: &Path, access_type: AccessType, recovery_mode: Option<BlockstoreRecoveryMode>, enforce_ulimit_nofile: bool, ) -> Result<Blockstore> { fs::create_dir_all(&ledger_path)?; let blockstore_path = ledger_path.join(BLOCKSTORE_DIRECTORY); adjust_ulimit_nofile(enforce_ulimit_nofile)?; // Open the database let mut measure = Measure::start("open"); info!("Opening database at {:?}", blockstore_path); let db = Database::open(&blockstore_path, access_type, recovery_mode)?; // Create the metadata column family let meta_cf = db.column(); // Create the dead slots column family let dead_slots_cf = db.column(); let duplicate_slots_cf = db.column(); let erasure_meta_cf = db.column(); // Create the orphans column family. An "orphan" is defined as // the head of a detached chain of slots, i.e. a slot with no // known parent let orphans_cf = db.column(); let index_cf = db.column(); let data_shred_cf = db.column(); let code_shred_cf = db.column(); let transaction_status_cf = db.column(); let address_signatures_cf = db.column(); let transaction_status_index_cf = db.column(); let rewards_cf = db.column(); let blocktime_cf = db.column(); let perf_samples_cf = db.column(); let db = Arc::new(db); // Get max root or 0 if it doesn't exist let max_root = db .iter::<cf::Root>(IteratorMode::End)? .next() .map(|(slot, _)| slot) .unwrap_or(0); let last_root = Arc::new(RwLock::new(max_root)); // Get active transaction-status index or 0 let active_transaction_status_index = db .iter::<cf::TransactionStatusIndex>(IteratorMode::Start)? 
.next(); let initialize_transaction_status_index = active_transaction_status_index.is_none(); let active_transaction_status_index = active_transaction_status_index .and_then(|(_, data)| { let index0: TransactionStatusIndexMeta = deserialize(&data).unwrap(); if index0.frozen { Some(1) } else { None } }) .unwrap_or(0); measure.stop(); info!("{:?} {}", blockstore_path, measure); let blockstore = Blockstore { ledger_path: ledger_path.to_path_buf(), db, meta_cf, dead_slots_cf, duplicate_slots_cf, erasure_meta_cf, orphans_cf, index_cf, data_shred_cf, code_shred_cf, transaction_status_cf, address_signatures_cf, transaction_status_index_cf, active_transaction_status_index: RwLock::new(active_transaction_status_index), rewards_cf, blocktime_cf, perf_samples_cf, new_shreds_signals: vec![], completed_slots_senders: vec![], insert_shreds_lock: Arc::new(Mutex::new(())), last_root, lowest_cleanup_slot: Arc::new(RwLock::new(0)), no_compaction: false, }; if initialize_transaction_status_index { blockstore.initialize_transaction_status_index()?; } Ok(blockstore) } pub fn open_with_signal( ledger_path: &Path, recovery_mode: Option<BlockstoreRecoveryMode>, enforce_ulimit_nofile: bool, ) -> Result<BlockstoreSignals> { let mut blockstore = Self::open_with_access_type( ledger_path, AccessType::PrimaryOnly, recovery_mode, enforce_ulimit_nofile, )?; let (ledger_signal_sender, ledger_signal_receiver) = sync_channel(1); let (completed_slots_sender1, completed_slots_receiver1) = sync_channel(MAX_COMPLETED_SLOTS_IN_CHANNEL); let (completed_slots_sender2, completed_slots_receiver2) = sync_channel(MAX_COMPLETED_SLOTS_IN_CHANNEL); blockstore.new_shreds_signals = vec![ledger_signal_sender]; blockstore.completed_slots_senders = vec![completed_slots_sender1, completed_slots_sender2]; Ok(BlockstoreSignals { blockstore, ledger_signal_receiver, completed_slots_receivers: [completed_slots_receiver1, completed_slots_receiver2], }) } pub fn add_tree( &self, forks: Tree<Slot>, is_orphan: bool, is_slot_complete: bool, num_ticks: u64, starting_hash: Hash, ) { let mut walk = TreeWalk::from(forks); let mut blockhashes = HashMap::new(); while let Some(visit) = walk.get() { let slot = visit.node().data; if self.meta(slot).unwrap().is_some() && self.orphan(slot).unwrap().is_none() { // If slot exists in blockstore and is not an orphan, then skip it walk.forward(); continue; } let parent = walk.get_parent().map(|n| n.data); if parent.is_some() || !is_orphan { let parent_hash = parent // parent won't exist for first node in a tree where // `is_orphan == true` .and_then(|parent| blockhashes.get(&parent)) .unwrap_or(&starting_hash); let mut entries = create_ticks( num_ticks * (std::cmp::max(1, slot - parent.unwrap_or(slot))), 0, *parent_hash, ); blockhashes.insert(slot, entries.last().unwrap().hash); if !is_slot_complete { entries.pop().unwrap(); } let shreds = entries_to_test_shreds( entries.clone(), slot, parent.unwrap_or(slot), is_slot_complete, 0, ); self.insert_shreds(shreds, None, false).unwrap(); } walk.forward(); } } pub fn set_no_compaction(&mut self, no_compaction: bool) { self.no_compaction = no_compaction; } pub fn destroy(ledger_path: &Path) -> Result<()> { // Database::destroy() fails if the path doesn't exist fs::create_dir_all(ledger_path)?; let blockstore_path = ledger_path.join(BLOCKSTORE_DIRECTORY); Database::destroy(&blockstore_path) } pub fn meta(&self, slot: Slot) -> Result<Option<SlotMeta>> { self.meta_cf.get(slot) } pub fn is_full(&self, slot: Slot) -> bool { if let Ok(Some(meta)) = self.meta_cf.get(slot) { return 
meta.is_full(); } false } pub fn erasure_meta(&self, slot: Slot, set_index: u64) -> Result<Option<ErasureMeta>> { self.erasure_meta_cf.get((slot, set_index)) } pub fn orphan(&self, slot: Slot) -> Result<Option<bool>> { self.orphans_cf.get(slot) } // Get max root or 0 if it doesn't exist pub fn max_root(&self) -> Slot { self.db .iter::<cf::Root>(IteratorMode::End) .expect("Couldn't get rooted iterator for max_root()") .next() .map(|(slot, _)| slot) .unwrap_or(0) } pub fn slot_meta_iterator( &self, slot: Slot, ) -> Result<impl Iterator<Item = (Slot, SlotMeta)> + '_> { let meta_iter = self .db .iter::<cf::SlotMeta>(IteratorMode::From(slot, IteratorDirection::Forward))?; Ok(meta_iter.map(|(slot, slot_meta_bytes)| { ( slot, deserialize(&slot_meta_bytes).unwrap_or_else(|e| { panic!("Could not deserialize SlotMeta for slot {}: {:?}", slot, e) }), ) })) } #[allow(dead_code)] pub fn live_slots_iterator(&self, root: Slot) -> impl Iterator<Item = (Slot, SlotMeta)> + '_ { let root_forks = NextSlotsIterator::new(root, self); let orphans_iter = self.orphans_iterator(root + 1).unwrap(); root_forks.chain(orphans_iter.flat_map(move |orphan| NextSlotsIterator::new(orphan, self))) } pub fn slot_data_iterator( &self, slot: Slot, index: u64, ) -> Result<impl Iterator<Item = ((u64, u64), Box<[u8]>)> + '_> { let slot_iterator = self.db.iter::<cf::ShredData>(IteratorMode::From( (slot, index), IteratorDirection::Forward, ))?; Ok(slot_iterator.take_while(move |((shred_slot, _), _)| *shred_slot == slot)) } pub fn slot_coding_iterator( &self, slot: Slot, index: u64, ) -> Result<impl Iterator<Item = ((u64, u64), Box<[u8]>)> + '_> { let slot_iterator = self.db.iter::<cf::ShredCode>(IteratorMode::From( (slot, index), IteratorDirection::Forward, ))?; Ok(slot_iterator.take_while(move |((shred_slot, _), _)| *shred_slot == slot)) } pub fn rooted_slot_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = u64> + '_> { let slot_iterator = self .db .iter::<cf::Root>(IteratorMode::From(slot, IteratorDirection::Forward))?; Ok(slot_iterator.map(move |(rooted_slot, _)| rooted_slot)) } fn get_recovery_data_shreds( index: &mut Index, set_index: u64, slot: Slot, erasure_meta: &ErasureMeta, available_shreds: &mut Vec<Shred>, prev_inserted_datas: &mut HashMap<(u64, u64), Shred>, data_cf: &LedgerColumn<cf::ShredData>, ) { (set_index..set_index + erasure_meta.config.num_data() as u64).for_each(|i| { if index.data().is_present(i) { if let Some(shred) = prev_inserted_datas.remove(&(slot, i)).or_else(|| { let some_data = data_cf .get_bytes((slot, i)) .expect("Database failure, could not fetch data shred"); if let Some(data) = some_data { Shred::new_from_serialized_shred(data).ok() } else { warn!("Data shred deleted while reading for recovery"); None } }) { available_shreds.push(shred); } } }); } fn get_recovery_coding_shreds( index: &mut Index, slot: Slot, erasure_meta: &ErasureMeta, available_shreds: &mut Vec<Shred>, prev_inserted_codes: &mut HashMap<(u64, u64), Shred>, code_cf: &LedgerColumn<cf::ShredCode>, ) { (erasure_meta.set_index..erasure_meta.set_index + erasure_meta.config.num_coding() as u64) .for_each(|i| { if let Some(shred) = prev_inserted_codes .remove(&(slot, i)) .map(|s| { // Remove from the index so it doesn't get committed. 
We know // this is safe to do because everything in // `prev_inserted_codes` does not yet exist in blockstore // (guaranteed by `check_cache_coding_shred`) index.coding_mut().set_present(i, false); s }) .or_else(|| { if index.coding().is_present(i) { let some_code = code_cf .get_bytes((slot, i)) .expect("Database failure, could not fetch code shred"); if let Some(code) = some_code { Shred::new_from_serialized_shred(code).ok() } else { warn!("Code shred deleted while reading for recovery"); None } } else { None } }) { available_shreds.push(shred); } }); } fn recover_shreds( index: &mut Index, set_index: u64, erasure_meta: &ErasureMeta, prev_inserted_datas: &mut HashMap<(u64, u64), Shred>, prev_inserted_codes: &mut HashMap<(u64, u64), Shred>, recovered_data_shreds: &mut Vec<Shred>, data_cf: &LedgerColumn<cf::ShredData>, code_cf: &LedgerColumn<cf::ShredCode>, ) { // Find shreds for this erasure set and try recovery let slot = index.slot; let mut available_shreds = vec![]; Self::get_recovery_data_shreds( index, set_index, slot, erasure_meta, &mut available_shreds, prev_inserted_datas, data_cf, ); Self::get_recovery_coding_shreds( index, slot, erasure_meta, &mut available_shreds, prev_inserted_codes, code_cf, ); if let Ok(mut result) = Shredder::try_recovery( available_shreds, erasure_meta.config.num_data(), erasure_meta.config.num_coding(), set_index as usize, slot, ) { Self::submit_metrics( slot, set_index, erasure_meta, true, "complete".into(), result.len(), ); recovered_data_shreds.append(&mut result); } else { Self::submit_metrics(slot, set_index, erasure_meta, true, "incomplete".into(), 0); } } fn submit_metrics( slot: Slot, set_index: u64, erasure_meta: &ErasureMeta, attempted: bool, status: String, recovered: usize, ) { datapoint_debug!( "blockstore-erasure", ("slot", slot as i64, i64), ("start_index", set_index as i64, i64), ( "end_index", (erasure_meta.set_index + erasure_meta.config.num_data() as u64) as i64, i64 ), ("recovery_attempted", attempted, bool), ("recovery_status", status, String), ("recovered", recovered as i64, i64), ); } fn try_shred_recovery( db: &Database, erasure_metas: &HashMap<(u64, u64), ErasureMeta>, index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>, prev_inserted_datas: &mut HashMap<(u64, u64), Shred>, prev_inserted_codes: &mut HashMap<(u64, u64), Shred>, ) -> Vec<Shred> { let data_cf = db.column::<cf::ShredData>(); let code_cf = db.column::<cf::ShredCode>(); let mut recovered_data_shreds = vec![]; // Recovery rules: // 1. Only try recovery around indexes for which new data or coding shreds are received // 2. For new data shreds, check if an erasure set exists. If not, don't try recovery // 3. Before trying recovery, check if enough number of shreds have been received // 3a. Enough number of shreds = (#data + #coding shreds) > erasure.num_data for (&(slot, set_index), erasure_meta) in erasure_metas.iter() { let index_meta_entry = index_working_set.get_mut(&slot).expect("Index"); let index = &mut index_meta_entry.index; match erasure_meta.status(&index) { ErasureMetaStatus::CanRecover => { Self::recover_shreds( index, set_index, erasure_meta, prev_inserted_datas, prev_inserted_codes, &mut recovered_data_shreds, &data_cf, &code_cf, ); } ErasureMetaStatus::DataFull => { (set_index..set_index + erasure_meta.config.num_coding() as u64).for_each( |i| { // Remove saved coding shreds. We don't need these for future recovery. if prev_inserted_codes.remove(&(slot, i)).is_some() { // Remove from the index so it doesn't get committed. 
We know // this is safe to do because everything in // `prev_inserted_codes` does not yet exist in blockstore // (guaranteed by `check_cache_coding_shred`) index.coding_mut().set_present(i, false); } }, ); Self::submit_metrics( slot, set_index, erasure_meta, false, "complete".into(), 0, ); } ErasureMetaStatus::StillNeed(needed) => { Self::submit_metrics( slot, set_index, erasure_meta, false, format!("still need: {}", needed), 0, ); } }; } recovered_data_shreds } pub fn insert_shreds_handle_duplicate<F>( &self, shreds: Vec<Shred>, leader_schedule: Option<&Arc<LeaderScheduleCache>>, is_trusted: bool, handle_duplicate: &F, metrics: &mut BlockstoreInsertionMetrics, ) -> Result<(Vec<CompletedDataSetInfo>, Vec<usize>)> where F: Fn(Shred), { let mut total_start = Measure::start("Total elapsed"); let mut start = Measure::start("Blockstore lock"); let _lock = self.insert_shreds_lock.lock().unwrap(); start.stop(); let insert_lock_elapsed = start.as_us(); let db = &*self.db; let mut write_batch = db.batch()?; let mut just_inserted_coding_shreds = HashMap::new(); let mut just_inserted_data_shreds = HashMap::new(); let mut erasure_metas = HashMap::new(); let mut slot_meta_working_set = HashMap::new(); let mut index_working_set = HashMap::new(); let num_shreds = shreds.len(); let mut start = Measure::start("Shred insertion"); let mut num_inserted = 0; let mut index_meta_time = 0; let mut newly_completed_data_sets: Vec<CompletedDataSetInfo> = vec![]; let mut inserted_indices = Vec::new(); shreds.into_iter().enumerate().for_each(|(i, shred)| { if shred.is_data() { let shred_slot = shred.slot(); if let Ok(completed_data_sets) = self.check_insert_data_shred( shred, &mut erasure_metas, &mut index_working_set, &mut slot_meta_working_set, &mut write_batch, &mut just_inserted_data_shreds, &mut index_meta_time, is_trusted, handle_duplicate, leader_schedule, false, ) { newly_completed_data_sets.extend(completed_data_sets.into_iter().map( |(start_index, end_index)| CompletedDataSetInfo { slot: shred_slot, start_index, end_index, }, )); inserted_indices.push(i); num_inserted += 1; } } else if shred.is_code() { self.check_cache_coding_shred( shred, &mut erasure_metas, &mut index_working_set, &mut just_inserted_coding_shreds, &mut index_meta_time, handle_duplicate, is_trusted, ); } else { panic!("There should be no other case"); } }); start.stop(); let insert_shreds_elapsed = start.as_us(); let mut start = Measure::start("Shred recovery"); let mut num_recovered = 0; let mut num_recovered_inserted = 0; let mut num_recovered_failed_sig = 0; let mut num_recovered_failed_invalid = 0; let mut num_recovered_exists = 0; if let Some(leader_schedule_cache) = leader_schedule { let recovered_data = Self::try_shred_recovery( &db, &erasure_metas, &mut index_working_set, &mut just_inserted_data_shreds, &mut just_inserted_coding_shreds, ); num_recovered = recovered_data.len(); recovered_data.into_iter().for_each(|shred| { if let Some(leader) = leader_schedule_cache.slot_leader_at(shred.slot(), None) { let shred_slot = shred.slot(); if shred.verify(&leader) { match self.check_insert_data_shred( shred, &mut erasure_metas, &mut index_working_set, &mut slot_meta_working_set, &mut write_batch, &mut just_inserted_data_shreds, &mut index_meta_time, is_trusted, &handle_duplicate, leader_schedule, true, ) { Err(InsertDataShredError::Exists) => { num_recovered_exists += 1; } Err(InsertDataShredError::InvalidShred) => { num_recovered_failed_invalid += 1; } Err(InsertDataShredError::BlockstoreError(_)) => {} Ok(completed_data_sets) => { 
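                                    // Recovered shred accepted: record its completed
                                    // data ranges alongside the directly inserted ones.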
newly_completed_data_sets.extend( completed_data_sets.into_iter().map( |(start_index, end_index)| CompletedDataSetInfo { slot: shred_slot, start_index, end_index, }, ), ); num_recovered_inserted += 1; } } } else { num_recovered_failed_sig += 1; } } }); } start.stop(); let shred_recovery_elapsed = start.as_us(); just_inserted_coding_shreds .into_iter() .for_each(|((_, _), shred)| { self.check_insert_coding_shred( shred, &mut index_working_set, &mut write_batch, &mut index_meta_time, ); num_inserted += 1; }); let mut start = Measure::start("Shred recovery"); // Handle chaining for the members of the slot_meta_working_set that were inserted into, // drop the others handle_chaining(&self.db, &mut write_batch, &mut slot_meta_working_set)?; start.stop(); let chaining_elapsed = start.as_us(); let mut start = Measure::start("Commit Working Sets"); let (should_signal, newly_completed_slots) = commit_slot_meta_working_set( &slot_meta_working_set, &self.completed_slots_senders, &mut write_batch, )?; for ((slot, set_index), erasure_meta) in erasure_metas { write_batch.put::<cf::ErasureMeta>((slot, set_index), &erasure_meta)?; } for (&slot, index_working_set_entry) in index_working_set.iter() { if index_working_set_entry.did_insert_occur { write_batch.put::<cf::Index>(slot, &index_working_set_entry.index)?; } } start.stop(); let commit_working_sets_elapsed = start.as_us(); let mut start = Measure::start("Write Batch"); self.db.write(write_batch)?; start.stop(); let write_batch_elapsed = start.as_us(); send_signals( &self.new_shreds_signals, &self.completed_slots_senders, should_signal, newly_completed_slots, ); total_start.stop(); metrics.num_shreds += num_shreds; metrics.total_elapsed += total_start.as_us(); metrics.insert_lock_elapsed += insert_lock_elapsed; metrics.insert_shreds_elapsed += insert_shreds_elapsed; metrics.shred_recovery_elapsed += shred_recovery_elapsed; metrics.chaining_elapsed += chaining_elapsed; metrics.commit_working_sets_elapsed += commit_working_sets_elapsed; metrics.write_batch_elapsed += write_batch_elapsed; metrics.num_inserted += num_inserted; metrics.num_recovered += num_recovered; metrics.num_recovered_inserted += num_recovered_inserted; metrics.num_recovered_failed_sig += num_recovered_failed_sig; metrics.num_recovered_failed_invalid = num_recovered_failed_invalid; metrics.num_recovered_exists = num_recovered_exists; metrics.index_meta_time += index_meta_time; Ok((newly_completed_data_sets, inserted_indices)) } pub fn clear_unconfirmed_slot(&self, slot: Slot) { let _lock = self.insert_shreds_lock.lock().unwrap(); if let Some(mut slot_meta) = self .meta(slot) .expect("Couldn't fetch from SlotMeta column family") { // Clear all slot related information self.run_purge(slot, slot, PurgeType::PrimaryIndex) .expect("Purge database operations failed"); // Reinsert parts of `slot_meta` that are important to retain, like the `next_slots` // field. 
slot_meta.clear_unconfirmed_slot(); self.meta_cf .put(slot, &slot_meta) .expect("Couldn't insert into SlotMeta column family"); } else { error!( "clear_unconfirmed_slot() called on slot {} with no SlotMeta", slot ); } } pub fn insert_shreds( &self, shreds: Vec<Shred>, leader_schedule: Option<&Arc<LeaderScheduleCache>>, is_trusted: bool, ) -> Result<(Vec<CompletedDataSetInfo>, Vec<usize>)> { self.insert_shreds_handle_duplicate( shreds, leader_schedule, is_trusted, &|_| {}, &mut BlockstoreInsertionMetrics::default(), ) } fn check_insert_coding_shred( &self, shred: Shred, index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>, write_batch: &mut WriteBatch, index_meta_time: &mut u64, ) -> bool { let slot = shred.slot(); let index_meta_working_set_entry = get_index_meta_entry(&self.db, slot, index_working_set, index_meta_time); let index_meta = &mut index_meta_working_set_entry.index; // This gives the index of first coding shred in this FEC block // So, all coding shreds in a given FEC block will have the same set index self.insert_coding_shred(index_meta, &shred, write_batch) .map(|_| { index_meta_working_set_entry.did_insert_occur = true; }) .is_ok() } fn erasure_mismatch(shred1: &Shred, shred2: &Shred) -> bool { shred1.coding_header.num_coding_shreds != shred2.coding_header.num_coding_shreds || shred1.coding_header.num_data_shreds != shred2.coding_header.num_data_shreds } fn check_cache_coding_shred<F>( &self, shred: Shred, erasure_metas: &mut HashMap<(u64, u64), ErasureMeta>, index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>, just_received_coding_shreds: &mut HashMap<(u64, u64), Shred>, index_meta_time: &mut u64, handle_duplicate: &F, is_trusted: bool, ) -> bool where F: Fn(Shred), { let slot = shred.slot(); let shred_index = u64::from(shred.index()); let index_meta_working_set_entry = get_index_meta_entry(&self.db, slot, index_working_set, index_meta_time); let index_meta = &mut index_meta_working_set_entry.index; // This gives the index of first coding shred in this FEC block // So, all coding shreds in a given FEC block will have the same set index if !is_trusted { if index_meta.coding().is_present(shred_index) { handle_duplicate(shred); return false; } if !Blockstore::should_insert_coding_shred(&shred, &self.last_root) { return false; } } let set_index = u64::from(shred.common_header.fec_set_index); let erasure_config = ErasureConfig::new( shred.coding_header.num_data_shreds as usize, shred.coding_header.num_coding_shreds as usize, ); let erasure_meta = erasure_metas.entry((slot, set_index)).or_insert_with(|| { self.erasure_meta_cf .get((slot, set_index)) .expect("Expect database get to succeed") .unwrap_or_else(|| ErasureMeta::new(set_index, erasure_config)) }); if erasure_config != erasure_meta.config { let conflicting_shred = self.find_conflicting_coding_shred( &shred, slot, erasure_meta, just_received_coding_shreds, ); if let Some(conflicting_shred) = conflicting_shred { if self .store_duplicate_if_not_existing(slot, conflicting_shred, shred.payload.clone()) .is_err() { warn!("bad duplicate store.."); } } else { datapoint_info!("bad-conflict-shred", ("slot", slot, i64)); } // ToDo: This is a potential slashing condition warn!("Received multiple erasure configs for the same erasure set!!!"); warn!( "Slot: {}, shred index: {}, set_index: {}, is_duplicate: {}, stored config: {:#?}, new config: {:#?}", slot, shred.index(), set_index, self.has_duplicate_shreds_in_slot(slot), erasure_meta.config, erasure_config ); return false; } // Should be safe to modify 
index_meta here. Two cases // 1) Recovery happens: Then all inserted erasure metas are removed // from just_received_coding_shreds, and nothing will be committed by // `check_insert_coding_shred`, so the coding index meta will not be // committed index_meta.coding_mut().set_present(shred_index, true); just_received_coding_shreds .entry((slot, shred_index)) .or_insert_with(|| shred); true } fn find_conflicting_coding_shred( &self, shred: &Shred, slot: Slot, erasure_meta: &ErasureMeta, just_received_coding_shreds: &mut HashMap<(u64, u64), Shred>, ) -> Option<Vec<u8>> { // Search for the shred which set the initial erasure config, either inserted, // or in the current batch in just_received_coding_shreds. let coding_indices = erasure_meta.set_index ..erasure_meta.set_index + erasure_meta.config.num_coding() as u64; let mut conflicting_shred = None; for coding_index in coding_indices { let maybe_shred = self.get_coding_shred(slot, coding_index); if let Ok(Some(shred_data)) = maybe_shred { let potential_shred = Shred::new_from_serialized_shred(shred_data).unwrap(); if Self::erasure_mismatch(&potential_shred, &shred) { conflicting_shred = Some(potential_shred.payload); } break; } else if let Some(potential_shred) = just_received_coding_shreds.get(&(slot, coding_index)) { if Self::erasure_mismatch(&potential_shred, &shred) { conflicting_shred = Some(potential_shred.payload.clone()); } break; } } conflicting_shred } #[allow(clippy::too_many_arguments)] fn check_insert_data_shred<F>( &self, shred: Shred, erasure_metas: &mut HashMap<(u64, u64), ErasureMeta>, index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>, slot_meta_working_set: &mut HashMap<u64, SlotMetaWorkingSetEntry>, write_batch: &mut WriteBatch, just_inserted_data_shreds: &mut HashMap<(u64, u64), Shred>, index_meta_time: &mut u64, is_trusted: bool, handle_duplicate: &F, leader_schedule: Option<&Arc<LeaderScheduleCache>>, is_recovered: bool, ) -> std::result::Result<Vec<(u32, u32)>, InsertDataShredError> where F: Fn(Shred), { let slot = shred.slot(); let shred_index = u64::from(shred.index()); let index_meta_working_set_entry = get_index_meta_entry(&self.db, slot, index_working_set, index_meta_time); let index_meta = &mut index_meta_working_set_entry.index; let slot_meta_entry = get_slot_meta_entry(&self.db, slot_meta_working_set, slot, shred.parent()); let slot_meta = &mut slot_meta_entry.new_slot_meta.borrow_mut(); if !is_trusted { if Self::is_data_shred_present(&shred, slot_meta, &index_meta.data()) { handle_duplicate(shred); return Err(InsertDataShredError::Exists); } else if !self.should_insert_data_shred( &shred, slot_meta, just_inserted_data_shreds, &self.last_root, leader_schedule, is_recovered, ) { return Err(InsertDataShredError::InvalidShred); } } let set_index = u64::from(shred.common_header.fec_set_index); let newly_completed_data_sets = self.insert_data_shred(slot_meta, index_meta.data_mut(), &shred, write_batch)?; just_inserted_data_shreds.insert((slot, shred_index), shred); index_meta_working_set_entry.did_insert_occur = true; slot_meta_entry.did_insert_occur = true; if !erasure_metas.contains_key(&(slot, set_index)) { if let Some(meta) = self .erasure_meta_cf .get((slot, set_index)) .expect("Expect database get to succeed") { erasure_metas.insert((slot, set_index), meta); } } Ok(newly_completed_data_sets) } fn should_insert_coding_shred(shred: &Shred, last_root: &RwLock<u64>) -> bool { let slot = shred.slot(); let shred_index = shred.index(); if shred.is_data() || shred_index < 
u32::from(shred.coding_header.position) { return false; } let set_index = shred.common_header.fec_set_index; !(shred.coding_header.num_coding_shreds == 0 || shred.coding_header.position >= shred.coding_header.num_coding_shreds || std::u32::MAX - set_index < u32::from(shred.coding_header.num_coding_shreds) - 1 || slot <= *last_root.read().unwrap() || shred.coding_header.num_coding_shreds as u32 > (8 * crate::shred::MAX_DATA_SHREDS_PER_FEC_BLOCK)) } fn insert_coding_shred( &self, index_meta: &mut Index, shred: &Shred, write_batch: &mut WriteBatch, ) -> Result<()> { let slot = shred.slot(); let shred_index = u64::from(shred.index()); // Assert guaranteed by integrity checks on the shred that happen before // `insert_coding_shred` is called assert!(shred.is_code() && shred_index >= u64::from(shred.coding_header.position)); // Commit step: commit all changes to the mutable structures at once, or none at all. // We don't want only a subset of these changes going through. write_batch.put_bytes::<cf::ShredCode>((slot, shred_index), &shred.payload)?; index_meta.coding_mut().set_present(shred_index, true); Ok(()) } fn is_data_shred_present(shred: &Shred, slot_meta: &SlotMeta, data_index: &ShredIndex) -> bool { let shred_index = u64::from(shred.index()); // Check that the shred doesn't already exist in blockstore shred_index < slot_meta.consumed || data_index.is_present(shred_index) } fn get_data_shred_from_just_inserted_or_db<'a>( &'a self, just_inserted_data_shreds: &'a HashMap<(u64, u64), Shred>, slot: Slot, index: u64, ) -> Cow<'a, Vec<u8>> { if let Some(shred) = just_inserted_data_shreds.get(&(slot, index)) { Cow::Borrowed(&shred.payload) } else { // If it doesn't exist in the just inserted set, it must exist in // the backing store Cow::Owned(self.get_data_shred(slot, index).unwrap().unwrap()) } } fn should_insert_data_shred( &self, shred: &Shred, slot_meta: &SlotMeta, just_inserted_data_shreds: &HashMap<(u64, u64), Shred>, last_root: &RwLock<u64>, leader_schedule: Option<&Arc<LeaderScheduleCache>>, is_recovered: bool, ) -> bool { use crate::shred::SHRED_PAYLOAD_SIZE; let shred_index = u64::from(shred.index()); let slot = shred.slot(); let last_in_slot = if shred.last_in_slot() { debug!("got last in slot"); true } else { false }; if shred.data_header.size == 0 { return false; } if shred.payload.len() > SHRED_PAYLOAD_SIZE { return false; } // Check that we do not receive shred_index >= than the last_index // for the slot let last_index = slot_meta.last_index; if shred_index >= last_index { let leader_pubkey = leader_schedule .map(|leader_schedule| leader_schedule.slot_leader_at(slot, None)) .unwrap_or(None); let ending_shred: Cow<Vec<u8>> = self.get_data_shred_from_just_inserted_or_db( just_inserted_data_shreds, slot, last_index, ); if self .store_duplicate_if_not_existing( slot, ending_shred.into_owned(), shred.payload.clone(), ) .is_err() { warn!("store duplicate error"); } datapoint_error!( "blockstore_error", ( "error", format!( "Leader {:?}, slot {}: received index {} >= slot.last_index {}, is_recovered: {}", leader_pubkey, slot, shred_index, last_index, is_recovered ), String ) ); return false; } // Check that we do not receive a shred with "last_index" true, but shred_index // less than our current received if last_in_slot && shred_index < slot_meta.received { let leader_pubkey = leader_schedule .map(|leader_schedule| leader_schedule.slot_leader_at(slot, None)) .unwrap_or(None); let ending_shred: Cow<Vec<u8>> = self.get_data_shred_from_just_inserted_or_db( just_inserted_data_shreds, slot, 
slot_meta.received - 1, ); if self .store_duplicate_if_not_existing( slot, ending_shred.into_owned(), shred.payload.clone(), ) .is_err() { warn!("store duplicate error"); } datapoint_error!( "blockstore_error", ( "error", format!( "Leader {:?}, slot {}: received shred_index {} < slot.received {}, is_recovered: {}", leader_pubkey, slot, shred_index, slot_meta.received, is_recovered ), String ) ); return false; } let last_root = *last_root.read().unwrap(); verify_shred_slots(slot, slot_meta.parent_slot, last_root) } fn insert_data_shred( &self, slot_meta: &mut SlotMeta, data_index: &mut ShredIndex, shred: &Shred, write_batch: &mut WriteBatch, ) -> Result<Vec<(u32, u32)>> { let slot = shred.slot(); let index = u64::from(shred.index()); let last_in_slot = if shred.last_in_slot() { debug!("got last in slot"); true } else { false }; let last_in_data = if shred.data_complete() { debug!("got last in data"); true } else { false }; // Parent for slot meta should have been set by this point assert!(!is_orphan(slot_meta)); let new_consumed = if slot_meta.consumed == index { let mut current_index = index + 1; while data_index.is_present(current_index) { current_index += 1; } current_index } else { slot_meta.consumed }; // Commit step: commit all changes to the mutable structures at once, or none at all. // We don't want only a subset of these changes going through. write_batch.put_bytes::<cf::ShredData>( (slot, index), // Payload will be padded out to SHRED_PAYLOAD_SIZE // But only need to store the bytes within data_header.size &shred.payload[..shred.data_header.size as usize], )?; data_index.set_present(index, true); let newly_completed_data_sets = update_slot_meta( last_in_slot, last_in_data, slot_meta, index as u32, new_consumed, shred.reference_tick(), &data_index, ); if slot_meta.is_full() { datapoint_info!( "shred_insert_is_full", ( "total_time_ms", solana_sdk::timing::timestamp() - slot_meta.first_shred_timestamp, i64 ), ("slot", slot_meta.slot, i64), ("last_index", slot_meta.last_index, i64), ); } trace!("inserted shred into slot {:?} and index {:?}", slot, index); Ok(newly_completed_data_sets) } pub fn get_data_shred(&self, slot: Slot, index: u64) -> Result<Option<Vec<u8>>> { use crate::shred::SHRED_PAYLOAD_SIZE; self.data_shred_cf.get_bytes((slot, index)).map(|data| { data.map(|mut d| { // Only data_header.size bytes stored in the blockstore so // pad the payload out to SHRED_PAYLOAD_SIZE so that the // erasure recovery works properly. d.resize(cmp::max(d.len(), SHRED_PAYLOAD_SIZE), 0); d }) }) } pub fn get_data_shreds_for_slot( &self, slot: Slot, start_index: u64, ) -> ShredResult<Vec<Shred>> { self.slot_data_iterator(slot, start_index) .expect("blockstore couldn't fetch iterator") .map(|data| Shred::new_from_serialized_shred(data.1.to_vec())) .collect() } pub fn get_data_shreds( &self, slot: Slot, from_index: u64, to_index: u64, buffer: &mut [u8], ) -> Result<(u64, usize)> { // lowest_cleanup_slot is the last slot that was not cleaned up by // LedgerCleanupService let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap(); if *lowest_cleanup_slot > 0 && *lowest_cleanup_slot >= slot { return Err(BlockstoreError::SlotCleanedUp); } let meta_cf = self.db.column::<cf::SlotMeta>(); let mut buffer_offset = 0; let mut last_index = 0; if let Some(meta) = meta_cf.get(slot)? { if !meta.is_full() { warn!("The slot is not yet full. 
Will not return any shreds"); return Ok((last_index, buffer_offset)); } let to_index = cmp::min(to_index, meta.consumed); for index in from_index..to_index { if let Some(shred_data) = self.get_data_shred(slot, index)? { let shred_len = shred_data.len(); if buffer.len().saturating_sub(buffer_offset) >= shred_len { buffer[buffer_offset..buffer_offset + shred_len] .copy_from_slice(&shred_data[..shred_len]); buffer_offset += shred_len; last_index = index; // All shreds are of the same length. // Let's check if we have scope to accommodate another shred // If not, let's break right away, as it'll save on 1 DB read if buffer.len().saturating_sub(buffer_offset) < shred_len { break; } } else { break; } } } } Ok((last_index, buffer_offset)) } pub fn get_coding_shred(&self, slot: Slot, index: u64) -> Result<Option<Vec<u8>>> { self.code_shred_cf.get_bytes((slot, index)) } pub fn get_coding_shreds_for_slot( &self, slot: Slot, start_index: u64, ) -> ShredResult<Vec<Shred>> { self.slot_coding_iterator(slot, start_index) .expect("blockstore couldn't fetch iterator") .map(|code| Shred::new_from_serialized_shred(code.1.to_vec())) .collect() } // Only used by tests #[allow(clippy::too_many_arguments)] pub(crate) fn write_entries( &self, start_slot: Slot, num_ticks_in_start_slot: u64, start_index: u32, ticks_per_slot: u64, parent: Option<u64>, is_full_slot: bool, keypair: &Arc<Keypair>, entries: Vec<Entry>, version: u16, ) -> Result<usize /*num of data shreds*/> { let mut parent_slot = parent.map_or(start_slot.saturating_sub(1), |v| v); let num_slots = (start_slot - parent_slot).max(1); // Note: slot 0 has parent slot 0 assert!(num_ticks_in_start_slot < num_slots * ticks_per_slot); let mut remaining_ticks_in_slot = num_slots * ticks_per_slot - num_ticks_in_start_slot; let mut current_slot = start_slot; let mut shredder = Shredder::new(current_slot, parent_slot, keypair.clone(), 0, version).unwrap(); let mut all_shreds = vec![]; let mut slot_entries = vec![]; // Find all the entries for start_slot for entry in entries.into_iter() { if remaining_ticks_in_slot == 0 { current_slot += 1; parent_slot = current_slot - 1; remaining_ticks_in_slot = ticks_per_slot; let mut current_entries = vec![]; std::mem::swap(&mut slot_entries, &mut current_entries); let start_index = { if all_shreds.is_empty() { start_index } else { 0 } }; let (mut data_shreds, mut coding_shreds, _) = shredder.entries_to_shreds(&current_entries, true, start_index); all_shreds.append(&mut data_shreds); all_shreds.append(&mut coding_shreds); shredder = Shredder::new( current_slot, parent_slot, keypair.clone(), (ticks_per_slot - remaining_ticks_in_slot) as u8, version, ) .unwrap(); } if entry.is_tick() { remaining_ticks_in_slot -= 1; } slot_entries.push(entry); } if !slot_entries.is_empty() { let (mut data_shreds, mut coding_shreds, _) = shredder.entries_to_shreds(&slot_entries, is_full_slot, 0); all_shreds.append(&mut data_shreds); all_shreds.append(&mut coding_shreds); } let num_data = all_shreds.iter().filter(|shred| shred.is_data()).count(); self.insert_shreds(all_shreds, None, false)?; Ok(num_data) } pub fn get_index(&self, slot: Slot) -> Result<Option<Index>> { self.index_cf.get(slot) } /// Manually update the meta for a slot. /// Can interfere with automatic meta update and potentially break chaining. /// Dangerous. Use with care. 
pub fn put_meta_bytes(&self, slot: Slot, bytes: &[u8]) -> Result<()> { self.meta_cf.put_bytes(slot, bytes) } // Given a start and end entry index, find all the missing // indexes in the ledger in the range [start_index, end_index) // for the slot with the specified slot fn find_missing_indexes<C>( db_iterator: &mut DBRawIterator, slot: Slot, first_timestamp: u64, start_index: u64, end_index: u64, max_missing: usize, ) -> Vec<u64> where C: Column<Index = (u64, u64)>, { if start_index >= end_index || max_missing == 0 { return vec![]; } let mut missing_indexes = vec![]; let ticks_since_first_insert = DEFAULT_TICKS_PER_SECOND * (timestamp() - first_timestamp) / 1000; // Seek to the first shred with index >= start_index db_iterator.seek(&C::key((slot, start_index))); // The index of the first missing shred in the slot let mut prev_index = start_index; 'outer: loop { if !db_iterator.valid() { for i in prev_index..end_index { missing_indexes.push(i); if missing_indexes.len() == max_missing { break; } } break; } let (current_slot, index) = C::index(&db_iterator.key().expect("Expect a valid key")); let current_index = { if current_slot > slot { end_index } else { index } }; let upper_index = cmp::min(current_index, end_index); // the tick that will be used to figure out the timeout for this hole let reference_tick = u64::from(Shred::reference_tick_from_data( &db_iterator.value().expect("couldn't read value"), )); if ticks_since_first_insert < reference_tick + MAX_TURBINE_DELAY_IN_TICKS { // The higher index holes have not timed out yet break 'outer; } for i in prev_index..upper_index { missing_indexes.push(i); if missing_indexes.len() == max_missing { break 'outer; } } if current_slot > slot { break; } if current_index >= end_index { break; } prev_index = current_index + 1; db_iterator.next(); } missing_indexes } pub fn find_missing_data_indexes( &self, slot: Slot, first_timestamp: u64, start_index: u64, end_index: u64, max_missing: usize, ) -> Vec<u64> { if let Ok(mut db_iterator) = self .db .raw_iterator_cf(self.db.cf_handle::<cf::ShredData>()) { Self::find_missing_indexes::<cf::ShredData>( &mut db_iterator, slot, first_timestamp, start_index, end_index, max_missing, ) } else { vec![] } } pub fn get_block_time(&self, slot: Slot) -> Result<Option<UnixTimestamp>> { datapoint_info!( "blockstore-rpc-api", ("method", "get_block_time".to_string(), String) ); let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap(); // lowest_cleanup_slot is the last slot that was not cleaned up by // LedgerCleanupService if *lowest_cleanup_slot > 0 && *lowest_cleanup_slot >= slot { return Err(BlockstoreError::SlotCleanedUp); } self.blocktime_cf.get(slot) } pub fn cache_block_time(&self, slot: Slot, timestamp: UnixTimestamp) -> Result<()> { if !self.is_root(slot) { return Err(BlockstoreError::SlotNotRooted); } self.blocktime_cf.put(slot, &timestamp) } pub fn get_first_available_block(&self) -> Result<Slot> { let mut root_iterator = self.rooted_slot_iterator(self.lowest_slot())?; Ok(root_iterator.next().unwrap_or_default()) } pub fn get_rooted_block( &self, slot: Slot, require_previous_blockhash: bool, ) -> Result<ConfirmedBlock> { datapoint_info!( "blockstore-rpc-api", ("method", "get_rooted_block".to_string(), String) ); let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap(); // lowest_cleanup_slot is the last slot that was not cleaned up by // LedgerCleanupService if *lowest_cleanup_slot > 0 && *lowest_cleanup_slot >= slot { return Err(BlockstoreError::SlotCleanedUp); } if self.is_root(slot) { 
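// The slot is rooted, so its contents are final; defer to get_complete_block to assemble it.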
return self.get_complete_block(slot, require_previous_blockhash); } Err(BlockstoreError::SlotNotRooted) } pub fn get_complete_block( &self, slot: Slot, require_previous_blockhash: bool, ) -> Result<ConfirmedBlock> { let slot_meta_cf = self.db.column::<cf::SlotMeta>(); let slot_meta = match slot_meta_cf.get(slot)? { Some(slot_meta) => slot_meta, None => { info!("SlotMeta not found for slot {}", slot); return Err(BlockstoreError::SlotUnavailable); } }; if slot_meta.is_full() { let slot_entries = self.get_slot_entries(slot, 0)?; if !slot_entries.is_empty() { let slot_transaction_iterator = slot_entries .iter() .cloned() .flat_map(|entry| entry.transactions) .map(|transaction| { if let Err(err) = transaction.sanitize() { warn!( "Blockstore::get_block sanitize failed: {:?}, \ slot: {:?}, \ {:?}", err, slot, transaction, ); } transaction }); let parent_slot_entries = self .get_slot_entries(slot_meta.parent_slot, 0) .unwrap_or_default(); if parent_slot_entries.is_empty() && require_previous_blockhash { return Err(BlockstoreError::ParentEntriesUnavailable); } let previous_blockhash = if !parent_slot_entries.is_empty() { get_last_hash(parent_slot_entries.iter()).unwrap() } else { Hash::default() }; let blockhash = get_last_hash(slot_entries.iter()) .unwrap_or_else(|| panic!("Rooted slot {:?} must have blockhash", slot)); let rewards = self .rewards_cf .get_protobuf_or_bincode::<StoredExtendedRewards>(slot)? .unwrap_or_default() .into(); let block_time = self.blocktime_cf.get(slot)?; let block = ConfirmedBlock { previous_blockhash: previous_blockhash.to_string(), blockhash: blockhash.to_string(), parent_slot: slot_meta.parent_slot, transactions: self .map_transactions_to_statuses(slot, slot_transaction_iterator), rewards, block_time, }; return Ok(block); } } Err(BlockstoreError::SlotUnavailable) } fn map_transactions_to_statuses<'a>( &self, slot: Slot, iterator: impl Iterator<Item = Transaction> + 'a, ) -> Vec<TransactionWithStatusMeta> { iterator .map(|transaction| { let signature = transaction.signatures[0]; TransactionWithStatusMeta { transaction, meta: self .read_transaction_status((signature, slot)) .ok() .flatten(), } }) .collect() } /// Initializes the TransactionStatusIndex column family with two records, `0` and `1`, /// which are used as the primary index for entries in the TransactionStatus and /// AddressSignatures columns. At any given time, one primary index is active (ie. new records /// are stored under this index), the other is frozen. fn initialize_transaction_status_index(&self) -> Result<()> { self.transaction_status_index_cf .put(0, &TransactionStatusIndexMeta::default())?; self.transaction_status_index_cf .put(1, &TransactionStatusIndexMeta::default())?; // This dummy status improves compaction performance let default_status = TransactionStatusMeta::default().into(); self.transaction_status_cf .put_protobuf(cf::TransactionStatus::as_index(2), &default_status)?; self.address_signatures_cf.put( cf::AddressSignatures::as_index(2), &AddressSignatureMeta::default(), ) } /// Toggles the active primary index between `0` and `1`, and clears the stored max-slot of the /// frozen index in preparation for pruning. 
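/// Returns the primary index (`0` or `1`) whose entries are now eligible for purging, or `None` if nothing can be pruned yet. On the first call (neither index frozen) index `0` is frozen and `1` becomes active; afterwards, once `to_slot` moves past the frozen index's `max_slot`, the two indexes swap roles and the newly thawed (now active) index is returned for cleanup.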
fn toggle_transaction_status_index( &self, batch: &mut WriteBatch, w_active_transaction_status_index: &mut u64, to_slot: Slot, ) -> Result<Option<u64>> { let index0 = self.transaction_status_index_cf.get(0)?; if index0.is_none() { return Ok(None); } let mut index0 = index0.unwrap(); let mut index1 = self.transaction_status_index_cf.get(1)?.unwrap(); if !index0.frozen && !index1.frozen { index0.frozen = true; *w_active_transaction_status_index = 1; batch.put::<cf::TransactionStatusIndex>(0, &index0)?; Ok(None) } else { let result = if index0.frozen && to_slot > index0.max_slot { debug!("Pruning transaction index 0 at slot {}", index0.max_slot); Some(0) } else if index1.frozen && to_slot > index1.max_slot { debug!("Pruning transaction index 1 at slot {}", index1.max_slot); Some(1) } else { None }; if result.is_some() { *w_active_transaction_status_index = if index0.frozen { 0 } else { 1 }; if index0.frozen { index0.max_slot = 0 }; index0.frozen = !index0.frozen; batch.put::<cf::TransactionStatusIndex>(0, &index0)?; if index1.frozen { index1.max_slot = 0 }; index1.frozen = !index1.frozen; batch.put::<cf::TransactionStatusIndex>(1, &index1)?; } Ok(result) } } fn get_primary_index( &self, slot: Slot, w_active_transaction_status_index: &mut u64, ) -> Result<u64> { let i = *w_active_transaction_status_index; let mut index_meta = self.transaction_status_index_cf.get(i)?.unwrap(); if slot > index_meta.max_slot { assert!(!index_meta.frozen); index_meta.max_slot = slot; self.transaction_status_index_cf.put(i, &index_meta)?; } Ok(i) } pub fn read_transaction_status( &self, index: (Signature, Slot), ) -> Result<Option<TransactionStatusMeta>> { let (signature, slot) = index; let result = self .transaction_status_cf .get_protobuf_or_bincode::<StoredTransactionStatusMeta>((0, signature, slot))?; if result.is_none() { Ok(self .transaction_status_cf .get_protobuf_or_bincode::<StoredTransactionStatusMeta>((1, signature, slot))? 
.and_then(|meta| meta.try_into().ok())) } else { Ok(result.and_then(|meta| meta.try_into().ok())) } } pub fn write_transaction_status( &self, slot: Slot, signature: Signature, writable_keys: Vec<&Pubkey>, readonly_keys: Vec<&Pubkey>, status: TransactionStatusMeta, ) -> Result<()> { let status = status.into(); // This write lock prevents interleaving issues with the transaction_status_index_cf by gating // writes to that column let mut w_active_transaction_status_index = self.active_transaction_status_index.write().unwrap(); let primary_index = self.get_primary_index(slot, &mut w_active_transaction_status_index)?; self.transaction_status_cf .put_protobuf((primary_index, signature, slot), &status)?; for address in writable_keys { self.address_signatures_cf.put( (primary_index, *address, slot, signature), &AddressSignatureMeta { writeable: true }, )?; } for address in readonly_keys { self.address_signatures_cf.put( (primary_index, *address, slot, signature), &AddressSignatureMeta { writeable: false }, )?; } Ok(()) } // Returns a transaction status, as well as a loop counter for unit testing fn get_transaction_status_with_counter( &self, signature: Signature, confirmed_unrooted_slots: &[Slot], ) -> Result<(Option<(Slot, TransactionStatusMeta)>, u64)> { let mut counter = 0; for transaction_status_cf_primary_index in 0..=1 { let index_iterator = self.transaction_status_cf.iter(IteratorMode::From( (transaction_status_cf_primary_index, signature, 0), IteratorDirection::Forward, ))?; for ((i, sig, slot), _data) in index_iterator { counter += 1; if i != transaction_status_cf_primary_index || sig != signature { break; } if !self.is_root(slot) && !confirmed_unrooted_slots.contains(&slot) { continue; } let status = self .transaction_status_cf .get_protobuf_or_bincode::<StoredTransactionStatusMeta>((i, sig, slot))? 
.and_then(|status| status.try_into().ok()) .map(|status| (slot, status)); return Ok((status, counter)); } } Ok((None, counter)) } /// Returns a transaction status pub fn get_rooted_transaction_status( &self, signature: Signature, ) -> Result<Option<(Slot, TransactionStatusMeta)>> { datapoint_info!( "blockstore-rpc-api", ( "method", "get_rooted_transaction_status".to_string(), String ) ); self.get_transaction_status(signature, &[]) } /// Returns a transaction status pub fn get_transaction_status( &self, signature: Signature, confirmed_unrooted_slots: &[Slot], ) -> Result<Option<(Slot, TransactionStatusMeta)>> { datapoint_info!( "blockstore-rpc-api", ("method", "get_transaction_status".to_string(), String) ); self.get_transaction_status_with_counter(signature, confirmed_unrooted_slots) .map(|(status, _)| status) } /// Returns a complete transaction if it was processed in a root pub fn get_rooted_transaction( &self, signature: Signature, ) -> Result<Option<ConfirmedTransaction>> { datapoint_info!( "blockstore-rpc-api", ("method", "get_rooted_transaction".to_string(), String) ); self.get_transaction_with_status(signature, &[]) } /// Returns a complete transaction pub fn get_complete_transaction( &self, signature: Signature, highest_confirmed_slot: Slot, ) -> Result<Option<ConfirmedTransaction>> { datapoint_info!( "blockstore-rpc-api", ("method", "get_complete_transaction".to_string(), String) ); let last_root = self.last_root(); let confirmed_unrooted_slots: Vec<_> = AncestorIterator::new_inclusive(highest_confirmed_slot, self) .take_while(|&slot| slot > last_root) .collect(); self.get_transaction_with_status(signature, &confirmed_unrooted_slots) } fn get_transaction_with_status( &self, signature: Signature, confirmed_unrooted_slots: &[Slot], ) -> Result<Option<ConfirmedTransaction>> { if let Some((slot, status)) = self.get_transaction_status(signature, confirmed_unrooted_slots)? { let transaction = self .find_transaction_in_slot(slot, signature)? .ok_or(BlockstoreError::TransactionStatusSlotMismatch)?; // Should not happen let block_time = self.get_block_time(slot)?; Ok(Some(ConfirmedTransaction { slot, transaction: TransactionWithStatusMeta { transaction, meta: Some(status), }, block_time, })) } else { Ok(None) } } fn find_transaction_in_slot( &self, slot: Slot, signature: Signature, ) -> Result<Option<Transaction>> { let slot_entries = self.get_slot_entries(slot, 0)?; Ok(slot_entries .iter() .cloned() .flat_map(|entry| entry.transactions) .map(|transaction| { if let Err(err) = transaction.sanitize() { warn!( "Blockstore::find_transaction_in_slot sanitize failed: {:?}, \ slot: {:?}, \ {:?}", err, slot, transaction, ); } transaction }) .find(|transaction| transaction.signatures[0] == signature)) } // Returns all rooted signatures for an address, ordered by slot that the transaction was // processed in. 
Within each slot the transactions will be ordered by signature, and NOT by // the order in which the transactions exist in the block // // DEPRECATED fn find_address_signatures( &self, pubkey: Pubkey, start_slot: Slot, end_slot: Slot, ) -> Result<Vec<(Slot, Signature)>> { let mut signatures: Vec<(Slot, Signature)> = vec![]; for transaction_status_cf_primary_index in 0..=1 { let index_iterator = self.address_signatures_cf.iter(IteratorMode::From( ( transaction_status_cf_primary_index, pubkey, start_slot, Signature::default(), ), IteratorDirection::Forward, ))?; for ((i, address, slot, signature), _) in index_iterator { if i != transaction_status_cf_primary_index || slot > end_slot || address != pubkey { break; } if self.is_root(slot) { signatures.push((slot, signature)); } } } signatures.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap().then(a.1.cmp(&b.1))); Ok(signatures) } // Returns all signatures for an address in a particular slot, regardless of whether that slot // has been rooted. The transactions will be ordered by signature, and NOT by the order in // which the transactions exist in the block fn find_address_signatures_for_slot( &self, pubkey: Pubkey, slot: Slot, ) -> Result<Vec<(Slot, Signature)>> { let mut signatures: Vec<(Slot, Signature)> = vec![]; for transaction_status_cf_primary_index in 0..=1 { let index_iterator = self.address_signatures_cf.iter(IteratorMode::From( ( transaction_status_cf_primary_index, pubkey, slot, Signature::default(), ), IteratorDirection::Forward, ))?; for ((i, address, transaction_slot, signature), _) in index_iterator { if i != transaction_status_cf_primary_index || transaction_slot > slot || address != pubkey { break; } signatures.push((slot, signature)); } } signatures.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap().then(a.1.cmp(&b.1))); Ok(signatures) } // DEPRECATED pub fn get_confirmed_signatures_for_address( &self, pubkey: Pubkey, start_slot: Slot, end_slot: Slot, ) -> Result<Vec<Signature>> { datapoint_info!( "blockstore-rpc-api", ( "method", "get_confirmed_signatures_for_address".to_string(), String ) ); self.find_address_signatures(pubkey, start_slot, end_slot) .map(|signatures| signatures.iter().map(|(_, signature)| *signature).collect()) } pub fn get_confirmed_signatures_for_address2( &self, address: Pubkey, highest_slot: Slot, // highest_confirmed_root or highest_confirmed_slot before: Option<Signature>, until: Option<Signature>, limit: usize, ) -> Result<Vec<ConfirmedTransactionStatusWithSignature>> { datapoint_info!( "blockstore-rpc-api", ( "method", "get_confirmed_signatures_for_address2".to_string(), String ) ); let last_root = self.last_root(); let confirmed_unrooted_slots: Vec<_> = AncestorIterator::new_inclusive(highest_slot, self) .take_while(|&slot| slot > last_root) .collect(); // Figure the `slot` to start listing signatures at, based on the ledger location of the // `before` signature if present. Also generate a HashSet of signatures that should // be excluded from the results. 
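// In outline (as implemented below): resolve `before`/`until` into a starting slot, a lower slot bound, and per-slot signature exclusion sets; walk the AddressSignatures column in reverse through the active primary index and then the other one; finally attach the status error and block time to each collected signature.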
let mut get_before_slot_timer = Measure::start("get_before_slot_timer"); let (slot, mut before_excluded_signatures) = match before { None => (highest_slot, None), Some(before) => { let transaction_status = self.get_transaction_status(before, &confirmed_unrooted_slots)?; match transaction_status { None => return Ok(vec![]), Some((slot, _)) => { let block = self.get_complete_block(slot, false).map_err(|err| { BlockstoreError::Io(IoError::new( ErrorKind::Other, format!("Unable to get block: {}", err), )) })?; // Load all signatures for the block let mut slot_signatures: Vec<_> = block .transactions .into_iter() .filter_map(|transaction_with_meta| { transaction_with_meta .transaction .signatures .into_iter() .next() }) .collect(); // Sort signatures as a way to entire a stable ordering within a slot, as // the AddressSignatures column is ordered by signatures within a slot, // not by block ordering slot_signatures.sort(); slot_signatures.reverse(); if let Some(pos) = slot_signatures.iter().position(|&x| x == before) { slot_signatures.truncate(pos + 1); } ( slot, Some(slot_signatures.into_iter().collect::<HashSet<_>>()), ) } } } }; get_before_slot_timer.stop(); // Generate a HashSet of signatures that should be excluded from the results based on // `until` signature let mut get_until_slot_timer = Measure::start("get_until_slot_timer"); let (lowest_slot, until_excluded_signatures) = match until { None => (0, HashSet::new()), Some(until) => { let transaction_status = self.get_transaction_status(until, &confirmed_unrooted_slots)?; match transaction_status { None => (0, HashSet::new()), Some((slot, _)) => { let block = self.get_complete_block(slot, false).map_err(|err| { BlockstoreError::Io(IoError::new( ErrorKind::Other, format!("Unable to get block: {}", err), )) })?; // Load all signatures for the block let mut slot_signatures: Vec<_> = block .transactions .into_iter() .filter_map(|transaction_with_meta| { transaction_with_meta .transaction .signatures .into_iter() .next() }) .collect(); // Sort signatures as a way to entire a stable ordering within a slot, as // the AddressSignatures column is ordered by signatures within a slot, // not by block ordering slot_signatures.sort(); slot_signatures.reverse(); if let Some(pos) = slot_signatures.iter().position(|&x| x == until) { slot_signatures = slot_signatures.split_off(pos); } (slot, slot_signatures.into_iter().collect::<HashSet<_>>()) } }
} }; get_until_slot_timer.stop();
// Fetch the list of signatures that affect the given address let first_available_block = self.get_first_available_block()?; let mut address_signatures = vec![]; // Get signatures in `slot` let mut get_initial_slot_timer = Measure::start("get_initial_slot_timer"); let mut signatures = self.find_address_signatures_for_slot(address, slot)?; signatures.reverse(); if let Some(excluded_signatures) = before_excluded_signatures.take() { address_signatures.extend( signatures .into_iter() .filter(|(_, signature)| !excluded_signatures.contains(&signature)), ) } else { address_signatures.append(&mut signatures); } get_initial_slot_timer.stop(); // Check the active_transaction_status_index to see if it contains slot. If so, start with // that index, as it will contain higher slots let starting_primary_index = *self.active_transaction_status_index.read().unwrap(); let next_primary_index = if starting_primary_index == 0 { 1 } else { 0 }; let next_max_slot = self .transaction_status_index_cf .get(next_primary_index)? .unwrap() .max_slot; let mut starting_primary_index_iter_timer = Measure::start("starting_primary_index_iter"); if slot > next_max_slot { let mut starting_iterator = self.address_signatures_cf.iter(IteratorMode::From( (starting_primary_index, address, slot, Signature::default()), IteratorDirection::Reverse, ))?; // Iterate through starting_iterator until limit is reached while address_signatures.len() < limit { if let Some(((i, key_address, slot, signature), _)) = starting_iterator.next() { if slot == next_max_slot || slot < lowest_slot { break; } if i == starting_primary_index && key_address == address && slot >= first_available_block { if self.is_root(slot) || confirmed_unrooted_slots.contains(&slot) { address_signatures.push((slot, signature)); } continue; } } break; } // Handle slots that cross primary indexes if next_max_slot >= lowest_slot { let mut signatures = self.find_address_signatures_for_slot(address, next_max_slot)?; signatures.reverse(); address_signatures.append(&mut signatures); } } starting_primary_index_iter_timer.stop(); // Iterate through next_iterator until limit is reached let mut next_primary_index_iter_timer = Measure::start("next_primary_index_iter_timer"); let mut next_iterator = self.address_signatures_cf.iter(IteratorMode::From( (next_primary_index, address, slot, Signature::default()), IteratorDirection::Reverse, ))?; while address_signatures.len() < limit { if let Some(((i, key_address, slot, signature), _)) = next_iterator.next() { // Skip next_max_slot, which is already included if slot == next_max_slot { continue; } if slot < lowest_slot { break; } if i == next_primary_index && key_address == address && slot >= first_available_block { if self.is_root(slot) || confirmed_unrooted_slots.contains(&slot) { address_signatures.push((slot, signature)); } continue; } } break; } next_primary_index_iter_timer.stop(); let mut address_signatures: Vec<(Slot, Signature)> = address_signatures .into_iter() .filter(|(_, signature)| !until_excluded_signatures.contains(&signature)) .collect(); address_signatures.truncate(limit); // Fill in the status information for each found transaction let mut get_status_info_timer = Measure::start("get_status_info_timer"); let mut infos = vec![]; for (slot, signature) in address_signatures.into_iter() { let transaction_status = self.get_transaction_status(signature, &confirmed_unrooted_slots)?; let err = transaction_status.and_then(|(_slot, status)| status.status.err()); let block_time = self.get_block_time(slot)?; 
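// Transaction memos are not looked up here; `memo` is simply set to `None` below.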
infos.push(ConfirmedTransactionStatusWithSignature { signature, slot, err, memo: None, block_time, }); } get_status_info_timer.stop(); datapoint_info!( "blockstore-get-conf-sigs-for-addr-2", ( "get_before_slot_us", get_before_slot_timer.as_us() as i64, i64 ), ( "get_initial_slot_us", get_initial_slot_timer.as_us() as i64, i64 ), ( "starting_primary_index_iter_us", starting_primary_index_iter_timer.as_us() as i64, i64 ), ( "next_primary_index_iter_us", next_primary_index_iter_timer.as_us() as i64, i64 ), ( "get_status_info_us", get_status_info_timer.as_us() as i64, i64 ), ( "get_until_slot_us", get_until_slot_timer.as_us() as i64, i64 ) ); Ok(infos) } pub fn read_rewards(&self, index: Slot) -> Result<Option<Rewards>> { self.rewards_cf .get_protobuf_or_bincode::<Rewards>(index) .map(|result| result.map(|option| option.into())) } pub fn write_rewards(&self, index: Slot, rewards: Rewards) -> Result<()> { let rewards = rewards.into(); self.rewards_cf.put_protobuf(index, &rewards) } pub fn get_recent_perf_samples(&self, num: usize) -> Result<Vec<(Slot, PerfSample)>> { Ok(self .db .iter::<cf::PerfSamples>(IteratorMode::End)? .take(num) .map(|(slot, data)| { let perf_sample = deserialize(&data).unwrap(); (slot, perf_sample) }) .collect()) } pub fn write_perf_sample(&self, index: Slot, perf_sample: &PerfSample) -> Result<()> { self.perf_samples_cf.put(index, perf_sample) } /// Returns the entry vector for the slot starting with `shred_start_index` pub fn get_slot_entries(&self, slot: Slot, shred_start_index: u64) -> Result<Vec<Entry>> { self.get_slot_entries_with_shred_info(slot, shred_start_index, false) .map(|x| x.0) } /// Returns the entry vector for the slot starting with `shred_start_index`, the number of /// shreds that comprise the entry vector, and whether the slot is full (consumed all shreds). 
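/// Call sketch (illustrative, assuming a `blockstore` and `slot` in scope): `let (entries, num_shreds, is_full) = blockstore.get_slot_entries_with_shred_info(slot, 0, false)?;` where passing `false` rejects dead slots with `BlockstoreError::DeadSlot`.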
pub fn get_slot_entries_with_shred_info( &self, slot: Slot, start_index: u64, allow_dead_slots: bool, ) -> Result<(Vec<Entry>, u64, bool)> { if self.is_dead(slot) && !allow_dead_slots { return Err(BlockstoreError::DeadSlot); } let (completed_ranges, slot_meta) = self.get_completed_ranges(slot, start_index)?; if completed_ranges.is_empty() { return Ok((vec![], 0, false)); } let slot_meta = slot_meta.unwrap(); let num_shreds = completed_ranges .last() .map(|(_, end_index)| u64::from(*end_index) - start_index + 1) .unwrap_or(0); let entries: Result<Vec<Vec<Entry>>> = PAR_THREAD_POOL.with(|thread_pool| { thread_pool.borrow().install(|| { completed_ranges .par_iter() .map(|(start_index, end_index)| { self.get_entries_in_data_block( slot, *start_index, *end_index, Some(&slot_meta), ) }) .collect() }) }); let entries: Vec<Entry> = entries?.into_iter().flatten().collect(); Ok((entries, num_shreds, slot_meta.is_full())) } fn get_completed_ranges( &self, slot: Slot, start_index: u64, ) -> Result<(CompletedRanges, Option<SlotMeta>)> { // lowest_cleanup_slot is the last slot that was not cleaned up by // LedgerCleanupService let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap(); if *lowest_cleanup_slot > slot { return Err(BlockstoreError::SlotCleanedUp); } let slot_meta_cf = self.db.column::<cf::SlotMeta>(); let slot_meta = slot_meta_cf.get(slot)?; if slot_meta.is_none() { return Ok((vec![], slot_meta)); } let slot_meta = slot_meta.unwrap(); // Find all the ranges for the completed data blocks let completed_ranges = Self::get_completed_data_ranges( start_index as u32, &slot_meta.completed_data_indexes[..], slot_meta.consumed as u32, ); Ok((completed_ranges, Some(slot_meta))) } // Get the range of indexes [start_index, end_index] of every completed data block fn get_completed_data_ranges( mut start_index: u32, completed_data_end_indexes: &[u32], consumed: u32, ) -> CompletedRanges { let mut completed_data_ranges = vec![]; let floor = completed_data_end_indexes .iter() .position(|i| *i >= start_index) .unwrap_or_else(|| completed_data_end_indexes.len()); for i in &completed_data_end_indexes[floor as usize..] 
{ // `consumed` is the next missing shred index, but shred `i` existing in // completed_data_end_indexes implies it's not missing assert!(*i != consumed); if *i < consumed { completed_data_ranges.push((start_index, *i)); start_index = *i + 1; } } completed_data_ranges } pub fn get_entries_in_data_block( &self, slot: Slot, start_index: u32, end_index: u32, slot_meta: Option<&SlotMeta>, ) -> Result<Vec<Entry>> { let data_shred_cf = self.db.column::<cf::ShredData>(); // Short circuit on first error let data_shreds: Result<Vec<Shred>> = (start_index..=end_index) .map(|i| { data_shred_cf .get_bytes((slot, u64::from(i))) .and_then(|serialized_shred| { if serialized_shred.is_none() { if let Some(slot_meta) = slot_meta { panic!( "Shred with slot: {}, index: {}, consumed: {}, completed_indexes: {:?} must exist if shred index was included in a range: {} {}", slot, i, slot_meta.consumed, slot_meta.completed_data_indexes, start_index, end_index ); } else { return Err(BlockstoreError::InvalidShredData(Box::new( bincode::ErrorKind::Custom(format!( "Missing shred for slot {}, index {}", slot, i )), ))); } } Shred::new_from_serialized_shred(serialized_shred.unwrap()).map_err(|err| { BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom( format!( "Could not reconstruct shred from shred payload: {:?}", err ), ))) }) }) }) .collect(); let data_shreds = data_shreds?; let last_shred = data_shreds.last().unwrap(); assert!(last_shred.data_complete() || last_shred.last_in_slot()); let deshred_payload = Shredder::deshred(&data_shreds).map_err(|e| { BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(format!( "Could not reconstruct data block from constituent shreds, error: {:?}", e )))) })?; debug!("{:?} shreds in last FEC set", data_shreds.len(),); bincode::deserialize::<Vec<Entry>>(&deshred_payload).map_err(|e| { BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(format!( "could not reconstruct entries: {:?}", e )))) }) } fn get_any_valid_slot_entries(&self, slot: Slot, start_index: u64) -> Vec<Entry> { let (completed_ranges, slot_meta) = self .get_completed_ranges(slot, start_index) .unwrap_or_default(); if completed_ranges.is_empty() { return vec![]; } let slot_meta = slot_meta.unwrap(); let entries: Vec<Vec<Entry>> = PAR_THREAD_POOL_ALL_CPUS.with(|thread_pool| { thread_pool.borrow().install(|| { completed_ranges .par_iter() .map(|(start_index, end_index)| { self.get_entries_in_data_block( slot, *start_index, *end_index, Some(&slot_meta), ) .unwrap_or_default() }) .collect() }) }); entries.into_iter().flatten().collect() } // Returns slots connecting to any element of the list `slots`. pub fn get_slots_since(&self, slots: &[u64]) -> Result<HashMap<u64, Vec<u64>>> { // Return error if there was a database error during lookup of any of the // slot indexes let slot_metas: Result<Vec<Option<SlotMeta>>> = slots.iter().map(|slot| self.meta(*slot)).collect(); let slot_metas = slot_metas?; let result: HashMap<u64, Vec<u64>> = slots .iter() .zip(slot_metas) .filter_map(|(height, meta)| { meta.map(|meta| { let valid_next_slots: Vec<u64> = meta .next_slots .iter() .cloned() .filter(|s| !self.is_dead(*s)) .collect(); (*height, valid_next_slots) }) }) .collect(); Ok(result) } pub fn is_root(&self, slot: Slot) -> bool { matches!(self.db.get::<cf::Root>(slot), Ok(Some(true))) } /// Returns true if a slot is between the rooted slot bounds of the ledger, but has not itself /// been rooted. 
This is either because the slot was skipped, or due to a gap in ledger data, /// as when booting from a newer snapshot. pub fn is_skipped(&self, slot: Slot) -> bool { let lowest_root = self .rooted_slot_iterator(0) .ok() .and_then(|mut iter| iter.next()) .unwrap_or_default(); match self.db.get::<cf::Root>(slot).ok().flatten() { Some(_) => false, None => slot < self.max_root() && slot > lowest_root, } } pub fn set_roots(&self, rooted_slots: &[u64]) -> Result<()> { let mut write_batch = self.db.batch()?; for slot in rooted_slots { write_batch.put::<cf::Root>(*slot, &true)?; } self.db.write(write_batch)?; let mut last_root = self.last_root.write().unwrap(); if *last_root == std::u64::MAX { *last_root = 0; } *last_root = cmp::max(*rooted_slots.iter().max().unwrap(), *last_root); Ok(()) } pub fn is_dead(&self, slot: Slot) -> bool { matches!( self.db .get::<cf::DeadSlots>(slot) .expect("fetch from DeadSlots column family failed"), Some(true) ) } pub fn set_dead_slot(&self, slot: Slot) -> Result<()> { self.dead_slots_cf.put(slot, &true) } pub fn store_duplicate_if_not_existing( &self, slot: Slot, shred1: Vec<u8>, shred2: Vec<u8>, ) -> Result<()> { if !self.has_duplicate_shreds_in_slot(slot) { self.store_duplicate_slot(slot, shred1, shred2) } else { Ok(()) } } pub fn store_duplicate_slot(&self, slot: Slot, shred1: Vec<u8>, shred2: Vec<u8>) -> Result<()> { let duplicate_slot_proof = DuplicateSlotProof::new(shred1, shred2); self.duplicate_slots_cf.put(slot, &duplicate_slot_proof) } pub fn get_duplicate_slot(&self, slot: u64) -> Option<DuplicateSlotProof> { self.duplicate_slots_cf .get(slot) .expect("fetch from DuplicateSlots column family failed") } // `new_shred` is assumed to have slot and index equal to the given slot and index. // Returns the existing shred if `new_shred` is not equal to the existing shred at the // given slot and index as this implies the leader generated two different shreds with // the same slot and index pub fn is_shred_duplicate( &self, slot: u64, index: u32, new_shred_raw: &[u8], is_data: bool, ) -> Option<Vec<u8>> { let res = if is_data { self.get_data_shred(slot, index as u64) .expect("fetch from DuplicateSlots column family failed") } else { self.get_coding_shred(slot, index as u64) .expect("fetch from DuplicateSlots column family failed") }; let mut payload = new_shred_raw.to_vec(); payload.resize( std::cmp::max(new_shred_raw.len(), crate::shred::SHRED_PAYLOAD_SIZE), 0, ); let new_shred = Shred::new_from_serialized_shred(payload).unwrap(); res.map(|existing_shred| { if existing_shred != new_shred.payload { Some(existing_shred) } else { None } }) .unwrap_or(None) } pub fn has_duplicate_shreds_in_slot(&self, slot: Slot) -> bool { self.duplicate_slots_cf .get(slot) .expect("fetch from DuplicateSlots column family failed") .is_some() } pub fn orphans_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = u64> + '_> { let orphans_iter = self .db .iter::<cf::Orphans>(IteratorMode::From(slot, IteratorDirection::Forward))?; Ok(orphans_iter.map(|(slot, _)| slot)) } pub fn dead_slots_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = Slot> + '_> { let dead_slots_iterator = self .db .iter::<cf::DeadSlots>(IteratorMode::From(slot, IteratorDirection::Forward))?; Ok(dead_slots_iterator.map(|(slot, _)| slot)) } pub fn duplicate_slots_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = Slot> + '_> { let duplicate_slots_iterator = self .db .iter::<cf::DuplicateSlots>(IteratorMode::From(slot, IteratorDirection::Forward))?; Ok(duplicate_slots_iterator.map(|(slot, _)| 
slot)) } pub fn last_root(&self) -> Slot { *self.last_root.read().unwrap() } // find the first available slot in blockstore that has some data in it pub fn lowest_slot(&self) -> Slot { for (slot, meta) in self .slot_meta_iterator(0) .expect("unable to iterate over meta") { if slot > 0 && meta.received > 0 { return slot; } } // This means blockstore is empty, should never get here aside from right at boot. self.last_root() } pub fn storage_size(&self) -> Result<u64> { self.db.storage_size() } pub fn is_primary_access(&self) -> bool { self.db.is_primary_access() } } // Update the `completed_data_indexes` with a new shred `new_shred_index`. If a // data set is complete, return the range of shred indexes [start_index, end_index] // for that completed data set. fn update_completed_data_indexes( is_last_in_data: bool, new_shred_index: u32, received_data_shreds: &ShredIndex, // Sorted array of shred indexes marked data complete completed_data_indexes: &mut Vec<u32>, ) -> Vec<(u32, u32)> { let mut first_greater_pos = None; let mut prev_completed_shred_index = None; // Find the first item in `completed_data_indexes > new_shred_index` for (i, completed_data_index) in completed_data_indexes.iter().enumerate() { // `completed_data_indexes` should be sorted from smallest to largest assert!( prev_completed_shred_index.is_none() || *completed_data_index > prev_completed_shred_index.unwrap() ); if *completed_data_index > new_shred_index { first_greater_pos = Some(i); break; } prev_completed_shred_index = Some(*completed_data_index); } // Consecutive entries i, k, j in this vector represent potential ranges [i, k), // [k, j) that could be completed data ranges let mut check_ranges: Vec<u32> = vec![prev_completed_shred_index .map(|completed_data_shred_index| completed_data_shred_index + 1) .unwrap_or(0)]; let mut first_greater_data_complete_index = first_greater_pos.map(|i| completed_data_indexes[i]); // `new_shred_index` is data complete, so need to insert here into the // `completed_data_indexes` if is_last_in_data { if first_greater_pos.is_some() { // If there exists a data complete shred greater than `new_shred_index`, // and the new shred is marked data complete, then the range // [new_shred_index + 1, completed_data_indexes[pos]] may be complete, // so add that range to check check_ranges.push(new_shred_index + 1); } completed_data_indexes.insert( first_greater_pos.unwrap_or_else(|| { // If `first_greater_pos` is none, then there was no greater // data complete index so mark this new shred's index as the latest data // complete index first_greater_data_complete_index = Some(new_shred_index); completed_data_indexes.len() }), new_shred_index, ); } if first_greater_data_complete_index.is_none() { // That means new_shred_index > all known completed data indexes and // new shred not data complete, which means the data set of that new // shred is not data complete return vec![]; } check_ranges.push(first_greater_data_complete_index.unwrap() + 1); let mut completed_data_ranges = vec![]; for range in check_ranges.windows(2) { let mut is_complete = true; for shred_index in range[0]..range[1] { // If we're missing any shreds, the data set cannot be confirmed // to be completed, so check the next range if !received_data_shreds.is_present(shred_index as u64) { is_complete = false; break; } } if is_complete { completed_data_ranges.push((range[0], range[1] - 1)); } } completed_data_ranges } fn update_slot_meta( is_last_in_slot: bool, is_last_in_data: bool, slot_meta: &mut SlotMeta, index: u32, new_consumed: u64, 
reference_tick: u8, received_data_shreds: &ShredIndex, ) -> Vec<(u32, u32)> { let maybe_first_insert = slot_meta.received == 0; // Index is zero-indexed, while the "received" height starts from 1, // so received = index + 1 for the same shred. slot_meta.received = cmp::max((u64::from(index) + 1) as u64, slot_meta.received); if maybe_first_insert && slot_meta.received > 0 { // predict the timestamp of what would have been the first shred in this slot let slot_time_elapsed = u64::from(reference_tick) * 1000 / DEFAULT_TICKS_PER_SECOND; slot_meta.first_shred_timestamp = timestamp() - slot_time_elapsed; } slot_meta.consumed = new_consumed; slot_meta.last_index = { // If the last index in the slot hasn't been set before, then // set it to this shred index if slot_meta.last_index == std::u64::MAX { if is_last_in_slot { u64::from(index) } else { std::u64::MAX } } else { slot_meta.last_index } }; update_completed_data_indexes( is_last_in_slot || is_last_in_data, index, received_data_shreds, &mut slot_meta.completed_data_indexes, ) } fn get_index_meta_entry<'a>( db: &Database, slot: Slot, index_working_set: &'a mut HashMap<u64, IndexMetaWorkingSetEntry>, index_meta_time: &mut u64, ) -> &'a mut IndexMetaWorkingSetEntry { let index_cf = db.column::<cf::Index>(); let mut total_start = Measure::start("Total elapsed"); let res = index_working_set.entry(slot).or_insert_with(|| { let newly_inserted_meta = index_cf .get(slot) .unwrap() .unwrap_or_else(|| Index::new(slot)); IndexMetaWorkingSetEntry { index: newly_inserted_meta, did_insert_occur: false, } }); total_start.stop(); *index_meta_time += total_start.as_us(); res } fn get_slot_meta_entry<'a>( db: &Database, slot_meta_working_set: &'a mut HashMap<u64, SlotMetaWorkingSetEntry>, slot: Slot, parent_slot: Slot, ) -> &'a mut SlotMetaWorkingSetEntry { let meta_cf = db.column::<cf::SlotMeta>(); // Check if we've already inserted the slot metadata for this shred's slot slot_meta_working_set.entry(slot).or_insert_with(|| { // Store a 2-tuple of the metadata (working copy, backup copy) if let Some(mut meta) = meta_cf.get(slot).expect("Expect database get to succeed") { let backup = Some(meta.clone()); // If parent_slot == std::u64::MAX, then this is one of the orphans inserted // during the chaining process, see the function find_slot_meta_in_cached_state() // for details. Slots that are orphans are missing a parent_slot, so we should // fill in the parent now that we know it. 
if is_orphan(&meta) { meta.parent_slot = parent_slot; } SlotMetaWorkingSetEntry::new(Rc::new(RefCell::new(meta)), backup) } else { SlotMetaWorkingSetEntry::new( Rc::new(RefCell::new(SlotMeta::new(slot, parent_slot))), None, ) } }) } fn get_last_hash<'a>(iterator: impl Iterator<Item = &'a Entry> + 'a) -> Option<Hash> { iterator.last().map(|entry| entry.hash) } fn is_valid_write_to_slot_0(slot_to_write: u64, parent_slot: Slot, last_root: u64) -> bool { slot_to_write == 0 && last_root == 0 && parent_slot == 0 } fn send_signals( new_shreds_signals: &[SyncSender<bool>], completed_slots_senders: &[SyncSender<Vec<u64>>], should_signal: bool, newly_completed_slots: Vec<u64>, ) { if should_signal { for signal in new_shreds_signals { let _ = signal.try_send(true); } } if !completed_slots_senders.is_empty() && !newly_completed_slots.is_empty() { let mut slots: Vec<_> = (0..completed_slots_senders.len() - 1) .map(|_| newly_completed_slots.clone()) .collect(); slots.push(newly_completed_slots); for (signal, slots) in completed_slots_senders.iter().zip(slots.into_iter()) { let res = signal.try_send(slots); if let Err(TrySendError::Full(_)) = res { datapoint_error!( "blockstore_error", ( "error", "Unable to send newly completed slot because channel is full".to_string(), String ), ); } } } } fn commit_slot_meta_working_set( slot_meta_working_set: &HashMap<u64, SlotMetaWorkingSetEntry>, completed_slots_senders: &[SyncSender<Vec<u64>>], write_batch: &mut WriteBatch, ) -> Result<(bool, Vec<u64>)> { let mut should_signal = false; let mut newly_completed_slots = vec![]; // Check if any metadata was changed, if so, insert the new version of the // metadata into the write batch for (slot, slot_meta_entry) in slot_meta_working_set.iter() { // Any slot that wasn't written to should have been filtered out by now. assert!(slot_meta_entry.did_insert_occur); let meta: &SlotMeta = &RefCell::borrow(&*slot_meta_entry.new_slot_meta); let meta_backup = &slot_meta_entry.old_slot_meta; if !completed_slots_senders.is_empty() && is_newly_completed_slot(meta, meta_backup) { newly_completed_slots.push(*slot); } // Check if the working copy of the metadata has changed if Some(meta) != meta_backup.as_ref() { should_signal = should_signal || slot_has_updates(meta, &meta_backup); write_batch.put::<cf::SlotMeta>(*slot, &meta)?; } } Ok((should_signal, newly_completed_slots)) } // 1) Find the slot metadata in the cache of dirty slot metadata we've previously touched, // else: // 2) Search the database for that slot metadata. If still no luck, then: // 3) Create a dummy orphan slot in the database fn find_slot_meta_else_create<'a>( db: &Database, working_set: &'a HashMap<u64, SlotMetaWorkingSetEntry>, chained_slots: &'a mut HashMap<u64, Rc<RefCell<SlotMeta>>>, slot_index: u64, ) -> Result<Rc<RefCell<SlotMeta>>> { let result = find_slot_meta_in_cached_state(working_set, chained_slots, slot_index); if let Some(slot) = result { Ok(slot) } else { find_slot_meta_in_db_else_create(db, slot_index, chained_slots) } } // Search the database for that slot metadata. If still no luck, then // create a dummy orphan slot in the database fn find_slot_meta_in_db_else_create( db: &Database, slot: Slot, insert_map: &mut HashMap<u64, Rc<RefCell<SlotMeta>>>, ) -> Result<Rc<RefCell<SlotMeta>>> { if let Some(slot_meta) = db.column::<cf::SlotMeta>().get(slot)? { insert_map.insert(slot, Rc::new(RefCell::new(slot_meta))); Ok(insert_map.get(&slot).unwrap().clone()) } else { // If this slot doesn't exist, make a orphan slot. 
This way we // remember which slots chained to this one when we eventually get a real shred // for this slot insert_map.insert(slot, Rc::new(RefCell::new(SlotMeta::new_orphan(slot)))); Ok(insert_map.get(&slot).unwrap().clone()) } } // Find the slot metadata in the cache of dirty slot metadata we've previously touched fn find_slot_meta_in_cached_state<'a>( working_set: &'a HashMap<u64, SlotMetaWorkingSetEntry>, chained_slots: &'a HashMap<u64, Rc<RefCell<SlotMeta>>>, slot: Slot, ) -> Option<Rc<RefCell<SlotMeta>>> { if let Some(entry) = working_set.get(&slot) { Some(entry.new_slot_meta.clone()) } else { chained_slots.get(&slot).cloned() } } // Chaining based on latest discussion here: https://github.com/solana-labs/solana/pull/2253 fn handle_chaining( db: &Database, write_batch: &mut WriteBatch, working_set: &mut HashMap<u64, SlotMetaWorkingSetEntry>, ) -> Result<()> { // Handle chaining for all the SlotMetas that were inserted into working_set.retain(|_, entry| entry.did_insert_occur); let mut new_chained_slots = HashMap::new(); let working_set_slots: Vec<_> = working_set.keys().collect(); for slot in working_set_slots { handle_chaining_for_slot(db, write_batch, working_set, &mut new_chained_slots, *slot)?; } // Write all the newly changed slots in new_chained_slots to the write_batch for (slot, meta) in new_chained_slots.iter() { let meta: &SlotMeta = &RefCell::borrow(&*meta); write_batch.put::<cf::SlotMeta>(*slot, meta)?; } Ok(()) } fn handle_chaining_for_slot( db: &Database, write_batch: &mut WriteBatch, working_set: &HashMap<u64, SlotMetaWorkingSetEntry>, new_chained_slots: &mut HashMap<u64, Rc<RefCell<SlotMeta>>>, slot: Slot, ) -> Result<()> { let slot_meta_entry = working_set .get(&slot) .expect("Slot must exist in the working_set hashmap"); let meta = &slot_meta_entry.new_slot_meta; let meta_backup = &slot_meta_entry.old_slot_meta; { let mut meta_mut = meta.borrow_mut(); let was_orphan_slot = meta_backup.is_some() && is_orphan(meta_backup.as_ref().unwrap()); // If: // 1) This is a new slot // 2) slot != 0 // then try to chain this slot to a previous slot if slot != 0 { let prev_slot = meta_mut.parent_slot; // Check if the slot represented by meta_mut is either a new slot or a orphan. // In both cases we need to run the chaining logic b/c the parent on the slot was // previously unknown. if meta_backup.is_none() || was_orphan_slot { let prev_slot_meta = find_slot_meta_else_create(db, working_set, new_chained_slots, prev_slot)?; // This is a newly inserted slot/orphan so run the chaining logic to link it to a // newly discovered parent chain_new_slot_to_prev_slot(&mut prev_slot_meta.borrow_mut(), slot, &mut meta_mut); // If the parent of `slot` is a newly inserted orphan, insert it into the orphans // column family if is_orphan(&RefCell::borrow(&*prev_slot_meta)) { write_batch.put::<cf::Orphans>(prev_slot, &true)?; } } } // At this point this slot has received a parent, so it's no longer an orphan if was_orphan_slot { write_batch.delete::<cf::Orphans>(slot)?; } } // If this is a newly inserted slot, then we know the children of this slot were not previously // connected to the trunk of the ledger. 
Thus if slot.is_connected is now true, we need to // update all child slots with `is_connected` = true because these children are also now newly // connected to trunk of the ledger let should_propagate_is_connected = is_newly_completed_slot(&RefCell::borrow(&*meta), meta_backup) && RefCell::borrow(&*meta).is_connected; if should_propagate_is_connected { // slot_function returns a boolean indicating whether to explore the children // of the input slot let slot_function = |slot: &mut SlotMeta| { slot.is_connected = true; // We don't want to set the is_connected flag on the children of non-full // slots slot.is_full() }; traverse_children_mut( db, slot, &meta, working_set, new_chained_slots, slot_function, )?; } Ok(()) } fn traverse_children_mut<F>( db: &Database, slot: Slot, slot_meta: &Rc<RefCell<SlotMeta>>, working_set: &HashMap<u64, SlotMetaWorkingSetEntry>, new_chained_slots: &mut HashMap<u64, Rc<RefCell<SlotMeta>>>, slot_function: F, ) -> Result<()> where F: Fn(&mut SlotMeta) -> bool, { let mut next_slots: Vec<(u64, Rc<RefCell<SlotMeta>>)> = vec![(slot, slot_meta.clone())]; while !next_slots.is_empty() { let (_, current_slot) = next_slots.pop().unwrap(); // Check whether we should explore the children of this slot if slot_function(&mut current_slot.borrow_mut()) { let current_slot = &RefCell::borrow(&*current_slot); for next_slot_index in current_slot.next_slots.iter() { let next_slot = find_slot_meta_else_create( db, working_set, new_chained_slots, *next_slot_index, )?; next_slots.push((*next_slot_index, next_slot)); } } } Ok(()) } fn is_orphan(meta: &SlotMeta) -> bool { // If we have no parent, then this is the head of a detached chain of // slots !meta.is_parent_set() } // 1) Chain current_slot to the previous slot defined by prev_slot_meta // 2) Determine whether to set the is_connected flag fn chain_new_slot_to_prev_slot( prev_slot_meta: &mut SlotMeta, current_slot: Slot, current_slot_meta: &mut SlotMeta, ) { prev_slot_meta.next_slots.push(current_slot); current_slot_meta.is_connected = prev_slot_meta.is_connected && prev_slot_meta.is_full(); } fn is_newly_completed_slot(slot_meta: &SlotMeta, backup_slot_meta: &Option<SlotMeta>) -> bool { slot_meta.is_full() && (backup_slot_meta.is_none() || slot_meta.consumed != backup_slot_meta.as_ref().unwrap().consumed) } fn slot_has_updates(slot_meta: &SlotMeta, slot_meta_backup: &Option<SlotMeta>) -> bool { // We should signal that there are updates if we extended the chain of consecutive blocks starting // from block 0, which is true iff: // 1) The block with index prev_block_index is itself part of the trunk of consecutive blocks // starting from block 0, slot_meta.is_connected && // AND either: // 1) The slot didn't exist in the database before, and now we have a consecutive // block for that slot ((slot_meta_backup.is_none() && slot_meta.consumed != 0) || // OR // 2) The slot did exist, but now we have a new consecutive block for that slot (slot_meta_backup.is_some() && slot_meta_backup.as_ref().unwrap().consumed != slot_meta.consumed)) } // Creates a new ledger with slot 0 full of ticks (and only ticks). // // Returns the blockhash that can be used to append entries with. pub fn create_new_ledger( ledger_path: &Path, genesis_config: &GenesisConfig, max_genesis_archive_unpacked_size: u64, access_type: AccessType, ) -> Result<Hash> { Blockstore::destroy(ledger_path)?; genesis_config.write(&ledger_path)?; // Fill slot 0 with ticks that link back to the genesis_config to bootstrap the ledger. 
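// Sketch of the steps below: open the blockstore, shred one slot's worth of ticks, insert the shreds, root slot 0, then archive genesis.bin and the rocksdb directory into genesis.tar.bz2 and verify the archive unpacks within max_genesis_archive_unpacked_size.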
let blockstore = Blockstore::open_with_access_type(ledger_path, access_type, None, false)?; let ticks_per_slot = genesis_config.ticks_per_slot; let hashes_per_tick = genesis_config.poh_config.hashes_per_tick.unwrap_or(0); let entries = create_ticks(ticks_per_slot, hashes_per_tick, genesis_config.hash()); let last_hash = entries.last().unwrap().hash; let version = solana_sdk::shred_version::version_from_hash(&last_hash); let shredder = Shredder::new(0, 0, Arc::new(Keypair::new()), 0, version).unwrap(); let shreds = shredder.entries_to_shreds(&entries, true, 0).0; assert!(shreds.last().unwrap().last_in_slot()); blockstore.insert_shreds(shreds, None, false)?; blockstore.set_roots(&[0])?; // Explicitly close the blockstore before we create the archived genesis file drop(blockstore); let archive_path = ledger_path.join("genesis.tar.bz2"); let args = vec![ "jcfhS", archive_path.to_str().unwrap(), "-C", ledger_path.to_str().unwrap(), "genesis.bin", "rocksdb", ]; let output = std::process::Command::new("tar") .args(&args) .output() .unwrap(); if !output.status.success() { use std::str::from_utf8; error!("tar stdout: {}", from_utf8(&output.stdout).unwrap_or("?")); error!("tar stderr: {}", from_utf8(&output.stderr).unwrap_or("?")); return Err(BlockstoreError::Io(IoError::new( ErrorKind::Other, format!( "Error trying to generate snapshot archive: {}", output.status ), ))); } // ensure the genesis archive can be unpacked and it is under // max_genesis_archive_unpacked_size, immediately after creating it above. { let temp_dir = tempfile::tempdir_in(ledger_path).unwrap(); // unpack into a temp dir, while completely discarding the unpacked files let unpack_check = unpack_genesis_archive( &archive_path, &temp_dir.into_path(), max_genesis_archive_unpacked_size, ); if let Err(unpack_err) = unpack_check { // stash problematic original archived genesis related files to // examine them later and to prevent validator and ledger-tool from // naively consuming them let mut error_messages = String::new(); fs::rename( &ledger_path.join("genesis.tar.bz2"), ledger_path.join("genesis.tar.bz2.failed"), ) .unwrap_or_else(|e| { error_messages += &format!("/failed to stash problematic genesis.tar.bz2: {}", e) }); fs::rename( &ledger_path.join("genesis.bin"), ledger_path.join("genesis.bin.failed"), ) .unwrap_or_else(|e| { error_messages += &format!("/failed to stash problematic genesis.bin: {}", e) }); fs::rename( &ledger_path.join("rocksdb"), ledger_path.join("rocksdb.failed"), ) .unwrap_or_else(|e| { error_messages += &format!("/failed to stash problematic rocksdb: {}", e) }); return Err(BlockstoreError::Io(IoError::new( ErrorKind::Other, format!( "Error checking to unpack genesis archive: {}{}", unpack_err, error_messages ), ))); } } Ok(last_hash) } #[macro_export] macro_rules! tmp_ledger_name { () => { &format!("{}-{}", file!(), line!()) }; } #[macro_export] macro_rules! get_tmp_ledger_path { () => { $crate::blockstore::get_ledger_path_from_name($crate::tmp_ledger_name!()) }; } pub fn get_ledger_path_from_name(name: &str) -> PathBuf { use std::env; let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()); let keypair = Keypair::new(); let path = [ out_dir, "ledger".to_string(), format!("{}-{}", name, keypair.pubkey()), ] .iter() .collect(); // whack any possible collision let _ignored = fs::remove_dir_all(&path); path } #[macro_export] macro_rules! 
create_new_tmp_ledger { ($genesis_config:expr) => { $crate::blockstore::create_new_ledger_from_name( $crate::tmp_ledger_name!(), $genesis_config, $crate::blockstore_db::AccessType::PrimaryOnly, ) }; } pub fn verify_shred_slots(slot: Slot, parent_slot: Slot, last_root: Slot) -> bool { if !is_valid_write_to_slot_0(slot, parent_slot, last_root) { // Check that the parent_slot < slot if parent_slot >= slot { return false; } // Ignore shreds that chain to slots before the last root if parent_slot < last_root { return false; } // Above two checks guarantee that by this point, slot > last_root } true } // Same as `create_new_ledger()` but use a temporary ledger name based on the provided `name` // // Note: like `create_new_ledger` the returned ledger will have slot 0 full of ticks (and only // ticks) pub fn create_new_ledger_from_name( name: &str, genesis_config: &GenesisConfig, access_type: AccessType, ) -> (PathBuf, Hash) { let ledger_path = get_ledger_path_from_name(name); let blockhash = create_new_ledger( &ledger_path, genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, access_type, ) .unwrap(); (ledger_path, blockhash) } pub fn entries_to_test_shreds( entries: Vec<Entry>, slot: Slot, parent_slot: Slot, is_full_slot: bool, version: u16, ) -> Vec<Shred> { Shredder::new(slot, parent_slot, Arc::new(Keypair::new()), 0, version) .unwrap() .entries_to_shreds(&entries, is_full_slot, 0) .0 } // used for tests only pub fn make_slot_entries( slot: Slot, parent_slot: Slot, num_entries: u64, ) -> (Vec<Shred>, Vec<Entry>) { let entries = create_ticks(num_entries, 0, Hash::default()); let shreds = entries_to_test_shreds(entries.clone(), slot, parent_slot, true, 0); (shreds, entries) } // used for tests only pub fn make_many_slot_entries( start_slot: Slot, num_slots: u64, entries_per_slot: u64, ) -> (Vec<Shred>, Vec<Entry>) { let mut shreds = vec![]; let mut entries = vec![]; for slot in start_slot..start_slot + num_slots { let parent_slot = if slot == 0 { 0 } else { slot - 1 }; let (slot_shreds, slot_entries) = make_slot_entries(slot, parent_slot, entries_per_slot); shreds.extend(slot_shreds); entries.extend(slot_entries); } (shreds, entries) } // Create shreds for slots that have a parent-child relationship defined by the input `chain` // used for tests only pub fn make_chaining_slot_entries( chain: &[u64], entries_per_slot: u64, ) -> Vec<(Vec<Shred>, Vec<Entry>)> { let mut slots_shreds_and_entries = vec![]; for (i, slot) in chain.iter().enumerate() { let parent_slot = { if *slot == 0 || i == 0 { 0 } else { chain[i - 1] } }; let result = make_slot_entries(*slot, parent_slot, entries_per_slot); slots_shreds_and_entries.push(result); } slots_shreds_and_entries } #[cfg(not(unix))] fn adjust_ulimit_nofile(_enforce_ulimit_nofile: bool) -> Result<()> { Ok(()) } #[cfg(unix)] fn adjust_ulimit_nofile(enforce_ulimit_nofile: bool) -> Result<()> { // Rocks DB likes to have many open files. 
The default open file descriptor limit is // usually not enough let desired_nofile = 500000; fn get_nofile() -> libc::rlimit { let mut nofile = libc::rlimit { rlim_cur: 0, rlim_max: 0, }; if unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut nofile) } != 0 { warn!("getrlimit(RLIMIT_NOFILE) failed"); } nofile } let mut nofile = get_nofile(); if nofile.rlim_cur < desired_nofile { nofile.rlim_cur = desired_nofile; if unsafe { libc::setrlimit(libc::RLIMIT_NOFILE, &nofile) } != 0 { error!( "Unable to increase the maximum open file descriptor limit to {}", desired_nofile ); if cfg!(target_os = "macos") { error!( "On mac OS you may need to run |sudo launchctl limit maxfiles {} {}| first", desired_nofile, desired_nofile, ); } if enforce_ulimit_nofile { return Err(BlockstoreError::UnableToSetOpenFileDescriptorLimit); } } nofile = get_nofile(); } info!("Maximum open file descriptors: {}", nofile.rlim_cur); Ok(()) } #[cfg(test)] pub mod tests { use super::*; use crate::{ entry::{next_entry, next_entry_mut}, genesis_utils::{create_genesis_config, GenesisConfigInfo}, leader_schedule::{FixedSchedule, LeaderSchedule}, shred::{max_ticks_per_n_shreds, DataShredHeader}, }; use assert_matches::assert_matches; use bincode::serialize; use itertools::Itertools; use rand::{seq::SliceRandom, thread_rng}; use solana_account_decoder::parse_token::UiTokenAmount; use solana_runtime::bank::{Bank, RewardType}; use solana_sdk::{ hash::{self, hash, Hash}, instruction::CompiledInstruction, packet::PACKET_DATA_SIZE, pubkey::Pubkey, signature::Signature, transaction::TransactionError, }; use solana_storage_proto::convert::generated; use solana_transaction_status::{InnerInstructions, Reward, Rewards, TransactionTokenBalance}; use std::time::Duration; // used for tests only pub(crate) fn make_slot_entries_with_transactions(num_entries: u64) -> Vec<Entry> { let mut entries: Vec<Entry> = Vec::new(); for x in 0..num_entries { let transaction = Transaction::new_with_compiled_instructions( &[&Keypair::new()], &[solana_sdk::pubkey::new_rand()], Hash::default(), vec![solana_sdk::pubkey::new_rand()], vec![CompiledInstruction::new(1, &(), vec![0])], ); entries.push(next_entry_mut(&mut Hash::default(), 0, vec![transaction])); let mut tick = create_ticks(1, 0, hash(&serialize(&x).unwrap())); entries.append(&mut tick); } entries } #[test] fn test_create_new_ledger() { let mint_total = 1_000_000_000_000; let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(mint_total); let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); let ledger = Blockstore::open(&ledger_path).unwrap(); let ticks = create_ticks(genesis_config.ticks_per_slot, 0, genesis_config.hash()); let entries = ledger.get_slot_entries(0, 0).unwrap(); assert_eq!(ticks, entries); // Destroying database without closing it first is undefined behavior drop(ledger); Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] fn test_insert_get_bytes() { // Create enough entries to ensure there are at least two shreds created let num_entries = max_ticks_per_n_shreds(1, None) + 1; assert!(num_entries > 1); let (mut shreds, _) = make_slot_entries(0, 0, num_entries); let ledger_path = get_tmp_ledger_path!(); let ledger = Blockstore::open(&ledger_path).unwrap(); // Insert last shred, test we can retrieve it let last_shred = shreds.pop().unwrap(); assert!(last_shred.index() > 0); ledger .insert_shreds(vec![last_shred.clone()], None, false) .unwrap(); let serialized_shred = ledger .data_shred_cf .get_bytes((0, last_shred.index() as u64)) .unwrap() .unwrap(); let deserialized_shred = Shred::new_from_serialized_shred(serialized_shred).unwrap(); assert_eq!(last_shred, deserialized_shred); // Destroying database without closing it first is undefined behavior drop(ledger); Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] fn test_write_entries() { solana_logger::setup(); let ledger_path = get_tmp_ledger_path!(); { let ticks_per_slot = 10; let num_slots = 10; let ledger = Blockstore::open(&ledger_path).unwrap(); let mut ticks = vec![]; //let mut shreds_per_slot = 0 as u64; let mut shreds_per_slot = vec![]; for i in 0..num_slots { let mut new_ticks = create_ticks(ticks_per_slot, 0, Hash::default()); let num_shreds = ledger .write_entries( i, 0, 0, ticks_per_slot, Some(i.saturating_sub(1)), true, &Arc::new(Keypair::new()), new_ticks.clone(), 0, ) .unwrap() as u64; shreds_per_slot.push(num_shreds); ticks.append(&mut new_ticks); } for i in 0..num_slots { let meta = ledger.meta(i).unwrap().unwrap(); let num_shreds = shreds_per_slot[i as usize]; assert_eq!(meta.consumed, num_shreds); assert_eq!(meta.received, num_shreds); assert_eq!(meta.last_index, num_shreds - 1); if i == num_slots - 1 { assert!(meta.next_slots.is_empty()); } else { assert_eq!(meta.next_slots, vec![i + 1]); } if i == 0 { assert_eq!(meta.parent_slot, 0); } else { assert_eq!(meta.parent_slot, i - 1); } assert_eq!( &ticks[(i * ticks_per_slot) as usize..((i + 1) * ticks_per_slot) as usize], &ledger.get_slot_entries(i, 0).unwrap()[..] ); } /* // Simulate writing to the end of a slot with existing ticks ledger .write_entries( num_slots, ticks_per_slot - 1, ticks_per_slot - 2, ticks_per_slot, &ticks[0..2], ) .unwrap(); let meta = ledger.meta(num_slots).unwrap().unwrap(); assert_eq!(meta.consumed, 0); // received shred was ticks_per_slot - 2, so received should be ticks_per_slot - 2 + 1 assert_eq!(meta.received, ticks_per_slot - 1); // last shred index ticks_per_slot - 2 because that's the shred that made tick_height == ticks_per_slot // for the slot assert_eq!(meta.last_index, ticks_per_slot - 2); assert_eq!(meta.parent_slot, num_slots - 1); assert_eq!(meta.next_slots, vec![num_slots + 1]); assert_eq!( &ticks[0..1], &ledger .get_slot_entries(num_slots, ticks_per_slot - 2) .unwrap()[..] 
); // We wrote two entries, the second should spill into slot num_slots + 1 let meta = ledger.meta(num_slots + 1).unwrap().unwrap(); assert_eq!(meta.consumed, 1); assert_eq!(meta.received, 1); assert_eq!(meta.last_index, std::u64::MAX); assert_eq!(meta.parent_slot, num_slots); assert!(meta.next_slots.is_empty()); assert_eq!( &ticks[1..2], &ledger.get_slot_entries(num_slots + 1, 0).unwrap()[..] ); */ } Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] fn test_put_get_simple() { let ledger_path = get_tmp_ledger_path!(); let ledger = Blockstore::open(&ledger_path).unwrap(); // Test meta column family let meta = SlotMeta::new(0, 1); ledger.meta_cf.put(0, &meta).unwrap(); let result = ledger .meta_cf .get(0) .unwrap() .expect("Expected meta object to exist"); assert_eq!(result, meta); // Test erasure column family let erasure = vec![1u8; 16]; let erasure_key = (0, 0); ledger .code_shred_cf .put_bytes(erasure_key, &erasure) .unwrap(); let result = ledger .code_shred_cf .get_bytes(erasure_key) .unwrap() .expect("Expected erasure object to exist"); assert_eq!(result, erasure); // Test data column family let data = vec![2u8; 16]; let data_key = (0, 0); ledger.data_shred_cf.put_bytes(data_key, &data).unwrap(); let result = ledger .data_shred_cf .get_bytes(data_key) .unwrap() .expect("Expected data object to exist"); assert_eq!(result, data); // Destroying database without closing it first is undefined behavior drop(ledger); Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] fn test_read_shred_bytes() { let slot = 0; let (shreds, _) = make_slot_entries(slot, 0, 100); let num_shreds = shreds.len() as u64; let shred_bufs: Vec<_> = shreds.iter().map(|shred| shred.payload.clone()).collect(); let ledger_path = get_tmp_ledger_path!(); let ledger = Blockstore::open(&ledger_path).unwrap(); ledger.insert_shreds(shreds, None, false).unwrap(); let mut buf = [0; 4096]; let (_, bytes) = ledger.get_data_shreds(slot, 0, 1, &mut buf).unwrap(); assert_eq!(buf[..bytes], shred_bufs[0][..bytes]); let (last_index, bytes2) = ledger.get_data_shreds(slot, 0, 2, &mut buf).unwrap(); assert_eq!(last_index, 1); assert!(bytes2 > bytes); { let shred_data_1 = &buf[..bytes]; assert_eq!(shred_data_1, &shred_bufs[0][..bytes]); let shred_data_2 = &buf[bytes..bytes2]; assert_eq!(shred_data_2, &shred_bufs[1][..bytes2 - bytes]); } // buf size part-way into shred[1], should just return shred[0] let mut buf = vec![0; bytes + 1]; let (last_index, bytes3) = ledger.get_data_shreds(slot, 0, 2, &mut buf).unwrap(); assert_eq!(last_index, 0); assert_eq!(bytes3, bytes); let mut buf = vec![0; bytes2 - 1]; let (last_index, bytes4) = ledger.get_data_shreds(slot, 0, 2, &mut buf).unwrap(); assert_eq!(last_index, 0); assert_eq!(bytes4, bytes); let mut buf = vec![0; bytes * 2]; let (last_index, bytes6) = ledger .get_data_shreds(slot, num_shreds - 1, num_shreds, &mut buf) .unwrap(); assert_eq!(last_index, num_shreds - 1); { let shred_data = &buf[..bytes6]; assert_eq!(shred_data, &shred_bufs[(num_shreds - 1) as usize][..bytes6]); } // Read out of range let (last_index, bytes6) = ledger .get_data_shreds(slot, num_shreds, num_shreds + 2, &mut buf) .unwrap(); assert_eq!(last_index, 0); assert_eq!(bytes6, 0); // Destroying database without closing it first is undefined behavior drop(ledger); Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] fn test_shred_cleanup_check() { let slot = 1; let (shreds, _) = 
make_slot_entries(slot, 0, 100); let ledger_path = get_tmp_ledger_path!(); let ledger = Blockstore::open(&ledger_path).unwrap(); ledger.insert_shreds(shreds, None, false).unwrap(); let mut buf = [0; 4096]; assert!(ledger.get_data_shreds(slot, 0, 1, &mut buf).is_ok()); let max_purge_slot = 1; ledger .run_purge(0, max_purge_slot, PurgeType::PrimaryIndex) .unwrap(); *ledger.lowest_cleanup_slot.write().unwrap() = max_purge_slot; let mut buf = [0; 4096]; assert!(ledger.get_data_shreds(slot, 0, 1, &mut buf).is_err()); } #[test] fn test_insert_data_shreds_basic() { // Create enough entries to ensure there are at least two shreds created let num_entries = max_ticks_per_n_shreds(1, None) + 1; assert!(num_entries > 1); let (mut shreds, entries) = make_slot_entries(0, 0, num_entries); let num_shreds = shreds.len() as u64; let ledger_path = get_tmp_ledger_path!(); let ledger = Blockstore::open(&ledger_path).unwrap(); // Insert last shred, we're missing the other shreds, so no consecutive // shreds starting from slot 0, index 0 should exist. assert!(shreds.len() > 1); let last_shred = shreds.pop().unwrap(); ledger.insert_shreds(vec![last_shred], None, false).unwrap(); assert!(ledger.get_slot_entries(0, 0).unwrap().is_empty()); let meta = ledger .meta(0) .unwrap() .expect("Expected new metadata object to be created"); assert!(meta.consumed == 0 && meta.received == num_shreds); // Insert the other shreds, check for consecutive returned entries ledger.insert_shreds(shreds, None, false).unwrap(); let result = ledger.get_slot_entries(0, 0).unwrap(); assert_eq!(result, entries); let meta = ledger .meta(0) .unwrap() .expect("Expected new metadata object to exist"); assert_eq!(meta.consumed, num_shreds); assert_eq!(meta.received, num_shreds); assert_eq!(meta.parent_slot, 0); assert_eq!(meta.last_index, num_shreds - 1); assert!(meta.next_slots.is_empty()); assert!(meta.is_connected); // Destroying database without closing it first is undefined behavior drop(ledger); Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] fn test_insert_data_shreds_reverse() { let num_shreds = 10; let num_entries = max_ticks_per_n_shreds(num_shreds, None); let (mut shreds, entries) = make_slot_entries(0, 0, num_entries); let num_shreds = shreds.len() as u64; let ledger_path = get_tmp_ledger_path!(); let ledger = Blockstore::open(&ledger_path).unwrap(); // Insert shreds in reverse, check for consecutive returned shreds for i in (0..num_shreds).rev() { let shred = shreds.pop().unwrap(); ledger.insert_shreds(vec![shred], None, false).unwrap(); let result = ledger.get_slot_entries(0, 0).unwrap(); let meta = ledger .meta(0) .unwrap() .expect("Expected metadata object to exist"); assert_eq!(meta.last_index, num_shreds - 1); if i != 0 { assert_eq!(result.len(), 0); assert!(meta.consumed == 0 && meta.received == num_shreds as u64); } else { assert_eq!(meta.parent_slot, 0); assert_eq!(result, entries); assert!(meta.consumed == num_shreds as u64 && meta.received == num_shreds as u64); } } // Destroying database without closing it first is undefined behavior drop(ledger); Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] fn test_insert_slots() { test_insert_data_shreds_slots("test_insert_data_shreds_slots_single", false); test_insert_data_shreds_slots("test_insert_data_shreds_slots_bulk", true); } /* #[test] pub fn test_iteration_order() { let slot = 0; let blockstore_path = get_tmp_ledger_path!(); { let blockstore = 
Blockstore::open(&blockstore_path).unwrap(); // Write entries let num_entries = 8; let entries = make_tiny_test_entries(num_entries); let mut shreds = entries.to_single_entry_shreds(); for (i, b) in shreds.iter_mut().enumerate() { b.set_index(1 << (i * 8)); b.set_slot(0); } blockstore .write_shreds(&shreds) .expect("Expected successful write of shreds"); let mut db_iterator = blockstore .db .cursor::<cf::Data>() .expect("Expected to be able to open database iterator"); db_iterator.seek((slot, 1)); // Iterate through ledger for i in 0..num_entries { assert!(db_iterator.valid()); let (_, current_index) = db_iterator.key().expect("Expected a valid key"); assert_eq!(current_index, (1 as u64) << (i * 8)); db_iterator.next(); } } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } */ #[test] pub fn test_get_slot_entries1() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); let entries = create_ticks(8, 0, Hash::default()); let shreds = entries_to_test_shreds(entries[0..4].to_vec(), 1, 0, false, 0); blockstore .insert_shreds(shreds, None, false) .expect("Expected successful write of shreds"); let mut shreds1 = entries_to_test_shreds(entries[4..].to_vec(), 1, 0, false, 0); for (i, b) in shreds1.iter_mut().enumerate() { b.set_index(8 + i as u32); } blockstore .insert_shreds(shreds1, None, false) .expect("Expected successful write of shreds"); assert_eq!( blockstore.get_slot_entries(1, 0).unwrap()[2..4], entries[2..4], ); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } // This test seems to be unnecessary with introduction of data shreds. There are no // guarantees that a particular shred index contains a complete entry #[test] #[ignore] pub fn test_get_slot_entries2() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Write entries let num_slots = 5_u64; let mut index = 0; for slot in 0..num_slots { let entries = create_ticks(slot + 1, 0, Hash::default()); let last_entry = entries.last().unwrap().clone(); let mut shreds = entries_to_test_shreds(entries, slot, slot.saturating_sub(1), false, 0); for b in shreds.iter_mut() { b.set_index(index); b.set_slot(slot as u64); index += 1; } blockstore .insert_shreds(shreds, None, false) .expect("Expected successful write of shreds"); assert_eq!( blockstore .get_slot_entries(slot, u64::from(index - 1)) .unwrap(), vec![last_entry], ); } } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_get_slot_entries3() { // Test inserting/fetching shreds which contain multiple entries per shred let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); let num_slots = 5_u64; let shreds_per_slot = 5_u64; let entry_serialized_size = bincode::serialized_size(&create_ticks(1, 0, Hash::default())).unwrap(); let entries_per_slot = (shreds_per_slot * PACKET_DATA_SIZE as u64) / entry_serialized_size; // Write entries for slot in 0..num_slots { let entries = create_ticks(entries_per_slot, 0, Hash::default()); let shreds = entries_to_test_shreds(entries.clone(), slot, slot.saturating_sub(1), false, 0); assert!(shreds.len() as u64 >= shreds_per_slot); blockstore .insert_shreds(shreds, None, false) .expect("Expected successful write of shreds"); assert_eq!(blockstore.get_slot_entries(slot, 0).unwrap(), entries); } } 
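        // The loop above exercises the round trip that most of these storage tests
        // rely on: entries are shredded with `entries_to_test_shreds`, written with
        // `insert_shreds`, and read back with `get_slot_entries`. A minimal sketch
        // (slot numbers and tick count here are illustrative only):
        //
        //     let entries = create_ticks(4, 0, Hash::default());
        //     let shreds = entries_to_test_shreds(entries.clone(), 7, 6, true, 0);
        //     blockstore.insert_shreds(shreds, None, false).unwrap();
        //     assert_eq!(blockstore.get_slot_entries(7, 0).unwrap(), entries);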
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_insert_data_shreds_consecutive() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Create enough entries to ensure there are at least two shreds created let min_entries = max_ticks_per_n_shreds(1, None) + 1; for i in 0..4 { let slot = i; let parent_slot = if i == 0 { 0 } else { i - 1 }; // Write entries let num_entries = min_entries * (i + 1); let (shreds, original_entries) = make_slot_entries(slot, parent_slot, num_entries); let num_shreds = shreds.len() as u64; assert!(num_shreds > 1); let mut even_shreds = vec![]; let mut odd_shreds = vec![]; for (i, shred) in shreds.into_iter().enumerate() { if i % 2 == 0 { even_shreds.push(shred); } else { odd_shreds.push(shred); } } blockstore.insert_shreds(odd_shreds, None, false).unwrap(); assert_eq!(blockstore.get_slot_entries(slot, 0).unwrap(), vec![]); let meta = blockstore.meta(slot).unwrap().unwrap(); if num_shreds % 2 == 0 { assert_eq!(meta.received, num_shreds); } else { trace!("got here"); assert_eq!(meta.received, num_shreds - 1); } assert_eq!(meta.consumed, 0); if num_shreds % 2 == 0 { assert_eq!(meta.last_index, num_shreds - 1); } else { assert_eq!(meta.last_index, std::u64::MAX); } blockstore.insert_shreds(even_shreds, None, false).unwrap(); assert_eq!( blockstore.get_slot_entries(slot, 0).unwrap(), original_entries, ); let meta = blockstore.meta(slot).unwrap().unwrap(); assert_eq!(meta.received, num_shreds); assert_eq!(meta.consumed, num_shreds); assert_eq!(meta.parent_slot, parent_slot); assert_eq!(meta.last_index, num_shreds - 1); } } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_insert_data_shreds_duplicate() { // Create RocksDb ledger let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Make duplicate entries and shreds let num_unique_entries = 10; let (mut original_shreds, original_entries) = make_slot_entries(0, 0, num_unique_entries); // Discard first shred original_shreds.remove(0); blockstore .insert_shreds(original_shreds, None, false) .unwrap(); assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), vec![]); let duplicate_shreds = entries_to_test_shreds(original_entries.clone(), 0, 0, true, 0); let num_shreds = duplicate_shreds.len() as u64; blockstore .insert_shreds(duplicate_shreds, None, false) .unwrap(); assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), original_entries); let meta = blockstore.meta(0).unwrap().unwrap(); assert_eq!(meta.consumed, num_shreds); assert_eq!(meta.received, num_shreds); assert_eq!(meta.parent_slot, 0); assert_eq!(meta.last_index, num_shreds - 1); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_data_set_completed_on_insert() { let ledger_path = get_tmp_ledger_path!(); let BlockstoreSignals { blockstore, .. 
} = Blockstore::open_with_signal(&ledger_path, None, true).unwrap(); // Create enough entries to fill 2 shreds, only the later one is data complete let slot = 0; let num_entries = max_ticks_per_n_shreds(1, None) + 1; let entries = create_ticks(num_entries, slot, Hash::default()); let shreds = entries_to_test_shreds(entries, slot, 0, true, 0); let num_shreds = shreds.len(); assert!(num_shreds > 1); assert!(blockstore .insert_shreds(shreds[1..].to_vec(), None, false) .unwrap() .0 .is_empty()); assert_eq!( blockstore .insert_shreds(vec![shreds[0].clone()], None, false) .unwrap() .0, vec![CompletedDataSetInfo { slot, start_index: 0, end_index: num_shreds as u32 - 1 }] ); // Inserting shreds again doesn't trigger notification assert!(blockstore .insert_shreds(shreds, None, false) .unwrap() .0 .is_empty()); } #[test] pub fn test_new_shreds_signal() { // Initialize ledger let ledger_path = get_tmp_ledger_path!(); let BlockstoreSignals { blockstore: ledger, ledger_signal_receiver: recvr, .. } = Blockstore::open_with_signal(&ledger_path, None, true).unwrap(); let ledger = Arc::new(ledger); let entries_per_slot = 50; // Create entries for slot 0 let (mut shreds, _) = make_slot_entries(0, 0, entries_per_slot); let shreds_per_slot = shreds.len() as u64; // Insert second shred, but we're missing the first shred, so no consecutive // shreds starting from slot 0, index 0 should exist. ledger .insert_shreds(vec![shreds.remove(1)], None, false) .unwrap(); let timer = Duration::new(1, 0); assert!(recvr.recv_timeout(timer).is_err()); // Insert first shred, now we've made a consecutive block ledger .insert_shreds(vec![shreds.remove(0)], None, false) .unwrap(); // Wait to get notified of update, should only be one update assert!(recvr.recv_timeout(timer).is_ok()); assert!(recvr.try_recv().is_err()); // Insert the rest of the ticks ledger.insert_shreds(shreds, None, false).unwrap(); // Wait to get notified of update, should only be one update assert!(recvr.recv_timeout(timer).is_ok()); assert!(recvr.try_recv().is_err()); // Create some other slots, and send batches of ticks for each slot such that each slot // is missing the tick at shred index == slot index - 1. Thus, no consecutive blocks // will be formed let num_slots = shreds_per_slot; let mut shreds = vec![]; let mut missing_shreds = vec![]; for slot in 1..num_slots + 1 { let (mut slot_shreds, _) = make_slot_entries(slot, slot - 1, entries_per_slot); let missing_shred = slot_shreds.remove(slot as usize - 1); shreds.extend(slot_shreds); missing_shreds.push(missing_shred); } // Should be no updates, since no new chains from block 0 were formed ledger.insert_shreds(shreds, None, false).unwrap(); assert!(recvr.recv_timeout(timer).is_err()); // Insert a shred for each slot that doesn't make a consecutive block, we // should get no updates let shreds: Vec<_> = (1..num_slots + 1) .flat_map(|slot| { let (mut shred, _) = make_slot_entries(slot, slot - 1, 1); shred[0].set_index(2 * num_slots as u32); shred }) .collect(); ledger.insert_shreds(shreds, None, false).unwrap(); assert!(recvr.recv_timeout(timer).is_err()); // For slots 1..num_slots/2, fill in the holes in one batch insertion, // so we should only get one signal let missing_shreds2 = missing_shreds .drain((num_slots / 2) as usize..) 
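            // (the second half of the missing shreds is split off here; it is inserted
            // as a separate batch below)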
.collect_vec(); ledger.insert_shreds(missing_shreds, None, false).unwrap(); assert!(recvr.recv_timeout(timer).is_ok()); assert!(recvr.try_recv().is_err()); // Fill in the holes for each of the remaining slots, we should get a single update // for each ledger.insert_shreds(missing_shreds2, None, false).unwrap(); // Destroying database without closing it first is undefined behavior drop(ledger); Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] pub fn test_completed_shreds_signal() { // Initialize ledger let ledger_path = get_tmp_ledger_path!(); let BlockstoreSignals { blockstore: ledger, completed_slots_receivers: [recvr, _], .. } = Blockstore::open_with_signal(&ledger_path, None, true).unwrap(); let ledger = Arc::new(ledger); let entries_per_slot = 10; // Create shreds for slot 0 let (mut shreds, _) = make_slot_entries(0, 0, entries_per_slot); let shred0 = shreds.remove(0); // Insert all but the first shred in the slot, should not be considered complete ledger.insert_shreds(shreds, None, false).unwrap(); assert!(recvr.try_recv().is_err()); // Insert first shred, slot should now be considered complete ledger.insert_shreds(vec![shred0], None, false).unwrap(); assert_eq!(recvr.try_recv().unwrap(), vec![0]); } #[test] pub fn test_completed_shreds_signal_orphans() { // Initialize ledger let ledger_path = get_tmp_ledger_path!(); let BlockstoreSignals { blockstore: ledger, completed_slots_receivers: [recvr, _], .. } = Blockstore::open_with_signal(&ledger_path, None, true).unwrap(); let ledger = Arc::new(ledger); let entries_per_slot = 10; let slots = vec![2, 5, 10]; let mut all_shreds = make_chaining_slot_entries(&slots[..], entries_per_slot); // Get the shreds for slot 10, chaining to slot 5 let (mut orphan_child, _) = all_shreds.remove(2); // Get the shreds for slot 5 chaining to slot 2 let (mut orphan_shreds, _) = all_shreds.remove(1); // Insert all but the first shred in the slot, should not be considered complete let orphan_child0 = orphan_child.remove(0); ledger.insert_shreds(orphan_child, None, false).unwrap(); assert!(recvr.try_recv().is_err()); // Insert first shred, slot should now be considered complete ledger .insert_shreds(vec![orphan_child0], None, false) .unwrap(); assert_eq!(recvr.try_recv().unwrap(), vec![slots[2]]); // Insert the shreds for the orphan_slot let orphan_shred0 = orphan_shreds.remove(0); ledger.insert_shreds(orphan_shreds, None, false).unwrap(); assert!(recvr.try_recv().is_err()); // Insert first shred, slot should now be considered complete ledger .insert_shreds(vec![orphan_shred0], None, false) .unwrap(); assert_eq!(recvr.try_recv().unwrap(), vec![slots[1]]); } #[test] pub fn test_completed_shreds_signal_many() { // Initialize ledger let ledger_path = get_tmp_ledger_path!(); let BlockstoreSignals { blockstore: ledger, completed_slots_receivers: [recvr, _], .. 
} = Blockstore::open_with_signal(&ledger_path, None, true).unwrap(); let ledger = Arc::new(ledger); let entries_per_slot = 10; let mut slots = vec![2, 5, 10]; let mut all_shreds = make_chaining_slot_entries(&slots[..], entries_per_slot); let disconnected_slot = 4; let (shreds0, _) = all_shreds.remove(0); let (shreds1, _) = all_shreds.remove(0); let (shreds2, _) = all_shreds.remove(0); let (shreds3, _) = make_slot_entries(disconnected_slot, 1, entries_per_slot); let mut all_shreds: Vec<_> = vec![shreds0, shreds1, shreds2, shreds3] .into_iter() .flatten() .collect(); all_shreds.shuffle(&mut thread_rng()); ledger.insert_shreds(all_shreds, None, false).unwrap(); let mut result = recvr.try_recv().unwrap(); result.sort_unstable(); slots.push(disconnected_slot); slots.sort_unstable(); assert_eq!(result, slots); } #[test] pub fn test_handle_chaining_basic() { let blockstore_path = get_tmp_ledger_path!(); { let entries_per_slot = 5; let num_slots = 3; let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Construct the shreds let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot); let shreds_per_slot = shreds.len() / num_slots as usize; // 1) Write to the first slot let shreds1 = shreds .drain(shreds_per_slot..2 * shreds_per_slot) .collect_vec(); blockstore.insert_shreds(shreds1, None, false).unwrap(); let s1 = blockstore.meta(1).unwrap().unwrap(); assert!(s1.next_slots.is_empty()); // Slot 1 is not trunk because slot 0 hasn't been inserted yet assert!(!s1.is_connected); assert_eq!(s1.parent_slot, 0); assert_eq!(s1.last_index, shreds_per_slot as u64 - 1); // 2) Write to the second slot let shreds2 = shreds .drain(shreds_per_slot..2 * shreds_per_slot) .collect_vec(); blockstore.insert_shreds(shreds2, None, false).unwrap(); let s2 = blockstore.meta(2).unwrap().unwrap(); assert!(s2.next_slots.is_empty()); // Slot 2 is not trunk because slot 0 hasn't been inserted yet assert!(!s2.is_connected); assert_eq!(s2.parent_slot, 1); assert_eq!(s2.last_index, shreds_per_slot as u64 - 1); // Check the first slot again, it should chain to the second slot, // but still isn't part of the trunk let s1 = blockstore.meta(1).unwrap().unwrap(); assert_eq!(s1.next_slots, vec![2]); assert!(!s1.is_connected); assert_eq!(s1.parent_slot, 0); assert_eq!(s1.last_index, shreds_per_slot as u64 - 1); // 3) Write to the zeroth slot, check that every slot // is now part of the trunk blockstore.insert_shreds(shreds, None, false).unwrap(); for i in 0..3 { let s = blockstore.meta(i).unwrap().unwrap(); // The last slot will not chain to any other slots if i != 2 { assert_eq!(s.next_slots, vec![i + 1]); } if i == 0 { assert_eq!(s.parent_slot, 0); } else { assert_eq!(s.parent_slot, i - 1); } assert_eq!(s.last_index, shreds_per_slot as u64 - 1); assert!(s.is_connected); } } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_handle_chaining_missing_slots() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); let num_slots = 30; let entries_per_slot = 5; // Separate every other slot into two separate vectors let mut slots = vec![]; let mut missing_slots = vec![]; let mut shreds_per_slot = 2; for slot in 0..num_slots { let parent_slot = { if slot == 0 { 0 } else { slot - 1 } }; let (slot_shreds, _) = make_slot_entries(slot, parent_slot, entries_per_slot); shreds_per_slot = slot_shreds.len(); if slot % 2 == 1 { slots.extend(slot_shreds); } else { missing_slots.extend(slot_shreds); } } // Write 
the shreds for every other slot blockstore.insert_shreds(slots, None, false).unwrap(); // Check metadata for i in 0..num_slots { // If "i" is the index of a slot we just inserted, then next_slots should be empty // for slot "i" because no slots chain to that slot, because slot i + 1 is missing. // However, if it's a slot we haven't inserted, aka one of the gaps, then one of the // slots we just inserted will chain to that gap, so next_slots for that orphan slot // won't be empty, but the parent slot is unknown so should equal std::u64::MAX. let s = blockstore.meta(i as u64).unwrap().unwrap(); if i % 2 == 0 { assert_eq!(s.next_slots, vec![i as u64 + 1]); assert_eq!(s.parent_slot, std::u64::MAX); } else { assert!(s.next_slots.is_empty()); assert_eq!(s.parent_slot, i - 1); } if i == 0 { assert!(s.is_connected); } else { assert!(!s.is_connected); } } // Write the shreds for the other half of the slots that we didn't insert earlier blockstore .insert_shreds(missing_slots, None, false) .unwrap(); for i in 0..num_slots { // Check that all the slots chain correctly once the missing slots // have been filled let s = blockstore.meta(i as u64).unwrap().unwrap(); if i != num_slots - 1 { assert_eq!(s.next_slots, vec![i as u64 + 1]); } else { assert!(s.next_slots.is_empty()); } if i == 0 { assert_eq!(s.parent_slot, 0); } else { assert_eq!(s.parent_slot, i - 1); } assert_eq!(s.last_index, shreds_per_slot as u64 - 1); assert!(s.is_connected); } } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] #[allow(clippy::cognitive_complexity)] pub fn test_forward_chaining_is_connected() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); let num_slots = 15; // Create enough entries to ensure there are at least two shreds created let entries_per_slot = max_ticks_per_n_shreds(1, None) + 1; assert!(entries_per_slot > 1); let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot); let shreds_per_slot = shreds.len() / num_slots as usize; assert!(shreds_per_slot > 1); // Write the shreds such that every 3rd slot has a gap in the beginning let mut missing_shreds = vec![]; for slot in 0..num_slots { let mut shreds_for_slot = shreds.drain(..shreds_per_slot).collect_vec(); if slot % 3 == 0 { let shred0 = shreds_for_slot.remove(0); missing_shreds.push(shred0); } blockstore .insert_shreds(shreds_for_slot, None, false) .unwrap(); } // Check metadata for i in 0..num_slots { let s = blockstore.meta(i as u64).unwrap().unwrap(); // The last slot will not chain to any other slots if i as u64 != num_slots - 1 { assert_eq!(s.next_slots, vec![i as u64 + 1]); } else { assert!(s.next_slots.is_empty()); } if i == 0 { assert_eq!(s.parent_slot, 0); } else { assert_eq!(s.parent_slot, i - 1); } assert_eq!(s.last_index, shreds_per_slot as u64 - 1); // Other than slot 0, no slots should be part of the trunk if i != 0 { assert!(!s.is_connected); } else { assert!(s.is_connected); } } // Iteratively finish every 3rd slot, and check that all slots up to and including // slot_index + 3 become part of the trunk for slot_index in 0..num_slots { if slot_index % 3 == 0 { let shred = missing_shreds.remove(0); blockstore.insert_shreds(vec![shred], None, false).unwrap(); for i in 0..num_slots { let s = blockstore.meta(i as u64).unwrap().unwrap(); if i != num_slots - 1 { assert_eq!(s.next_slots, vec![i as u64 + 1]); } else { assert!(s.next_slots.is_empty()); } if i <= slot_index as u64 + 3 { assert!(s.is_connected); } else { 
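                            // slot_index + 3 is the next slot that is still missing its
                            // first shred, so every later slot chains through an
                            // incomplete ancestor and cannot be connected yet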
assert!(!s.is_connected); } if i == 0 { assert_eq!(s.parent_slot, 0); } else { assert_eq!(s.parent_slot, i - 1); } assert_eq!(s.last_index, shreds_per_slot as u64 - 1); } } } } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } /* #[test] pub fn test_chaining_tree() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); let num_tree_levels = 6; assert!(num_tree_levels > 1); let branching_factor: u64 = 4; // Number of slots that will be in the tree let num_slots = (branching_factor.pow(num_tree_levels) - 1) / (branching_factor - 1); let erasure_config = ErasureConfig::default(); let entries_per_slot = erasure_config.num_data() as u64; assert!(entries_per_slot > 1); let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot); // Insert tree one slot at a time in a random order let mut slots: Vec<_> = (0..num_slots).collect(); // Get shreds for the slot slots.shuffle(&mut thread_rng()); for slot in slots { // Get shreds for the slot "slot" let slot_shreds = &mut shreds [(slot * entries_per_slot) as usize..((slot + 1) * entries_per_slot) as usize]; for shred in slot_shreds.iter_mut() { // Get the parent slot of the slot in the tree let slot_parent = { if slot == 0 { 0 } else { (slot - 1) / branching_factor } }; shred.set_parent(slot_parent); } let shared_shreds: Vec<_> = slot_shreds .iter() .cloned() .map(|shred| Arc::new(RwLock::new(shred))) .collect(); let mut coding_generator = CodingGenerator::new_from_config(&erasure_config); let coding_shreds = coding_generator.next(&shared_shreds); assert_eq!(coding_shreds.len(), erasure_config.num_coding()); let mut rng = thread_rng(); // Randomly pick whether to insert erasure or coding shreds first if rng.gen_bool(0.5) { blockstore.write_shreds(slot_shreds).unwrap(); blockstore.put_shared_coding_shreds(&coding_shreds).unwrap(); } else { blockstore.put_shared_coding_shreds(&coding_shreds).unwrap(); blockstore.write_shreds(slot_shreds).unwrap(); } } // Make sure everything chains correctly let last_level = (branching_factor.pow(num_tree_levels - 1) - 1) / (branching_factor - 1); for slot in 0..num_slots { let slot_meta = blockstore.meta(slot).unwrap().unwrap(); assert_eq!(slot_meta.consumed, entries_per_slot); assert_eq!(slot_meta.received, entries_per_slot); assert!(slot_meta.is_connected); let slot_parent = { if slot == 0 { 0 } else { (slot - 1) / branching_factor } }; assert_eq!(slot_meta.parent_slot, slot_parent); let expected_children: HashSet<_> = { if slot >= last_level { HashSet::new() } else { let first_child_slot = min(num_slots - 1, slot * branching_factor + 1); let last_child_slot = min(num_slots - 1, (slot + 1) * branching_factor); (first_child_slot..last_child_slot + 1).collect() } }; let result: HashSet<_> = slot_meta.next_slots.iter().cloned().collect(); if expected_children.len() != 0 { assert_eq!(slot_meta.next_slots.len(), branching_factor as usize); } else { assert_eq!(slot_meta.next_slots.len(), 0); } assert_eq!(expected_children, result); } // No orphan slots should exist assert!(blockstore.orphans_cf.is_empty().unwrap()) } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } */ #[test] pub fn test_get_slots_since() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Slot doesn't exist assert!(blockstore.get_slots_since(&[0]).unwrap().is_empty()); let mut meta0 = SlotMeta::new(0, 0); blockstore.meta_cf.put(0, 
&meta0).unwrap(); // Slot exists, chains to nothing let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![])].into_iter().collect(); assert_eq!(blockstore.get_slots_since(&[0]).unwrap(), expected); meta0.next_slots = vec![1, 2]; blockstore.meta_cf.put(0, &meta0).unwrap(); // Slot exists, chains to some other slots let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![1, 2])].into_iter().collect(); assert_eq!(blockstore.get_slots_since(&[0]).unwrap(), expected); assert_eq!(blockstore.get_slots_since(&[0, 1]).unwrap(), expected); let mut meta3 = SlotMeta::new(3, 1); meta3.next_slots = vec![10, 5]; blockstore.meta_cf.put(3, &meta3).unwrap(); let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![1, 2]), (3, vec![10, 5])] .into_iter() .collect(); assert_eq!(blockstore.get_slots_since(&[0, 1, 3]).unwrap(), expected); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_orphans() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Create shreds and entries let entries_per_slot = 1; let (mut shreds, _) = make_many_slot_entries(0, 3, entries_per_slot); let shreds_per_slot = shreds.len() / 3; // Write slot 2, which chains to slot 1. We're missing slot 0, // so slot 1 is the orphan let shreds_for_slot = shreds.drain((shreds_per_slot * 2)..).collect_vec(); blockstore .insert_shreds(shreds_for_slot, None, false) .unwrap(); let meta = blockstore .meta(1) .expect("Expect database get to succeed") .unwrap(); assert!(is_orphan(&meta)); assert_eq!( blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(), vec![1] ); // Write slot 1 which chains to slot 0, so now slot 0 is the // orphan, and slot 1 is no longer the orphan. let shreds_for_slot = shreds.drain(shreds_per_slot..).collect_vec(); blockstore .insert_shreds(shreds_for_slot, None, false) .unwrap(); let meta = blockstore .meta(1) .expect("Expect database get to succeed") .unwrap(); assert!(!is_orphan(&meta)); let meta = blockstore .meta(0) .expect("Expect database get to succeed") .unwrap(); assert!(is_orphan(&meta)); assert_eq!( blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(), vec![0] ); // Write some slot that also chains to existing slots and orphan, // nothing should change let (shred4, _) = make_slot_entries(4, 0, 1); let (shred5, _) = make_slot_entries(5, 1, 1); blockstore.insert_shreds(shred4, None, false).unwrap(); blockstore.insert_shreds(shred5, None, false).unwrap(); assert_eq!( blockstore.orphans_iterator(0).unwrap().collect::<Vec<_>>(), vec![0] ); // Write zeroth slot, no more orphans blockstore.insert_shreds(shreds, None, false).unwrap(); for i in 0..3 { let meta = blockstore .meta(i) .expect("Expect database get to succeed") .unwrap(); assert!(!is_orphan(&meta)); } // Orphans cf is empty assert!(blockstore.orphans_cf.is_empty().unwrap()) } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } fn test_insert_data_shreds_slots(name: &str, should_bulk_write: bool) { let blockstore_path = get_ledger_path_from_name(name); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Create shreds and entries let num_entries = 20_u64; let mut entries = vec![]; let mut shreds = vec![]; let mut num_shreds_per_slot = 0; for slot in 0..num_entries { let parent_slot = { if slot == 0 { 0 } else { slot - 1 } }; let (mut shred, entry) = make_slot_entries(slot, parent_slot, 1); num_shreds_per_slot = shred.len() as u64; shred .iter_mut() .enumerate() .for_each(|(_, shred)| 
shred.set_index(0)); shreds.extend(shred); entries.extend(entry); } let num_shreds = shreds.len(); // Write shreds to the database if should_bulk_write { blockstore.insert_shreds(shreds, None, false).unwrap(); } else { for _ in 0..num_shreds { let shred = shreds.remove(0); blockstore.insert_shreds(vec![shred], None, false).unwrap(); } } for i in 0..num_entries - 1 { assert_eq!( blockstore.get_slot_entries(i, 0).unwrap()[0], entries[i as usize] ); let meta = blockstore.meta(i).unwrap().unwrap(); assert_eq!(meta.received, 1); assert_eq!(meta.last_index, 0); if i != 0 { assert_eq!(meta.parent_slot, i - 1); assert_eq!(meta.consumed, 1); } else { assert_eq!(meta.parent_slot, 0); assert_eq!(meta.consumed, num_shreds_per_slot); } } } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_find_missing_data_indexes() { let slot = 0; let blockstore_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Write entries let gap: u64 = 10; assert!(gap > 3); // Create enough entries to ensure there are at least two shreds created let num_entries = max_ticks_per_n_shreds(1, None) + 1; let entries = create_ticks(num_entries, 0, Hash::default()); let mut shreds = entries_to_test_shreds(entries, slot, 0, true, 0); let num_shreds = shreds.len(); assert!(num_shreds > 1); for (i, s) in shreds.iter_mut().enumerate() { s.set_index(i as u32 * gap as u32); s.set_slot(slot); } blockstore.insert_shreds(shreds, None, false).unwrap(); // Index of the first shred is 0 // Index of the second shred is "gap" // Thus, the missing indexes should then be [1, gap - 1] for the input index // range of [0, gap) let expected: Vec<u64> = (1..gap).collect(); assert_eq!( blockstore.find_missing_data_indexes(slot, 0, 0, gap, gap as usize), expected ); assert_eq!( blockstore.find_missing_data_indexes(slot, 0, 1, gap, (gap - 1) as usize), expected, ); assert_eq!( blockstore.find_missing_data_indexes(slot, 0, 0, gap - 1, (gap - 1) as usize), &expected[..expected.len() - 1], ); assert_eq!( blockstore.find_missing_data_indexes(slot, 0, gap - 2, gap, gap as usize), vec![gap - 2, gap - 1], ); assert_eq!( blockstore.find_missing_data_indexes(slot, 0, gap - 2, gap, 1), vec![gap - 2], ); assert_eq!( blockstore.find_missing_data_indexes(slot, 0, 0, gap, 1), vec![1], ); // Test with a range that encompasses a shred with index == gap which was // already inserted. 
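        // e.g. with gap == 10, the inserted data shreds sit at indexes 0, 10, 20, ...,
        // so a query over [0, gap + 2) should report indexes 1..=9 and 11 as missing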
let mut expected: Vec<u64> = (1..gap).collect(); expected.push(gap + 1); assert_eq!( blockstore.find_missing_data_indexes(slot, 0, 0, gap + 2, (gap + 2) as usize), expected, ); assert_eq!( blockstore.find_missing_data_indexes(slot, 0, 0, gap + 2, (gap - 1) as usize), &expected[..expected.len() - 1], ); for i in 0..num_shreds as u64 { for j in 0..i { let expected: Vec<u64> = (j..i) .flat_map(|k| { let begin = k * gap + 1; let end = (k + 1) * gap; begin..end }) .collect(); assert_eq!( blockstore.find_missing_data_indexes( slot, 0, j * gap, i * gap, ((i - j) * gap) as usize ), expected, ); } } drop(blockstore); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_find_missing_data_indexes_timeout() { let slot = 0; let blockstore_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Write entries let gap: u64 = 10; let shreds: Vec<_> = (0..64) .map(|i| { Shred::new_from_data( slot, (i * gap) as u32, 0, None, false, false, i as u8, 0, (i * gap) as u32, ) }) .collect(); blockstore.insert_shreds(shreds, None, false).unwrap(); let empty: Vec<u64> = vec![]; assert_eq!( blockstore.find_missing_data_indexes(slot, timestamp(), 0, 50, 1), empty ); let expected: Vec<_> = (1..=9).collect(); assert_eq!( blockstore.find_missing_data_indexes(slot, timestamp() - 400, 0, 50, 9), expected ); drop(blockstore); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_find_missing_data_indexes_sanity() { let slot = 0; let blockstore_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Early exit conditions let empty: Vec<u64> = vec![]; assert_eq!( blockstore.find_missing_data_indexes(slot, 0, 0, 0, 1), empty ); assert_eq!( blockstore.find_missing_data_indexes(slot, 0, 5, 5, 1), empty ); assert_eq!( blockstore.find_missing_data_indexes(slot, 0, 4, 3, 1), empty ); assert_eq!( blockstore.find_missing_data_indexes(slot, 0, 1, 2, 0), empty ); let entries = create_ticks(100, 0, Hash::default()); let mut shreds = entries_to_test_shreds(entries, slot, 0, true, 0); assert!(shreds.len() > 2); shreds.drain(2..); const ONE: u64 = 1; const OTHER: u64 = 4; shreds[0].set_index(ONE as u32); shreds[1].set_index(OTHER as u32); // Insert one shred at index = first_index blockstore.insert_shreds(shreds, None, false).unwrap(); const STARTS: u64 = OTHER * 2; const END: u64 = OTHER * 3; const MAX: usize = 10; // The first shred has index = first_index. 
Thus, for i < first_index, // given the input range of [i, first_index], the missing indexes should be // [i, first_index - 1] for start in 0..STARTS { let result = blockstore.find_missing_data_indexes( slot, 0, start, // start END, //end MAX, //max ); let expected: Vec<u64> = (start..END).filter(|i| *i != ONE && *i != OTHER).collect(); assert_eq!(result, expected); } drop(blockstore); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_no_missing_shred_indexes() { let slot = 0; let blockstore_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Write entries let num_entries = 10; let entries = create_ticks(num_entries, 0, Hash::default()); let shreds = entries_to_test_shreds(entries, slot, 0, true, 0); let num_shreds = shreds.len(); blockstore.insert_shreds(shreds, None, false).unwrap(); let empty: Vec<u64> = vec![]; for i in 0..num_shreds as u64 { for j in 0..i { assert_eq!( blockstore.find_missing_data_indexes(slot, 0, j, i, (i - j) as usize), empty ); } } drop(blockstore); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_should_insert_data_shred() { solana_logger::setup(); let (mut shreds, _) = make_slot_entries(0, 0, 200); let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); let last_root = RwLock::new(0); // Insert the first 5 shreds, we don't have a "is_last" shred yet blockstore .insert_shreds(shreds[0..5].to_vec(), None, false) .unwrap(); let slot_meta = blockstore.meta(0).unwrap().unwrap(); // Corrupt shred by making it too large let mut shred5 = shreds[5].clone(); shred5.payload.push(10); shred5.data_header.size = shred5.payload.len() as u16; assert_eq!( blockstore.should_insert_data_shred( &shred5, &slot_meta, &HashMap::new(), &last_root, None, false ), false ); // Trying to insert another "is_last" shred with index < the received index should fail // skip over shred 7 blockstore .insert_shreds(shreds[8..9].to_vec(), None, false) .unwrap(); let slot_meta = blockstore.meta(0).unwrap().unwrap(); assert_eq!(slot_meta.received, 9); let shred7 = { if shreds[7].is_data() { shreds[7].set_last_in_slot(); shreds[7].clone() } else { panic!("Shred in unexpected format") } }; assert_eq!( blockstore.should_insert_data_shred( &shred7, &slot_meta, &HashMap::new(), &last_root, None, false ), false ); assert!(blockstore.has_duplicate_shreds_in_slot(0)); // Insert all pending shreds let mut shred8 = shreds[8].clone(); blockstore.insert_shreds(shreds, None, false).unwrap(); let slot_meta = blockstore.meta(0).unwrap().unwrap(); // Trying to insert a shred with index > the "is_last" shred should fail if shred8.is_data() { shred8.set_slot(slot_meta.last_index + 1); } else { panic!("Shred in unexpected format") } assert_eq!( blockstore.should_insert_data_shred( &shred7, &slot_meta, &HashMap::new(), &last_root, None, false ), false ); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_is_data_shred_present() { let (shreds, _) = make_slot_entries(0, 0, 200); let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); let index_cf = blockstore.db.column::<cf::Index>(); blockstore .insert_shreds(shreds[0..5].to_vec(), None, false) .unwrap(); // Insert a shred less than `slot_meta.consumed`, check that // it already exists let slot_meta = blockstore.meta(0).unwrap().unwrap(); 
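            // `is_data_shred_present` consults both the SlotMeta and the slot's data
            // index: a shred whose index is below `consumed`, or that is already
            // recorded in the index, is treated as present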
let index = index_cf.get(0).unwrap().unwrap(); assert_eq!(slot_meta.consumed, 5); assert!(Blockstore::is_data_shred_present( &shreds[1], &slot_meta, index.data(), )); // Insert a shred, check that it already exists blockstore .insert_shreds(shreds[6..7].to_vec(), None, false) .unwrap(); let slot_meta = blockstore.meta(0).unwrap().unwrap(); let index = index_cf.get(0).unwrap().unwrap(); assert!(Blockstore::is_data_shred_present( &shreds[6], &slot_meta, index.data() ),); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_check_cache_coding_shred() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); let slot = 1; let (shred, coding) = Shredder::new_coding_shred_header(slot, 11, 11, 11, 11, 10, 0); let coding_shred = Shred::new_empty_from_header(shred, DataShredHeader::default(), coding); let mut erasure_metas = HashMap::new(); let mut index_working_set = HashMap::new(); let mut just_received_coding_shreds = HashMap::new(); let mut index_meta_time = 0; assert!(blockstore.check_cache_coding_shred( coding_shred.clone(), &mut erasure_metas, &mut index_working_set, &mut just_received_coding_shreds, &mut index_meta_time, &|_shred| { panic!("no dupes"); }, false, )); // insert again fails on dupe use std::sync::atomic::{AtomicUsize, Ordering}; let counter = AtomicUsize::new(0); assert!(!blockstore.check_cache_coding_shred( coding_shred, &mut erasure_metas, &mut index_working_set, &mut just_received_coding_shreds, &mut index_meta_time, &|_shred| { counter.fetch_add(1, Ordering::Relaxed); }, false, )); assert_eq!(counter.load(Ordering::Relaxed), 1); } } #[test] pub fn test_should_insert_coding_shred() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); let last_root = RwLock::new(0); let slot = 1; let (mut shred, coding) = Shredder::new_coding_shred_header(slot, 11, 11, 11, 11, 10, 0); let coding_shred = Shred::new_empty_from_header( shred.clone(), DataShredHeader::default(), coding.clone(), ); // Insert a good coding shred assert!(Blockstore::should_insert_coding_shred( &coding_shred, &last_root )); // Insertion should succeed blockstore .insert_shreds(vec![coding_shred.clone()], None, false) .unwrap(); // Trying to insert the same shred again should pass since this doesn't check for // duplicate index { assert!(Blockstore::should_insert_coding_shred( &coding_shred, &last_root )); } shred.index += 1; // Establish a baseline that works { let coding_shred = Shred::new_empty_from_header( shred.clone(), DataShredHeader::default(), coding.clone(), ); assert!(Blockstore::should_insert_coding_shred( &coding_shred, &last_root )); } // Trying to insert a shred with index < position should fail { let mut coding_shred = Shred::new_empty_from_header( shred.clone(), DataShredHeader::default(), coding.clone(), ); let index = coding_shred.coding_header.position - 1; coding_shred.set_index(index as u32); assert!(!Blockstore::should_insert_coding_shred( &coding_shred, &last_root )); } // Trying to insert shred with num_coding == 0 should fail { let mut coding_shred = Shred::new_empty_from_header( shred.clone(), DataShredHeader::default(), coding.clone(), ); coding_shred.coding_header.num_coding_shreds = 0; assert!(!Blockstore::should_insert_coding_shred( &coding_shred, &last_root )); } // Trying to insert shred with pos >= num_coding should fail { let mut coding_shred = Shred::new_empty_from_header( shred.clone(), 
DataShredHeader::default(), coding.clone(), ); coding_shred.coding_header.num_coding_shreds = coding_shred.coding_header.position; assert!(!Blockstore::should_insert_coding_shred( &coding_shred, &last_root )); } // Trying to insert with set_index with num_coding that would imply the last shred // has index > u32::MAX should fail { let mut coding_shred = Shred::new_empty_from_header( shred.clone(), DataShredHeader::default(), coding.clone(), ); coding_shred.common_header.fec_set_index = std::u32::MAX - 1; coding_shred.coding_header.num_coding_shreds = 3; coding_shred.common_header.index = std::u32::MAX - 1; coding_shred.coding_header.position = 0; assert!(!Blockstore::should_insert_coding_shred( &coding_shred, &last_root )); coding_shred.coding_header.num_coding_shreds = 2000; assert!(!Blockstore::should_insert_coding_shred( &coding_shred, &last_root )); // Decreasing the number of num_coding_shreds will put it within the allowed limit coding_shred.coding_header.num_coding_shreds = 2; assert!(Blockstore::should_insert_coding_shred( &coding_shred, &last_root )); // Insertion should succeed blockstore .insert_shreds(vec![coding_shred], None, false) .unwrap(); } // Trying to insert value into slot <= than last root should fail { let mut coding_shred = Shred::new_empty_from_header(shred, DataShredHeader::default(), coding); coding_shred.set_slot(*last_root.read().unwrap()); assert!(!Blockstore::should_insert_coding_shred( &coding_shred, &last_root )); } } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] pub fn test_insert_multiple_is_last() { solana_logger::setup(); let (shreds, _) = make_slot_entries(0, 0, 20); let num_shreds = shreds.len() as u64; let blockstore_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&blockstore_path).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap(); let slot_meta = blockstore.meta(0).unwrap().unwrap(); assert_eq!(slot_meta.consumed, num_shreds); assert_eq!(slot_meta.received, num_shreds); assert_eq!(slot_meta.last_index, num_shreds - 1); assert!(slot_meta.is_full()); let (shreds, _) = make_slot_entries(0, 0, 22); blockstore.insert_shreds(shreds, None, false).unwrap(); let slot_meta = blockstore.meta(0).unwrap().unwrap(); assert_eq!(slot_meta.consumed, num_shreds); assert_eq!(slot_meta.received, num_shreds); assert_eq!(slot_meta.last_index, num_shreds - 1); assert!(slot_meta.is_full()); assert!(blockstore.has_duplicate_shreds_in_slot(0)); drop(blockstore); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_slot_data_iterator() { // Construct the shreds let blockstore_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&blockstore_path).unwrap(); let shreds_per_slot = 10; let slots = vec![2, 4, 8, 12]; let all_shreds = make_chaining_slot_entries(&slots, shreds_per_slot); let slot_8_shreds = all_shreds[2].0.clone(); for (slot_shreds, _) in all_shreds { blockstore.insert_shreds(slot_shreds, None, false).unwrap(); } // Slot doesnt exist, iterator should be empty let shred_iter = blockstore.slot_data_iterator(5, 0).unwrap(); let result: Vec<_> = shred_iter.collect(); assert_eq!(result, vec![]); // Test that the iterator for slot 8 contains what was inserted earlier let shred_iter = blockstore.slot_data_iterator(8, 0).unwrap(); let result: Vec<Shred> = shred_iter .filter_map(|(_, bytes)| Shred::new_from_serialized_shred(bytes.to_vec()).ok()) .collect(); assert_eq!(result.len(), slot_8_shreds.len()); assert_eq!(result, 
slot_8_shreds); drop(blockstore); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_set_roots() { let blockstore_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&blockstore_path).unwrap(); let chained_slots = vec![0, 2, 4, 7, 12, 15]; assert_eq!(blockstore.last_root(), 0); blockstore.set_roots(&chained_slots).unwrap(); assert_eq!(blockstore.last_root(), 15); for i in chained_slots { assert!(blockstore.is_root(i)); } drop(blockstore); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_is_skipped() { let blockstore_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&blockstore_path).unwrap(); let roots = vec![2, 4, 7, 12, 15]; blockstore.set_roots(&roots).unwrap(); for i in 0..20 { if i < 2 || roots.contains(&i) || i > 15 { assert!(!blockstore.is_skipped(i)); } else { assert!(blockstore.is_skipped(i)); } } drop(blockstore); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_iter_bounds() { let blockstore_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&blockstore_path).unwrap(); // slot 5 does not exist, iter should be ok and should be a noop blockstore .slot_meta_iterator(5) .unwrap() .for_each(|_| panic!()); drop(blockstore); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_get_completed_data_ranges() { let completed_data_end_indexes = vec![2, 4, 9, 11]; // Consumed is 1, which means we're missing shred with index 1, should return empty let start_index = 0; let consumed = 1; assert_eq!( Blockstore::get_completed_data_ranges( start_index, &completed_data_end_indexes[..], consumed ), vec![] ); let start_index = 0; let consumed = 3; assert_eq!( Blockstore::get_completed_data_ranges( start_index, &completed_data_end_indexes[..], consumed ), vec![(0, 2)] ); // Test all possible ranges: // // `consumed == completed_data_end_indexes[j] + 1`, means we have all the shreds up to index // `completed_data_end_indexes[j] + 1`. Thus the completed data blocks is everything in the // range: // [start_index, completed_data_end_indexes[j]] == // [completed_data_end_indexes[i], completed_data_end_indexes[j]], for i in 0..completed_data_end_indexes.len() { for j in i..completed_data_end_indexes.len() { let start_index = completed_data_end_indexes[i]; let consumed = completed_data_end_indexes[j] + 1; // When start_index == completed_data_end_indexes[i], then that means // the shred with index == start_index is a single-shred data block, // so the start index is the end index for that data block. 
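                // e.g. i == 0, j == 2 gives start_index == 2 and consumed == 10, so the
                // expected ranges are [(2, 2), (3, 4), (5, 9)]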
let mut expected = vec![(start_index, start_index)]; expected.extend( completed_data_end_indexes[i..=j] .windows(2) .map(|end_indexes| (end_indexes[0] + 1, end_indexes[1])), ); assert_eq!( Blockstore::get_completed_data_ranges( start_index, &completed_data_end_indexes[..], consumed ), expected ); } } } #[test] fn test_get_slot_entries_with_shred_count_corruption() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); let num_ticks = 8; let entries = create_ticks(num_ticks, 0, Hash::default()); let slot = 1; let shreds = entries_to_test_shreds(entries, slot, 0, false, 0); let next_shred_index = shreds.len(); blockstore .insert_shreds(shreds, None, false) .expect("Expected successful write of shreds"); assert_eq!( blockstore.get_slot_entries(slot, 0).unwrap().len() as u64, num_ticks ); // Insert an empty shred that won't deshred into entries let shreds = vec![Shred::new_from_data( slot, next_shred_index as u32, 1, Some(&[1, 1, 1]), true, true, 0, 0, next_shred_index as u32, )]; // With the corruption, nothing should be returned, even though an // earlier data block was valid blockstore .insert_shreds(shreds, None, false) .expect("Expected successful write of shreds"); assert!(blockstore.get_slot_entries(slot, 0).is_err()); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_no_insert_but_modify_slot_meta() { // This tests correctness of the SlotMeta in various cases in which a shred // that gets filtered out by checks let (shreds0, _) = make_slot_entries(0, 0, 200); let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Insert the first 5 shreds, we don't have a "is_last" shred yet blockstore .insert_shreds(shreds0[0..5].to_vec(), None, false) .unwrap(); // Insert a repetitive shred for slot 's', should get ignored, but also // insert shreds that chains to 's', should see the update in the SlotMeta // for 's'. 
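            // Concretely: the duplicate of shreds0[1] below is filtered out, but the
            // shreds for slots 2 and 3 (which both name slot 0 as their parent) must
            // still show up in slot 0's `next_slots`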
let (mut shreds2, _) = make_slot_entries(2, 0, 200); let (mut shreds3, _) = make_slot_entries(3, 0, 200); shreds2.push(shreds0[1].clone()); shreds3.insert(0, shreds0[1].clone()); blockstore.insert_shreds(shreds2, None, false).unwrap(); let slot_meta = blockstore.meta(0).unwrap().unwrap(); assert_eq!(slot_meta.next_slots, vec![2]); blockstore.insert_shreds(shreds3, None, false).unwrap(); let slot_meta = blockstore.meta(0).unwrap().unwrap(); assert_eq!(slot_meta.next_slots, vec![2, 3]); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_trusted_insert_shreds() { // Make shred for slot 1 let (shreds1, _) = make_slot_entries(1, 0, 1); let blockstore_path = get_tmp_ledger_path!(); let last_root = 100; { let blockstore = Blockstore::open(&blockstore_path).unwrap(); blockstore.set_roots(&[last_root]).unwrap(); // Insert will fail, slot < root blockstore .insert_shreds(shreds1[..].to_vec(), None, false) .unwrap(); assert!(blockstore.get_data_shred(1, 0).unwrap().is_none()); // Insert through trusted path will succeed blockstore .insert_shreds(shreds1[..].to_vec(), None, true) .unwrap(); assert!(blockstore.get_data_shred(1, 0).unwrap().is_some()); } } #[test] fn test_get_rooted_block() { let slot = 10; let entries = make_slot_entries_with_transactions(100); let blockhash = get_last_hash(entries.iter()).unwrap(); let shreds = entries_to_test_shreds(entries.clone(), slot, slot - 1, true, 0); let more_shreds = entries_to_test_shreds(entries.clone(), slot + 1, slot, true, 0); let unrooted_shreds = entries_to_test_shreds(entries.clone(), slot + 2, slot + 1, true, 0); let ledger_path = get_tmp_ledger_path!(); let ledger = Blockstore::open(&ledger_path).unwrap(); ledger.insert_shreds(shreds, None, false).unwrap(); ledger.insert_shreds(more_shreds, None, false).unwrap(); ledger.insert_shreds(unrooted_shreds, None, false).unwrap(); ledger.set_roots(&[slot - 1, slot, slot + 1]).unwrap(); let parent_meta = SlotMeta { parent_slot: std::u64::MAX, ..SlotMeta::default() }; ledger .put_meta_bytes(slot - 1, &serialize(&parent_meta).unwrap()) .unwrap(); let expected_transactions: Vec<TransactionWithStatusMeta> = entries .iter() .cloned() .filter(|entry| !entry.is_tick()) .flat_map(|entry| entry.transactions) .map(|transaction| { let mut pre_balances: Vec<u64> = vec![]; let mut post_balances: Vec<u64> = vec![]; for (i, _account_key) in transaction.message.account_keys.iter().enumerate() { pre_balances.push(i as u64 * 10); post_balances.push(i as u64 * 11); } let signature = transaction.signatures[0]; let status = TransactionStatusMeta { status: Ok(()), fee: 42, pre_balances: pre_balances.clone(), post_balances: post_balances.clone(), inner_instructions: Some(vec![]), log_messages: Some(vec![]), pre_token_balances: Some(vec![]), post_token_balances: Some(vec![]), } .into(); ledger .transaction_status_cf .put_protobuf((0, signature, slot), &status) .unwrap(); let status = TransactionStatusMeta { status: Ok(()), fee: 42, pre_balances: pre_balances.clone(), post_balances: post_balances.clone(), inner_instructions: Some(vec![]), log_messages: Some(vec![]), pre_token_balances: Some(vec![]), post_token_balances: Some(vec![]), } .into(); ledger .transaction_status_cf .put_protobuf((0, signature, slot + 1), &status) .unwrap(); let status = TransactionStatusMeta { status: Ok(()), fee: 42, pre_balances: pre_balances.clone(), post_balances: post_balances.clone(), inner_instructions: Some(vec![]), log_messages: Some(vec![]), pre_token_balances: Some(vec![]), 
post_token_balances: Some(vec![]), } .into(); ledger .transaction_status_cf .put_protobuf((0, signature, slot + 2), &status) .unwrap(); TransactionWithStatusMeta { transaction, meta: Some(TransactionStatusMeta { status: Ok(()), fee: 42, pre_balances, post_balances, inner_instructions: Some(vec![]), log_messages: Some(vec![]), pre_token_balances: Some(vec![]), post_token_balances: Some(vec![]), }), } }) .collect(); // Even if marked as root, a slot that is empty of entries should return an error let confirmed_block_err = ledger.get_rooted_block(slot - 1, true).unwrap_err(); assert_matches!(confirmed_block_err, BlockstoreError::SlotUnavailable); // The previous_blockhash of `expected_block` is default because its parent slot is a root, // but empty of entries (eg. snapshot root slots). This now returns an error. let confirmed_block_err = ledger.get_rooted_block(slot, true).unwrap_err(); assert_matches!( confirmed_block_err, BlockstoreError::ParentEntriesUnavailable ); // Test if require_previous_blockhash is false let confirmed_block = ledger.get_rooted_block(slot, false).unwrap(); assert_eq!(confirmed_block.transactions.len(), 100); let expected_block = ConfirmedBlock { transactions: expected_transactions.clone(), parent_slot: slot - 1, blockhash: blockhash.to_string(), previous_blockhash: Hash::default().to_string(), rewards: vec![], block_time: None, }; assert_eq!(confirmed_block, expected_block); let confirmed_block = ledger.get_rooted_block(slot + 1, true).unwrap(); assert_eq!(confirmed_block.transactions.len(), 100); let mut expected_block = ConfirmedBlock { transactions: expected_transactions.clone(), parent_slot: slot, blockhash: blockhash.to_string(), previous_blockhash: blockhash.to_string(), rewards: vec![], block_time: None, }; assert_eq!(confirmed_block, expected_block); let not_root = ledger.get_rooted_block(slot + 2, true).unwrap_err(); assert_matches!(not_root, BlockstoreError::SlotNotRooted); let complete_block = ledger.get_complete_block(slot + 2, true).unwrap(); assert_eq!(complete_block.transactions.len(), 100); let mut expected_complete_block = ConfirmedBlock { transactions: expected_transactions, parent_slot: slot + 1, blockhash: blockhash.to_string(), previous_blockhash: blockhash.to_string(), rewards: vec![], block_time: None, }; assert_eq!(complete_block, expected_complete_block); // Test block_time returns, if available let timestamp = 1_576_183_541; ledger.blocktime_cf.put(slot + 1, &timestamp).unwrap(); expected_block.block_time = Some(timestamp); let confirmed_block = ledger.get_rooted_block(slot + 1, true).unwrap(); assert_eq!(confirmed_block, expected_block); let timestamp = 1_576_183_542; ledger.blocktime_cf.put(slot + 2, &timestamp).unwrap(); expected_complete_block.block_time = Some(timestamp); let complete_block = ledger.get_complete_block(slot + 2, true).unwrap(); assert_eq!(complete_block, expected_complete_block); drop(ledger); Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); } #[test] fn test_persist_transaction_status() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); let transaction_status_cf = blockstore.db.column::<cf::TransactionStatus>(); let pre_balances_vec = vec![1, 2, 3]; let post_balances_vec = vec![3, 2, 1]; let inner_instructions_vec = vec![InnerInstructions { index: 0, instructions: vec![CompiledInstruction::new(1, &(), vec![0])], }]; let log_messages_vec = vec![String::from("Test message\n")]; let pre_token_balances_vec = vec![]; let 
post_token_balances_vec = vec![]; // result not found assert!(transaction_status_cf .get_protobuf_or_bincode::<StoredTransactionStatusMeta>(( 0, Signature::default(), 0 )) .unwrap() .is_none()); // insert value let status = TransactionStatusMeta { status: solana_sdk::transaction::Result::<()>::Err( TransactionError::AccountNotFound, ), fee: 5u64, pre_balances: pre_balances_vec.clone(), post_balances: post_balances_vec.clone(), inner_instructions: Some(inner_instructions_vec.clone()), log_messages: Some(log_messages_vec.clone()), pre_token_balances: Some(pre_token_balances_vec.clone()), post_token_balances: Some(post_token_balances_vec.clone()), } .into(); assert!(transaction_status_cf .put_protobuf((0, Signature::default(), 0), &status,) .is_ok()); // result found let TransactionStatusMeta { status, fee, pre_balances, post_balances, inner_instructions, log_messages, pre_token_balances, post_token_balances, } = transaction_status_cf .get_protobuf_or_bincode::<StoredTransactionStatusMeta>(( 0, Signature::default(), 0, )) .unwrap() .unwrap() .try_into() .unwrap(); assert_eq!(status, Err(TransactionError::AccountNotFound)); assert_eq!(fee, 5u64); assert_eq!(pre_balances, pre_balances_vec); assert_eq!(post_balances, post_balances_vec); assert_eq!(inner_instructions.unwrap(), inner_instructions_vec); assert_eq!(log_messages.unwrap(), log_messages_vec); assert_eq!(pre_token_balances.unwrap(), pre_token_balances_vec); assert_eq!(post_token_balances.unwrap(), post_token_balances_vec); // insert value let status = TransactionStatusMeta { status: solana_sdk::transaction::Result::<()>::Ok(()), fee: 9u64, pre_balances: pre_balances_vec.clone(), post_balances: post_balances_vec.clone(), inner_instructions: Some(inner_instructions_vec.clone()), log_messages: Some(log_messages_vec.clone()), pre_token_balances: Some(pre_token_balances_vec.clone()), post_token_balances: Some(post_token_balances_vec.clone()), } .into(); assert!(transaction_status_cf .put_protobuf((0, Signature::new(&[2u8; 64]), 9), &status,) .is_ok()); // result found let TransactionStatusMeta { status, fee, pre_balances, post_balances, inner_instructions, log_messages, pre_token_balances, post_token_balances, } = transaction_status_cf .get_protobuf_or_bincode::<StoredTransactionStatusMeta>(( 0, Signature::new(&[2u8; 64]), 9, )) .unwrap() .unwrap() .try_into() .unwrap(); // deserialize assert_eq!(status, Ok(())); assert_eq!(fee, 9u64); assert_eq!(pre_balances, pre_balances_vec); assert_eq!(post_balances, post_balances_vec); assert_eq!(inner_instructions.unwrap(), inner_instructions_vec); assert_eq!(log_messages.unwrap(), log_messages_vec); assert_eq!(pre_token_balances.unwrap(), pre_token_balances_vec); assert_eq!(post_token_balances.unwrap(), post_token_balances_vec); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] #[allow(clippy::cognitive_complexity)] fn test_transaction_status_index() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); let transaction_status_index_cf = blockstore.db.column::<cf::TransactionStatusIndex>(); let slot0 = 10; // Primary index column is initialized on Blockstore::open assert!(transaction_status_index_cf.get(0).unwrap().is_some()); assert!(transaction_status_index_cf.get(1).unwrap().is_some()); for _ in 0..5 { let random_bytes: Vec<u8> = (0..64).map(|_| rand::random::<u8>()).collect(); blockstore .write_transaction_status( slot0, Signature::new(&random_bytes), 
vec![&Pubkey::new(&random_bytes[0..32])], vec![&Pubkey::new(&random_bytes[32..])], TransactionStatusMeta::default(), ) .unwrap(); } // New statuses bump index 0 max_slot assert_eq!( transaction_status_index_cf.get(0).unwrap().unwrap(), TransactionStatusIndexMeta { max_slot: slot0, frozen: false, } ); assert_eq!( transaction_status_index_cf.get(1).unwrap().unwrap(), TransactionStatusIndexMeta::default() ); let first_status_entry = blockstore .db .iter::<cf::TransactionStatus>(IteratorMode::From( cf::TransactionStatus::as_index(0), IteratorDirection::Forward, )) .unwrap() .next() .unwrap() .0; assert_eq!(first_status_entry.0, 0); assert_eq!(first_status_entry.2, slot0); let first_address_entry = blockstore .db .iter::<cf::AddressSignatures>(IteratorMode::From( cf::AddressSignatures::as_index(0), IteratorDirection::Forward, )) .unwrap() .next() .unwrap() .0; assert_eq!(first_address_entry.0, 0); assert_eq!(first_address_entry.2, slot0); blockstore.run_purge(0, 8, PurgeType::PrimaryIndex).unwrap(); // First successful prune freezes index 0 assert_eq!( transaction_status_index_cf.get(0).unwrap().unwrap(), TransactionStatusIndexMeta { max_slot: slot0, frozen: true, } ); assert_eq!( transaction_status_index_cf.get(1).unwrap().unwrap(), TransactionStatusIndexMeta::default() ); let slot1 = 20; for _ in 0..5 { let random_bytes: Vec<u8> = (0..64).map(|_| rand::random::<u8>()).collect(); blockstore .write_transaction_status( slot1, Signature::new(&random_bytes), vec![&Pubkey::new(&random_bytes[0..32])], vec![&Pubkey::new(&random_bytes[32..])], TransactionStatusMeta::default(), ) .unwrap(); } assert_eq!( transaction_status_index_cf.get(0).unwrap().unwrap(), TransactionStatusIndexMeta { max_slot: slot0, frozen: true, } ); // Index 0 is frozen, so new statuses bump index 1 max_slot assert_eq!( transaction_status_index_cf.get(1).unwrap().unwrap(), TransactionStatusIndexMeta { max_slot: slot1, frozen: false, } ); // Index 0 statuses and address records still exist let first_status_entry = blockstore .db .iter::<cf::TransactionStatus>(IteratorMode::From( cf::TransactionStatus::as_index(0), IteratorDirection::Forward, )) .unwrap() .next() .unwrap() .0; assert_eq!(first_status_entry.0, 0); assert_eq!(first_status_entry.2, 10); let first_address_entry = blockstore .db .iter::<cf::AddressSignatures>(IteratorMode::From( cf::AddressSignatures::as_index(0), IteratorDirection::Forward, )) .unwrap() .next() .unwrap() .0; assert_eq!(first_address_entry.0, 0); assert_eq!(first_address_entry.2, slot0); // New statuses and address records are stored in index 1 let index1_first_status_entry = blockstore .db .iter::<cf::TransactionStatus>(IteratorMode::From( cf::TransactionStatus::as_index(1), IteratorDirection::Forward, )) .unwrap() .next() .unwrap() .0; assert_eq!(index1_first_status_entry.0, 1); assert_eq!(index1_first_status_entry.2, slot1); let index1_first_address_entry = blockstore .db .iter::<cf::AddressSignatures>(IteratorMode::From( cf::AddressSignatures::as_index(1), IteratorDirection::Forward, )) .unwrap() .next() .unwrap() .0; assert_eq!(index1_first_address_entry.0, 1); assert_eq!(index1_first_address_entry.2, slot1); blockstore .run_purge(0, 18, PurgeType::PrimaryIndex) .unwrap(); // Successful prune toggles TransactionStatusIndex assert_eq!( transaction_status_index_cf.get(0).unwrap().unwrap(), TransactionStatusIndexMeta { max_slot: 0, frozen: false, } ); assert_eq!( transaction_status_index_cf.get(1).unwrap().unwrap(), TransactionStatusIndexMeta { max_slot: slot1, frozen: true, } ); // Index 0 has been 
pruned, so first status and address entries are now index 1 let first_status_entry = blockstore .db .iter::<cf::TransactionStatus>(IteratorMode::From( cf::TransactionStatus::as_index(0), IteratorDirection::Forward, )) .unwrap() .next() .unwrap() .0; assert_eq!(first_status_entry.0, 1); assert_eq!(first_status_entry.2, slot1); let first_address_entry = blockstore .db .iter::<cf::AddressSignatures>(IteratorMode::From( cf::AddressSignatures::as_index(0), IteratorDirection::Forward, )) .unwrap() .next() .unwrap() .0; assert_eq!(first_address_entry.0, 1); assert_eq!(first_address_entry.2, slot1); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_get_transaction_status() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); // TransactionStatus column opens initialized with one entry at index 2 let transaction_status_cf = blockstore.db.column::<cf::TransactionStatus>(); let pre_balances_vec = vec![1, 2, 3]; let post_balances_vec = vec![3, 2, 1]; let status = TransactionStatusMeta { status: solana_sdk::transaction::Result::<()>::Ok(()), fee: 42u64, pre_balances: pre_balances_vec, post_balances: post_balances_vec, inner_instructions: Some(vec![]), log_messages: Some(vec![]), pre_token_balances: Some(vec![]), post_token_balances: Some(vec![]), } .into(); let signature1 = Signature::new(&[1u8; 64]); let signature2 = Signature::new(&[2u8; 64]); let signature3 = Signature::new(&[3u8; 64]); let signature4 = Signature::new(&[4u8; 64]); let signature5 = Signature::new(&[5u8; 64]); let signature6 = Signature::new(&[6u8; 64]); let signature7 = Signature::new(&[7u8; 64]); // Insert slots with fork // 0 (root) // / \ // 1 | // 2 (root) // | // 3 let meta0 = SlotMeta::new(0, 0); blockstore.meta_cf.put(0, &meta0).unwrap(); let meta1 = SlotMeta::new(1, 0); blockstore.meta_cf.put(1, &meta1).unwrap(); let meta2 = SlotMeta::new(2, 0); blockstore.meta_cf.put(2, &meta2).unwrap(); let meta3 = SlotMeta::new(3, 2); blockstore.meta_cf.put(3, &meta3).unwrap(); blockstore.set_roots(&[0, 2]).unwrap(); // Initialize index 0, including: // signature2 in non-root and root, // signature4 in non-root, // signature5 in skipped slot and non-root, // signature6 in skipped slot, transaction_status_cf .put_protobuf((0, signature2, 1), &status) .unwrap(); transaction_status_cf .put_protobuf((0, signature2, 2), &status) .unwrap(); transaction_status_cf .put_protobuf((0, signature4, 1), &status) .unwrap(); transaction_status_cf .put_protobuf((0, signature5, 1), &status) .unwrap(); transaction_status_cf .put_protobuf((0, signature5, 3), &status) .unwrap(); transaction_status_cf .put_protobuf((0, signature6, 1), &status) .unwrap(); // Initialize index 1, including: // signature4 in root, // signature6 in non-root, // signature5 extra entries transaction_status_cf .put_protobuf((1, signature4, 2), &status) .unwrap(); transaction_status_cf .put_protobuf((1, signature5, 4), &status) .unwrap(); transaction_status_cf .put_protobuf((1, signature5, 5), &status) .unwrap(); transaction_status_cf .put_protobuf((1, signature6, 3), &status) .unwrap(); // Signature exists, root found in index 0 if let (Some((slot, _status)), counter) = blockstore .get_transaction_status_with_counter(signature2, &[]) .unwrap() { assert_eq!(slot, 2); assert_eq!(counter, 2); } // Signature exists, root found although not required if let (Some((slot, _status)), counter) = blockstore .get_transaction_status_with_counter(signature2, &[3]) .unwrap() { 
assert_eq!(slot, 2); assert_eq!(counter, 2); } // Signature exists, root found in index 1 if let (Some((slot, _status)), counter) = blockstore .get_transaction_status_with_counter(signature4, &[]) .unwrap() { assert_eq!(slot, 2); assert_eq!(counter, 3); } // Signature exists, root found although not required, in index 1 if let (Some((slot, _status)), counter) = blockstore .get_transaction_status_with_counter(signature4, &[3]) .unwrap() { assert_eq!(slot, 2); assert_eq!(counter, 3); } // Signature exists, no root found let (status, counter) = blockstore .get_transaction_status_with_counter(signature5, &[]) .unwrap(); assert_eq!(status, None); assert_eq!(counter, 6); // Signature exists, root not required if let (Some((slot, _status)), counter) = blockstore .get_transaction_status_with_counter(signature5, &[3]) .unwrap() { assert_eq!(slot, 3); assert_eq!(counter, 2); } // Signature does not exist, smaller than existing entries let (status, counter) = blockstore .get_transaction_status_with_counter(signature1, &[]) .unwrap(); assert_eq!(status, None); assert_eq!(counter, 2); let (status, counter) = blockstore .get_transaction_status_with_counter(signature1, &[3]) .unwrap(); assert_eq!(status, None); assert_eq!(counter, 2); // Signature does not exist, between existing entries let (status, counter) = blockstore .get_transaction_status_with_counter(signature3, &[]) .unwrap(); assert_eq!(status, None); assert_eq!(counter, 2); let (status, counter) = blockstore .get_transaction_status_with_counter(signature3, &[3]) .unwrap(); assert_eq!(status, None); assert_eq!(counter, 2); // Signature does not exist, larger than existing entries let (status, counter) = blockstore .get_transaction_status_with_counter(signature7, &[]) .unwrap(); assert_eq!(status, None); assert_eq!(counter, 2); let (status, counter) = blockstore .get_transaction_status_with_counter(signature7, &[3]) .unwrap(); assert_eq!(status, None); assert_eq!(counter, 2); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_get_rooted_transaction() { let slot = 2; let entries = make_slot_entries_with_transactions(5); let shreds = entries_to_test_shreds(entries.clone(), slot, slot - 1, true, 0); let ledger_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&ledger_path).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap(); blockstore.set_roots(&[slot - 1, slot]).unwrap(); let expected_transactions: Vec<TransactionWithStatusMeta> = entries .iter() .cloned() .filter(|entry| !entry.is_tick()) .flat_map(|entry| entry.transactions) .map(|transaction| { let mut pre_balances: Vec<u64> = vec![]; let mut post_balances: Vec<u64> = vec![]; for (i, _account_key) in transaction.message.account_keys.iter().enumerate() { pre_balances.push(i as u64 * 10); post_balances.push(i as u64 * 11); } let inner_instructions = Some(vec![InnerInstructions { index: 0, instructions: vec![CompiledInstruction::new(1, &(), vec![0])], }]); let log_messages = Some(vec![String::from("Test message\n")]); let pre_token_balances = Some(vec![]); let post_token_balances = Some(vec![]); let signature = transaction.signatures[0]; let status = TransactionStatusMeta { status: Ok(()), fee: 42, pre_balances: pre_balances.clone(), post_balances: post_balances.clone(), inner_instructions: inner_instructions.clone(), log_messages: log_messages.clone(), pre_token_balances: pre_token_balances.clone(), post_token_balances: post_token_balances.clone(), } .into(); blockstore .transaction_status_cf .put_protobuf((0, 
signature, slot), &status) .unwrap(); TransactionWithStatusMeta { transaction, meta: Some(TransactionStatusMeta { status: Ok(()), fee: 42, pre_balances, post_balances, inner_instructions, log_messages, pre_token_balances, post_token_balances, }), } }) .collect(); for transaction in expected_transactions.clone() { let signature = transaction.transaction.signatures[0]; assert_eq!( blockstore.get_rooted_transaction(signature).unwrap(), Some(ConfirmedTransaction { slot, transaction: transaction.clone(), block_time: None }) ); assert_eq!( blockstore .get_complete_transaction(signature, slot + 1) .unwrap(), Some(ConfirmedTransaction { slot, transaction, block_time: None }) ); } blockstore.run_purge(0, 2, PurgeType::PrimaryIndex).unwrap(); *blockstore.lowest_cleanup_slot.write().unwrap() = slot; for TransactionWithStatusMeta { transaction, .. } in expected_transactions { let signature = transaction.signatures[0]; assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None,); assert_eq!( blockstore .get_complete_transaction(signature, slot + 1) .unwrap(), None, ); } } #[test] fn test_get_complete_transaction() { let slot = 2; let entries = make_slot_entries_with_transactions(5); let shreds = entries_to_test_shreds(entries.clone(), slot, slot - 1, true, 0); let ledger_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&ledger_path).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap(); let expected_transactions: Vec<TransactionWithStatusMeta> = entries .iter() .cloned() .filter(|entry| !entry.is_tick()) .flat_map(|entry| entry.transactions) .map(|transaction| { let mut pre_balances: Vec<u64> = vec![]; let mut post_balances: Vec<u64> = vec![]; for (i, _account_key) in transaction.message.account_keys.iter().enumerate() { pre_balances.push(i as u64 * 10); post_balances.push(i as u64 * 11); } let inner_instructions = Some(vec![InnerInstructions { index: 0, instructions: vec![CompiledInstruction::new(1, &(), vec![0])], }]); let log_messages = Some(vec![String::from("Test message\n")]); let pre_token_balances = Some(vec![]); let post_token_balances = Some(vec![]); let signature = transaction.signatures[0]; let status = TransactionStatusMeta { status: Ok(()), fee: 42, pre_balances: pre_balances.clone(), post_balances: post_balances.clone(), inner_instructions: inner_instructions.clone(), log_messages: log_messages.clone(), pre_token_balances: pre_token_balances.clone(), post_token_balances: post_token_balances.clone(), } .into(); blockstore .transaction_status_cf .put_protobuf((0, signature, slot), &status) .unwrap(); TransactionWithStatusMeta { transaction, meta: Some(TransactionStatusMeta { status: Ok(()), fee: 42, pre_balances, post_balances, inner_instructions, log_messages, pre_token_balances, post_token_balances, }), } }) .collect(); for transaction in expected_transactions.clone() { let signature = transaction.transaction.signatures[0]; assert_eq!( blockstore .get_complete_transaction(signature, slot) .unwrap(), Some(ConfirmedTransaction { slot, transaction, block_time: None }) ); assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None); } blockstore.run_purge(0, 2, PurgeType::PrimaryIndex).unwrap(); *blockstore.lowest_cleanup_slot.write().unwrap() = slot; for TransactionWithStatusMeta { transaction, .. 
} in expected_transactions { let signature = transaction.signatures[0]; assert_eq!( blockstore .get_complete_transaction(signature, slot) .unwrap(), None, ); assert_eq!(blockstore.get_rooted_transaction(signature).unwrap(), None,); } } #[test] fn test_empty_transaction_status() { let blockstore_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&blockstore_path).unwrap(); blockstore.set_roots(&[0]).unwrap(); assert_eq!( blockstore .get_rooted_transaction(Signature::default()) .unwrap(), None ); } #[test] fn test_get_confirmed_signatures_for_address() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); let address0 = solana_sdk::pubkey::new_rand(); let address1 = solana_sdk::pubkey::new_rand(); let slot0 = 10; for x in 1..5 { let signature = Signature::new(&[x; 64]); blockstore .write_transaction_status( slot0, signature, vec![&address0], vec![&address1], TransactionStatusMeta::default(), ) .unwrap(); } // Purge to freeze index 0 blockstore.run_purge(0, 1, PurgeType::PrimaryIndex).unwrap(); let slot1 = 20; for x in 5..9 { let signature = Signature::new(&[x; 64]); blockstore .write_transaction_status( slot1, signature, vec![&address0], vec![&address1], TransactionStatusMeta::default(), ) .unwrap(); } blockstore.set_roots(&[slot0, slot1]).unwrap(); let all0 = blockstore .get_confirmed_signatures_for_address(address0, 0, 50) .unwrap(); assert_eq!(all0.len(), 8); for x in 1..9 { let expected_signature = Signature::new(&[x; 64]); assert_eq!(all0[x as usize - 1], expected_signature); } assert_eq!( blockstore .get_confirmed_signatures_for_address(address0, 20, 50) .unwrap() .len(), 4 ); assert_eq!( blockstore .get_confirmed_signatures_for_address(address0, 0, 10) .unwrap() .len(), 4 ); assert!(blockstore .get_confirmed_signatures_for_address(address0, 1, 5) .unwrap() .is_empty()); assert_eq!( blockstore .get_confirmed_signatures_for_address(address0, 1, 15) .unwrap() .len(), 4 ); let all1 = blockstore .get_confirmed_signatures_for_address(address1, 0, 50) .unwrap(); assert_eq!(all1.len(), 8); for x in 1..9 { let expected_signature = Signature::new(&[x; 64]); assert_eq!(all1[x as usize - 1], expected_signature); } // Purge index 0 blockstore .run_purge(0, 10, PurgeType::PrimaryIndex) .unwrap(); assert_eq!( blockstore .get_confirmed_signatures_for_address(address0, 0, 50) .unwrap() .len(), 4 ); assert_eq!( blockstore .get_confirmed_signatures_for_address(address0, 20, 50) .unwrap() .len(), 4 ); assert!(blockstore .get_confirmed_signatures_for_address(address0, 0, 10) .unwrap() .is_empty()); assert!(blockstore .get_confirmed_signatures_for_address(address0, 1, 5) .unwrap() .is_empty()); assert_eq!( blockstore .get_confirmed_signatures_for_address(address0, 1, 25) .unwrap() .len(), 4 ); // Test sort, regardless of entry order or signature value for slot in (21..25).rev() { let random_bytes: Vec<u8> = (0..64).map(|_| rand::random::<u8>()).collect(); let signature = Signature::new(&random_bytes); blockstore .write_transaction_status( slot, signature, vec![&address0], vec![&address1], TransactionStatusMeta::default(), ) .unwrap(); } blockstore.set_roots(&[21, 22, 23, 24]).unwrap(); let mut past_slot = 0; for (slot, _) in blockstore.find_address_signatures(address0, 1, 25).unwrap() { assert!(slot >= past_slot); past_slot = slot; } } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_find_address_signatures_for_slot() { let blockstore_path = get_tmp_ledger_path!(); { let 
blockstore = Blockstore::open(&blockstore_path).unwrap(); let address0 = solana_sdk::pubkey::new_rand(); let address1 = solana_sdk::pubkey::new_rand(); let slot1 = 1; for x in 1..5 { let signature = Signature::new(&[x; 64]); blockstore .write_transaction_status( slot1, signature, vec![&address0], vec![&address1], TransactionStatusMeta::default(), ) .unwrap(); } let slot2 = 2; for x in 5..7 { let signature = Signature::new(&[x; 64]); blockstore .write_transaction_status( slot2, signature, vec![&address0], vec![&address1], TransactionStatusMeta::default(), ) .unwrap(); } // Purge to freeze index 0 blockstore.run_purge(0, 1, PurgeType::PrimaryIndex).unwrap(); for x in 7..9 { let signature = Signature::new(&[x; 64]); blockstore .write_transaction_status( slot2, signature, vec![&address0], vec![&address1], TransactionStatusMeta::default(), ) .unwrap(); } let slot3 = 3; for x in 9..13 { let signature = Signature::new(&[x; 64]); blockstore .write_transaction_status( slot3, signature, vec![&address0], vec![&address1], TransactionStatusMeta::default(), ) .unwrap(); } blockstore.set_roots(&[slot1]).unwrap(); let slot1_signatures = blockstore .find_address_signatures_for_slot(address0, 1) .unwrap(); for (i, (slot, signature)) in slot1_signatures.iter().enumerate() { assert_eq!(*slot, slot1); assert_eq!(*signature, Signature::new(&[i as u8 + 1; 64])); } let slot2_signatures = blockstore .find_address_signatures_for_slot(address0, 2) .unwrap(); for (i, (slot, signature)) in slot2_signatures.iter().enumerate() { assert_eq!(*slot, slot2); assert_eq!(*signature, Signature::new(&[i as u8 + 5; 64])); } let slot3_signatures = blockstore .find_address_signatures_for_slot(address0, 3) .unwrap(); for (i, (slot, signature)) in slot3_signatures.iter().enumerate() { assert_eq!(*slot, slot3); assert_eq!(*signature, Signature::new(&[i as u8 + 9; 64])); } } } #[test] fn test_get_confirmed_signatures_for_address2() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); fn make_slot_entries_with_transaction_addresses(addresses: &[Pubkey]) -> Vec<Entry> { let mut entries: Vec<Entry> = Vec::new(); for address in addresses { let transaction = Transaction::new_with_compiled_instructions( &[&Keypair::new()], &[*address], Hash::default(), vec![solana_sdk::pubkey::new_rand()], vec![CompiledInstruction::new(1, &(), vec![0])], ); entries.push(next_entry_mut(&mut Hash::default(), 0, vec![transaction])); let mut tick = create_ticks(1, 0, hash(&serialize(address).unwrap())); entries.append(&mut tick); } entries } let address0 = solana_sdk::pubkey::new_rand(); let address1 = solana_sdk::pubkey::new_rand(); for slot in 2..=8 { let entries = make_slot_entries_with_transaction_addresses(&[ address0, address1, address0, address1, ]); let shreds = entries_to_test_shreds(entries.clone(), slot, slot - 1, true, 0); blockstore.insert_shreds(shreds, None, false).unwrap(); for (i, entry) in entries.iter().enumerate() { if slot == 4 && i == 2 { // Purge to freeze index 0 and write address-signatures in new primary index blockstore.run_purge(0, 1, PurgeType::PrimaryIndex).unwrap(); } for transaction in &entry.transactions { assert_eq!(transaction.signatures.len(), 1); blockstore .write_transaction_status( slot, transaction.signatures[0], transaction.message.account_keys.iter().collect(), vec![], TransactionStatusMeta::default(), ) .unwrap(); } } } // Add 2 slots that both descend from slot 8 for slot in 9..=10 { let entries = make_slot_entries_with_transaction_addresses(&[ address0, 
address1, address0, address1, ]); let shreds = entries_to_test_shreds(entries.clone(), slot, 8, true, 0); blockstore.insert_shreds(shreds, None, false).unwrap(); for entry in entries.iter() { for transaction in &entry.transactions { assert_eq!(transaction.signatures.len(), 1); blockstore .write_transaction_status( slot, transaction.signatures[0], transaction.message.account_keys.iter().collect(), vec![], TransactionStatusMeta::default(), ) .unwrap(); } } } // Leave one slot unrooted to test only returns confirmed signatures blockstore.set_roots(&[1, 2, 4, 5, 6, 7, 8]).unwrap(); let highest_confirmed_root = 8; // Fetch all rooted signatures for address 0 at once... let all0 = blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_root, None, None, usize::MAX, ) .unwrap(); assert_eq!(all0.len(), 12); // Fetch all rooted signatures for address 1 at once... let all1 = blockstore .get_confirmed_signatures_for_address2( address1, highest_confirmed_root, None, None, usize::MAX, ) .unwrap(); assert_eq!(all1.len(), 12); // Fetch all signatures for address 0 individually for i in 0..all0.len() { let results = blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_root, if i == 0 { None } else { Some(all0[i - 1].signature) }, None, 1, ) .unwrap(); assert_eq!(results.len(), 1); assert_eq!(results[0], all0[i], "Unexpected result for {}", i); } // Fetch all signatures for address 0 individually using `until` for i in 0..all0.len() { let results = blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_root, if i == 0 { None } else { Some(all0[i - 1].signature) }, if i == all0.len() - 1 || i == all0.len() { None } else { Some(all0[i + 1].signature) }, 10, ) .unwrap(); assert_eq!(results.len(), 1); assert_eq!(results[0], all0[i], "Unexpected result for {}", i); } assert!(blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_root, Some(all0[all0.len() - 1].signature), None, 1, ) .unwrap() .is_empty()); assert!(blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_root, None, Some(all0[0].signature), 2, ) .unwrap() .is_empty()); // Fetch all signatures for address 0, three at a time assert!(all0.len() % 3 == 0); for i in (0..all0.len()).step_by(3) { let results = blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_root, if i == 0 { None } else { Some(all0[i - 1].signature) }, None, 3, ) .unwrap(); assert_eq!(results.len(), 3); assert_eq!(results[0], all0[i]); assert_eq!(results[1], all0[i + 1]); assert_eq!(results[2], all0[i + 2]); } // Ensure that the signatures within a slot are reverse ordered by signature // (current limitation of the .get_confirmed_signatures_for_address2()) for i in (0..all1.len()).step_by(2) { let results = blockstore .get_confirmed_signatures_for_address2( address1, highest_confirmed_root, if i == 0 { None } else { Some(all1[i - 1].signature) }, None, 2, ) .unwrap(); assert_eq!(results.len(), 2); assert_eq!(results[0].slot, results[1].slot); assert!(results[0].signature >= results[1].signature); assert_eq!(results[0], all1[i]); assert_eq!(results[1], all1[i + 1]); } // A search for address 0 with `before` and/or `until` signatures from address1 should also work let results = blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_root, Some(all1[0].signature), None, usize::MAX, ) .unwrap(); // The exact number of results returned is variable, based on the sort order of the // random signatures that are generated 
assert!(!results.is_empty()); let results2 = blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_root, Some(all1[0].signature), Some(all1[4].signature), usize::MAX, ) .unwrap(); assert!(results2.len() < results.len()); // Duplicate all tests using confirmed signatures let highest_confirmed_slot = 10; // Fetch all signatures for address 0 at once... let all0 = blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_slot, None, None, usize::MAX, ) .unwrap(); assert_eq!(all0.len(), 14); // Fetch all signatures for address 1 at once... let all1 = blockstore .get_confirmed_signatures_for_address2( address1, highest_confirmed_slot, None, None, usize::MAX, ) .unwrap(); assert_eq!(all1.len(), 14); // Fetch all signatures for address 0 individually for i in 0..all0.len() { let results = blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_slot, if i == 0 { None } else { Some(all0[i - 1].signature) }, None, 1, ) .unwrap(); assert_eq!(results.len(), 1); assert_eq!(results[0], all0[i], "Unexpected result for {}", i); } // Fetch all signatures for address 0 individually using `until` for i in 0..all0.len() { let results = blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_slot, if i == 0 { None } else { Some(all0[i - 1].signature) }, if i == all0.len() - 1 || i == all0.len() { None } else { Some(all0[i + 1].signature) }, 10, ) .unwrap(); assert_eq!(results.len(), 1); assert_eq!(results[0], all0[i], "Unexpected result for {}", i); } assert!(blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_slot, Some(all0[all0.len() - 1].signature), None, 1, ) .unwrap() .is_empty()); assert!(blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_slot, None, Some(all0[0].signature), 2, ) .unwrap() .is_empty()); // Fetch all signatures for address 0, three at a time assert!(all0.len() % 3 == 2); for i in (0..all0.len()).step_by(3) { let results = blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_slot, if i == 0 { None } else { Some(all0[i - 1].signature) }, None, 3, ) .unwrap(); if i < 12 { assert_eq!(results.len(), 3); assert_eq!(results[2], all0[i + 2]); } else { assert_eq!(results.len(), 2); } assert_eq!(results[0], all0[i]); assert_eq!(results[1], all0[i + 1]); } // Ensure that the signatures within a slot are reverse ordered by signature // (current limitation of the .get_confirmed_signatures_for_address2()) for i in (0..all1.len()).step_by(2) { let results = blockstore .get_confirmed_signatures_for_address2( address1, highest_confirmed_slot, if i == 0 { None } else { Some(all1[i - 1].signature) }, None, 2, ) .unwrap(); assert_eq!(results.len(), 2); assert_eq!(results[0].slot, results[1].slot); assert!(results[0].signature >= results[1].signature); assert_eq!(results[0], all1[i]); assert_eq!(results[1], all1[i + 1]); } // A search for address 0 with `before` and/or `until` signatures from address1 should also work let results = blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_slot, Some(all1[0].signature), None, usize::MAX, ) .unwrap(); // The exact number of results returned is variable, based on the sort order of the // random signatures that are generated assert!(!results.is_empty()); let results2 = blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_slot, Some(all1[0].signature), Some(all1[4].signature), usize::MAX, ) .unwrap(); assert!(results2.len() < results.len()); } 
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] #[allow(clippy::same_item_push)] fn test_get_last_hash() { let mut entries: Vec<Entry> = vec![]; let empty_entries_iterator = entries.iter(); assert!(get_last_hash(empty_entries_iterator).is_none()); let mut prev_hash = hash::hash(&[42u8]); for _ in 0..10 { let entry = next_entry(&prev_hash, 1, vec![]); prev_hash = entry.hash; entries.push(entry); } let entries_iterator = entries.iter(); assert_eq!(get_last_hash(entries_iterator).unwrap(), entries[9].hash); } #[test] fn test_map_transactions_to_statuses() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); let transaction_status_cf = blockstore.db.column::<cf::TransactionStatus>(); let slot = 0; let mut transactions: Vec<Transaction> = vec![]; for x in 0..4 { let transaction = Transaction::new_with_compiled_instructions( &[&Keypair::new()], &[solana_sdk::pubkey::new_rand()], Hash::default(), vec![solana_sdk::pubkey::new_rand()], vec![CompiledInstruction::new(1, &(), vec![0])], ); let status = TransactionStatusMeta { status: solana_sdk::transaction::Result::<()>::Err( TransactionError::AccountNotFound, ), fee: x, pre_balances: vec![], post_balances: vec![], inner_instructions: Some(vec![]), log_messages: Some(vec![]), pre_token_balances: Some(vec![]), post_token_balances: Some(vec![]), } .into(); transaction_status_cf .put_protobuf((0, transaction.signatures[0], slot), &status) .unwrap(); transactions.push(transaction); } // Push transaction that will not have matching status, as a test case transactions.push(Transaction::new_with_compiled_instructions( &[&Keypair::new()], &[solana_sdk::pubkey::new_rand()], Hash::default(), vec![solana_sdk::pubkey::new_rand()], vec![CompiledInstruction::new(1, &(), vec![0])], )); let map = blockstore.map_transactions_to_statuses(slot, transactions.into_iter()); assert_eq!(map.len(), 5); for (x, m) in map.iter().take(4).enumerate() { assert_eq!(m.meta.as_ref().unwrap().fee, x as u64); } assert_eq!(map[4].meta, None); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_write_get_perf_samples() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); let num_entries: usize = 10; let mut perf_samples: Vec<(Slot, PerfSample)> = vec![]; for x in 1..num_entries + 1 { perf_samples.push(( x as u64 * 50, PerfSample { num_transactions: 1000 + x as u64, num_slots: 50, sample_period_secs: 20, }, )); } for (slot, sample) in perf_samples.iter() { blockstore.write_perf_sample(*slot, sample).unwrap(); } for x in 0..num_entries { let mut expected_samples = perf_samples[num_entries - 1 - x..].to_vec(); expected_samples.sort_by(|a, b| b.0.cmp(&a.0)); assert_eq!( blockstore.get_recent_perf_samples(x + 1).unwrap(), expected_samples ); } } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_lowest_slot() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); for i in 0..10 { let slot = i; let (shreds, _) = make_slot_entries(slot, 0, 1); blockstore.insert_shreds(shreds, None, false).unwrap(); } assert_eq!(blockstore.lowest_slot(), 1); blockstore.run_purge(0, 5, PurgeType::PrimaryIndex).unwrap(); assert_eq!(blockstore.lowest_slot(), 6); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn 
test_recovery() { let slot = 1; let (data_shreds, coding_shreds, leader_schedule_cache) = setup_erasure_shreds(slot, 0, 100); let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); blockstore .insert_shreds(coding_shreds, Some(&leader_schedule_cache), false) .unwrap(); let shred_bufs: Vec<_> = data_shreds .iter() .map(|shred| shred.payload.clone()) .collect(); // Check all the data shreds were recovered for (s, buf) in data_shreds.iter().zip(shred_bufs) { assert_eq!( blockstore .get_data_shred(s.slot(), s.index() as u64) .unwrap() .unwrap(), buf ); } verify_index_integrity(&blockstore, slot); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_index_integrity() { let slot = 1; let num_entries = 100; let (data_shreds, coding_shreds, leader_schedule_cache) = setup_erasure_shreds(slot, 0, num_entries); assert!(data_shreds.len() > 3); assert!(coding_shreds.len() > 3); let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); // Test inserting all the shreds let all_shreds: Vec<_> = data_shreds .iter() .cloned() .chain(coding_shreds.iter().cloned()) .collect(); blockstore .insert_shreds(all_shreds, Some(&leader_schedule_cache), false) .unwrap(); verify_index_integrity(&blockstore, slot); blockstore.purge_and_compact_slots(0, slot); // Test inserting just the codes, enough for recovery blockstore .insert_shreds(coding_shreds.clone(), Some(&leader_schedule_cache), false) .unwrap(); verify_index_integrity(&blockstore, slot); blockstore.purge_and_compact_slots(0, slot); // Test inserting some codes, but not enough for recovery blockstore .insert_shreds( coding_shreds[..coding_shreds.len() - 1].to_vec(), Some(&leader_schedule_cache), false, ) .unwrap(); verify_index_integrity(&blockstore, slot); blockstore.purge_and_compact_slots(0, slot); // Test inserting just the codes, and some data, enough for recovery let shreds: Vec<_> = data_shreds[..data_shreds.len() - 1] .iter() .cloned() .chain(coding_shreds[..coding_shreds.len() - 1].iter().cloned()) .collect(); blockstore .insert_shreds(shreds, Some(&leader_schedule_cache), false) .unwrap(); verify_index_integrity(&blockstore, slot); blockstore.purge_and_compact_slots(0, slot); // Test inserting some codes, and some data, but enough for recovery let shreds: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1] .iter() .cloned() .chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned()) .collect(); blockstore .insert_shreds(shreds, Some(&leader_schedule_cache), false) .unwrap(); verify_index_integrity(&blockstore, slot); blockstore.purge_and_compact_slots(0, slot); // Test inserting all shreds in 2 rounds, make sure nothing is lost let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1] .iter() .cloned() .chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned()) .collect(); let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 1..] 
.iter() .cloned() .chain(coding_shreds[coding_shreds.len() / 2 - 1..].iter().cloned()) .collect(); blockstore .insert_shreds(shreds1, Some(&leader_schedule_cache), false) .unwrap(); blockstore .insert_shreds(shreds2, Some(&leader_schedule_cache), false) .unwrap(); verify_index_integrity(&blockstore, slot); blockstore.purge_and_compact_slots(0, slot); // Test not all, but enough data and coding shreds in 2 rounds to trigger recovery, // make sure nothing is lost let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1] .iter() .cloned() .chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned()) .collect(); let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 1..data_shreds.len() / 2] .iter() .cloned() .chain( coding_shreds[coding_shreds.len() / 2 - 1..coding_shreds.len() / 2] .iter() .cloned(), ) .collect(); blockstore .insert_shreds(shreds1, Some(&leader_schedule_cache), false) .unwrap(); blockstore .insert_shreds(shreds2, Some(&leader_schedule_cache), false) .unwrap(); verify_index_integrity(&blockstore, slot); blockstore.purge_and_compact_slots(0, slot); // Test insert shreds in 2 rounds, but not enough to trigger // recovery, make sure nothing is lost let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 2] .iter() .cloned() .chain(coding_shreds[..coding_shreds.len() / 2 - 2].iter().cloned()) .collect(); let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 2..data_shreds.len() / 2 - 1] .iter() .cloned() .chain( coding_shreds[coding_shreds.len() / 2 - 2..coding_shreds.len() / 2 - 1] .iter() .cloned(), ) .collect(); blockstore .insert_shreds(shreds1, Some(&leader_schedule_cache), false) .unwrap(); blockstore .insert_shreds(shreds2, Some(&leader_schedule_cache), false) .unwrap(); verify_index_integrity(&blockstore, slot); blockstore.purge_and_compact_slots(0, slot); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } fn setup_erasure_shreds( slot: u64, parent_slot: u64, num_entries: u64, ) -> (Vec<Shred>, Vec<Shred>, Arc<LeaderScheduleCache>) { let entries = make_slot_entries_with_transactions(num_entries); let leader_keypair = Arc::new(Keypair::new()); let shredder = Shredder::new(slot, parent_slot, leader_keypair.clone(), 0, 0).unwrap(); let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(&entries, true, 0); let genesis_config = create_genesis_config(2).genesis_config; let bank = Arc::new(Bank::new(&genesis_config)); let mut leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank); let fixed_schedule = FixedSchedule { leader_schedule: Arc::new(LeaderSchedule::new_from_schedule(vec![ leader_keypair.pubkey() ])), start_epoch: 0, }; leader_schedule_cache.set_fixed_leader_schedule(Some(fixed_schedule)); (data_shreds, coding_shreds, Arc::new(leader_schedule_cache)) } fn verify_index_integrity(blockstore: &Blockstore, slot: u64) { let index = blockstore.get_index(slot).unwrap().unwrap(); // Test the set of data shreds in the index and in the data column // family are the same let data_iter = blockstore.slot_data_iterator(slot, 0).unwrap(); let mut num_data = 0; for ((slot, index), _) in data_iter { num_data += 1; assert!(blockstore.get_data_shred(slot, index).unwrap().is_some()); } // Test the data index doesn't have anything extra let num_data_in_index = index.data().num_shreds(); assert_eq!(num_data_in_index, num_data); // Test the set of coding shreds in the index and in the coding column // family are the same let coding_iter = blockstore.slot_coding_iterator(slot, 0).unwrap(); let mut num_coding 
= 0; for ((slot, index), _) in coding_iter { num_coding += 1; assert!(blockstore.get_coding_shred(slot, index).unwrap().is_some()); } // Test the data index doesn't have anything extra let num_coding_in_index = index.coding().num_shreds(); assert_eq!(num_coding_in_index, num_coding); } #[test] fn test_duplicate_slot() { let slot = 0; let entries1 = make_slot_entries_with_transactions(1); let entries2 = make_slot_entries_with_transactions(1); let leader_keypair = Arc::new(Keypair::new()); let shredder = Shredder::new(slot, 0, leader_keypair, 0, 0).unwrap(); let (shreds, _, _) = shredder.entries_to_shreds(&entries1, true, 0); let (duplicate_shreds, _, _) = shredder.entries_to_shreds(&entries2, true, 0); let shred = shreds[0].clone(); let duplicate_shred = duplicate_shreds[0].clone(); let non_duplicate_shred = shred.clone(); let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); blockstore .insert_shreds(vec![shred.clone()], None, false) .unwrap(); // No duplicate shreds exist yet assert!(!blockstore.has_duplicate_shreds_in_slot(slot)); // Check if shreds are duplicated assert_eq!( blockstore.is_shred_duplicate( slot, 0, &duplicate_shred.payload, duplicate_shred.is_data() ), Some(shred.payload.to_vec()) ); assert!(blockstore .is_shred_duplicate( slot, 0, &non_duplicate_shred.payload, duplicate_shred.is_data() ) .is_none()); // Store a duplicate shred blockstore .store_duplicate_slot(slot, shred.payload.clone(), duplicate_shred.payload.clone()) .unwrap(); // Slot is now marked as duplicate assert!(blockstore.has_duplicate_shreds_in_slot(slot)); // Check ability to fetch the duplicates let duplicate_proof = blockstore.get_duplicate_slot(slot).unwrap(); assert_eq!(duplicate_proof.shred1, shred.payload); assert_eq!(duplicate_proof.shred2, duplicate_shred.payload); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_clear_unconfirmed_slot() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); let unconfirmed_slot = 9; let unconfirmed_child_slot = 10; let slots = vec![2, unconfirmed_slot, unconfirmed_child_slot]; // Insert into slot 9, mark it as dead let shreds: Vec<_> = make_chaining_slot_entries(&slots, 1) .into_iter() .flat_map(|x| x.0) .collect(); blockstore.insert_shreds(shreds, None, false).unwrap(); // Should only be one shred in slot 9 assert!(blockstore .get_data_shred(unconfirmed_slot, 0) .unwrap() .is_some()); assert!(blockstore .get_data_shred(unconfirmed_slot, 1) .unwrap() .is_none()); blockstore.set_dead_slot(unconfirmed_slot).unwrap(); // Purge the slot blockstore.clear_unconfirmed_slot(unconfirmed_slot); assert!(!blockstore.is_dead(unconfirmed_slot)); assert_eq!( blockstore .meta(unconfirmed_slot) .unwrap() .unwrap() .next_slots, vec![unconfirmed_child_slot] ); assert!(blockstore .get_data_shred(unconfirmed_slot, 0) .unwrap() .is_none()); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_update_completed_data_indexes() { let mut completed_data_indexes: Vec<u32> = vec![]; let mut shred_index = ShredIndex::default(); for i in 0..10 { shred_index.set_present(i as u64, true); assert_eq!( update_completed_data_indexes(true, i, &shred_index, &mut completed_data_indexes), vec![(i, i)] ); assert_eq!(completed_data_indexes, (0..=i).collect::<Vec<u32>>()); } } #[test] fn test_update_completed_data_indexes_out_of_order() { let mut completed_data_indexes = vec![]; 
let mut shred_index = ShredIndex::default(); shred_index.set_present(4, true); assert!( update_completed_data_indexes(false, 4, &shred_index, &mut completed_data_indexes) .is_empty() ); assert!(completed_data_indexes.is_empty()); shred_index.set_present(2, true); assert!( update_completed_data_indexes(false, 2, &shred_index, &mut completed_data_indexes) .is_empty() ); assert!(completed_data_indexes.is_empty()); shred_index.set_present(3, true); assert!( update_completed_data_indexes(true, 3, &shred_index, &mut completed_data_indexes) .is_empty() ); assert_eq!(completed_data_indexes, vec![3]); // Inserting data complete shred 1 now confirms the range of shreds [2, 3] // is part of the same data set shred_index.set_present(1, true); assert_eq!( update_completed_data_indexes(true, 1, &shred_index, &mut completed_data_indexes), vec![(2, 3)] ); assert_eq!(completed_data_indexes, vec![1, 3]); // Inserting data complete shred 0 now confirms the range of shreds [0] // is part of the same data set shred_index.set_present(0, true); assert_eq!( update_completed_data_indexes(true, 0, &shred_index, &mut completed_data_indexes), vec![(0, 0), (1, 1)] ); assert_eq!(completed_data_indexes, vec![0, 1, 3]); } #[test] fn test_rewards_protobuf_backward_compatability() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); let rewards: Rewards = (0..100) .map(|i| Reward { pubkey: solana_sdk::pubkey::new_rand().to_string(), lamports: 42 + i, post_balance: std::u64::MAX, reward_type: Some(RewardType::Fee), }) .collect(); let protobuf_rewards: generated::Rewards = rewards.into(); let deprecated_rewards: StoredExtendedRewards = protobuf_rewards.clone().into(); for slot in 0..2 { let data = serialize(&deprecated_rewards).unwrap(); blockstore.rewards_cf.put_bytes(slot, &data).unwrap(); } for slot in 2..4 { blockstore .rewards_cf .put_protobuf(slot, &protobuf_rewards) .unwrap(); } for slot in 0..4 { assert_eq!( blockstore .rewards_cf .get_protobuf_or_bincode::<StoredExtendedRewards>(slot) .unwrap() .unwrap(), protobuf_rewards ); } } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_transaction_status_protobuf_backward_compatability() { let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); let status = TransactionStatusMeta { status: Ok(()), fee: 42, pre_balances: vec![1, 2, 3], post_balances: vec![1, 2, 3], inner_instructions: Some(vec![]), log_messages: Some(vec![]), pre_token_balances: Some(vec![TransactionTokenBalance { account_index: 0, mint: Pubkey::new_unique().to_string(), ui_token_amount: UiTokenAmount { ui_amount: Some(1.1), decimals: 1, amount: "11".to_string(), ui_amount_string: "1.1".to_string(), }, }]), post_token_balances: Some(vec![TransactionTokenBalance { account_index: 0, mint: Pubkey::new_unique().to_string(), ui_token_amount: UiTokenAmount { ui_amount: None, decimals: 1, amount: "11".to_string(), ui_amount_string: "1.1".to_string(), }, }]), }; let deprecated_status: StoredTransactionStatusMeta = status.clone().into(); let protobuf_status: generated::TransactionStatusMeta = status.into(); for slot in 0..2 { let data = serialize(&deprecated_status).unwrap(); blockstore .transaction_status_cf .put_bytes((0, Signature::default(), slot), &data) .unwrap(); } for slot in 2..4 { blockstore .transaction_status_cf .put_protobuf((0, Signature::default(), slot), &protobuf_status) .unwrap(); } for slot in 0..4 { assert_eq!( blockstore 
.transaction_status_cf .get_protobuf_or_bincode::<StoredTransactionStatusMeta>(( 0, Signature::default(), slot )) .unwrap() .unwrap(), protobuf_status ); } } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_remove_shred_data_complete_flag() { let (mut shreds, entries) = make_slot_entries(0, 0, 1); let ledger_path = get_tmp_ledger_path!(); let ledger = Blockstore::open(&ledger_path).unwrap(); // Remove the data complete flag from the last shred shreds[0].unset_data_complete(); ledger.insert_shreds(shreds, None, false).unwrap(); // Check that the `data_complete` flag was unset in the stored shred, but the // `last_in_slot` flag is set. let stored_shred = &ledger.get_data_shreds_for_slot(0, 0).unwrap()[0]; assert!(!stored_shred.data_complete()); assert!(stored_shred.last_in_slot()); assert_eq!(entries, ledger.get_any_valid_slot_entries(0, 0)); } fn make_large_tx_entry(num_txs: usize) -> Entry { let txs: Vec<_> = (0..num_txs) .into_iter() .map(|_| { let keypair0 = Keypair::new(); let to = solana_sdk::pubkey::new_rand(); solana_sdk::system_transaction::transfer(&keypair0, &to, 1, Hash::default()) }) .collect(); Entry::new(&Hash::default(), 1, txs) } #[test] fn erasure_multiple_config() { solana_logger::setup(); let slot = 1; let parent = 0; let num_txs = 20; let entry = make_large_tx_entry(num_txs); let shreds = entries_to_test_shreds(vec![entry], slot, parent, true, 0); assert!(shreds.len() > 1); let ledger_path = get_tmp_ledger_path!(); let ledger = Blockstore::open(&ledger_path).unwrap(); let coding1 = Shredder::generate_coding_shreds(&shreds, false); let coding2 = Shredder::generate_coding_shreds(&shreds, true); for shred in &shreds { info!("shred {:?}", shred); } for shred in &coding1 { info!("coding1 {:?}", shred); } for shred in &coding2 { info!("coding2 {:?}", shred); } ledger .insert_shreds(shreds[..shreds.len() - 2].to_vec(), None, false) .unwrap(); ledger .insert_shreds(vec![coding1[0].clone(), coding2[1].clone()], None, false) .unwrap(); assert!(ledger.has_duplicate_shreds_in_slot(slot)); } #[test] fn test_large_num_coding() { solana_logger::setup(); let slot = 1; let (_data_shreds, mut coding_shreds, leader_schedule_cache) = setup_erasure_shreds(slot, 0, 100); let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); coding_shreds[1].coding_header.num_coding_shreds = u16::MAX; blockstore .insert_shreds( vec![coding_shreds[1].clone()], Some(&leader_schedule_cache), false, ) .unwrap(); // Check no coding shreds are inserted let res = blockstore.get_coding_shreds_for_slot(slot, 0).unwrap(); assert!(res.is_empty()); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } #[test] fn test_duplicate_last_index() { let num_shreds = 2; let num_entries = max_ticks_per_n_shreds(num_shreds, None); let slot = 1; let (mut shreds, _) = make_slot_entries(slot, 0, num_entries); // Mark both as last shred shreds[0].set_last_in_slot(); shreds[1].set_last_in_slot(); let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap(); assert!(blockstore.get_duplicate_slot(slot).is_some()); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } }
} }; get_until_slot_timer.stop();
DecisionTree2.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Date : 2019-02-03 15:17:08 # @Author : Vophan Lee ([email protected]) # @Link : https://www.jianshu.com/u/3e6114e983ad from sklearn.datasets import make_classification import numpy as np import math class Decision_Tree(object): """ this is a class to build the decision tree """ feature_list = [] gain_list = [] dim_list = [] index = 0 def __init__(self): super(Decision_Tree, self).__init__() self.features = 5 self.samples = 100 self.data = make_classification( n_samples=self.samples, n_features=self.features, n_classes=2) self.empirical_entropy = self.cal_emp_entropy(self.data) def cal_emp_entropy(self, data): """ calculate the empirical entropy """ data_0 = [] data_1 = [] for i in enumerate(data[1]): if i[1] == 0: data_0.append(data[0][i[0]]) else: data_1.append(data[0][i[0]]) entropy = 0 for data_ in [data_0, data_1]: entropy += - \ (len(data_) / len(data[0])) * \ math.log2(len(data_) / len(data[0])) return entropy def div_point(self, dim_data): """ decide the divided point of each feature,here we sopposed that dim_data is a continuous dataset dim_data: tuple """ def dichotomy(dim_data): div_points = np.zeros((1, self.samples)).reshape(self.samples) for i in enumerate(dim_data): if i[0] == len(dim_data) - 1: break div_points[i[0]] = (dim_data[i[0] + 1] + i[1]) / 2 return div_points dim_data = list(dim_data) dim_data = np.array(dim_data) dim_data = dim_data[:, dim_data[0].argsort()] dim_data = tuple(dim_data) div_points = dichotomy(dim_data[1]) information_gain_list = [] for i in div_points: div_index = list(div_points).index(i) + 1 front = dim_data[1][:div_index] behind = dim_data[1][div_index:] front_flag = dim_data[0][:div_index] behind_flag = dim_data[0][div_index:] front_data = (front, front_flag) behind_data = (behind, behind_flag) if len(front_data[0]) == 1 or ((front_data[1] == front_data[1][::-1]).all() and len(front_data[0]) != len(dim_data[0]) / 2): behind_entropy = self.cal_emp_entropy(behind_data) information_gain = self.empirical_entropy - \ (behind_entropy * (len(behind) / len(dim_data[0]))) information_gain_list.append(information_gain) elif len(behind_data[0]) == 1 or ((behind_data[1] == behind_data[1][::-1]).all() and len(front_data[0]) != len(dim_data[0]) / 2): front_entropy = self.cal_emp_entropy(front_data) information_gain = self.empirical_entropy - \ (front_entropy * (len(front) / len(dim_data[0]))) information_gain_list.append(information_gain) elif (front_data[1] == front_data[1][::-1]).all() and len(front_data[0]) == len(dim_data[0]) / 2: return -1, div_points[int(len(dim_data[0]) / 2 - 1)] else: front_entropy = self.cal_emp_entropy(front_data) behind_entropy = self.cal_emp_entropy(behind_data) information_gain = self.empirical_entropy - (front_entropy * (len(front) / len( dim_data[0])) + behind_entropy * (len(behind) / len(dim_data[0]))) information_gain_list.append(information_gain) max_information_gain = max(information_gain_list) return max_information_gain, div_points[information_gain_list.index(max_information_gain)] def compare_features(self): """ here we choose a maximium information gain among all features """ gain_list_tmp = [] point_list = [] for i in range(self.features): information_gain, div_point = self.div_point((self.data[1], self.data[0].transpose()[i])) gain_list_tmp.append(information_gain) point_list.append(div_point) com_matrix = np.array([ gain_list_tmp, point_list, range(self.features) ]) com_matrix = com_matrix[:, com_matrix[0].argsort()] Decision_Tree.feature_list = list(com_matrix[1]) 
Decision_Tree.gain_list = list(com_matrix[0]) Decision_Tree.dim_list = list(com_matrix[2]) def
(self, data):
        """
        here is the process of planting the tree
        data: without flag
        """
        feature = Decision_Tree.feature_list[Decision_Tree.index]
        dim = Decision_Tree.dim_list[Decision_Tree.index]
        Decision_Tree.index += 1
        if Decision_Tree.gain_list[Decision_Tree.feature_list.index(feature)] == -1 or Decision_Tree.index >= len(Decision_Tree.feature_list) - 1:
            return tree_node([x for x in data.transpose()[int(dim)] if x < feature],
                             [x for x in data.transpose()[int(dim)] if x > feature],
                             feature)
        else:
            return tree_node(self.planet_tree([x for x in data[0] if x < feature]),
                             self.planet_tree([x for x in data[0] if x > feature]),
                             feature)


class tree_node(object):
    """
    this is the node of the decision tree
    """

    def __init__(self, left, right, data):
        self.left = left
        self.right = right
        self.data = data
planet_tree
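A minimal, standalone sketch (illustrative only, not part of DecisionTree2.py; the helper names entropy and information_gain are assumptions) of the empirical-entropy and information-gain arithmetic that cal_emp_entropy and div_point apply when scoring a candidate split point:

import math

def entropy(labels):
    # Empirical entropy H = -sum(p_k * log2(p_k)) over the observed class frequencies.
    n = len(labels)
    return -sum((labels.count(c) / n) * math.log2(labels.count(c) / n)
                for c in set(labels))

def information_gain(labels, split_index):
    # Gain = H(parent) - weighted average of the child entropies after the split.
    left, right = labels[:split_index], labels[split_index:]
    weighted = (len(left) / len(labels)) * entropy(left) \
             + (len(right) / len(labels)) * entropy(right)
    return entropy(labels) - weighted

# A perfectly separating split of a balanced binary sample gains one full bit:
assert information_gain([0, 0, 1, 1], 2) == 1.0

div_point scores every candidate threshold this way and keeps the one with the largest gain, which compare_features then ranks across the feature dimensions.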
index.js
import passport from 'passport'; import { googleStrategy } from './google'; import { vkStrategy } from './vk'; import { localStrategy, registerUser } from './local'; passport.use(googleStrategy); passport.use(vkStrategy); passport.use(localStrategy);
export { passport, registerUser };
passport.serializeUser((user, done) => done(null, user)); passport.deserializeUser((user, done) => done(null, user));
timers__.py
#Timers # Execute code at timed intervals import time from threading import Timer def display(msg):
#Basic timer def run_once(): display('Run Once : ') t = Timer(5, display, ['Timeout:']) t.start() run_once() print('Waiting ...') #Interval Timer # Wrap it into class class RepeatTimer(Timer): def run(self): while not self.finished.wait(self.interval): self.function(*self.args, **self.kwargs) print('Done') timer = RepeatTimer(1, display, ['Repeating ']) timer.start() print('Treading Started ') time.sleep(10) # suspend execution print('Threading finished ') timer.cancel()
print(msg + ' ' + time.strftime('%H:%M:%S'))
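A minimal sketch of the same repeating-timer idea (illustrative; repeat_every is not part of timers__.py): in CPython, Timer.finished is a threading.Event, and Event.wait(interval) returns False on timeout and True once cancel() sets it, which is what keeps RepeatTimer.run looping until it is cancelled:

import threading
import time

def repeat_every(interval, func):
    """Call func() every `interval` seconds until the returned Event is set."""
    stop = threading.Event()

    def loop():
        # wait() returns False on timeout (keep repeating) and True once stop.set() is called.
        while not stop.wait(interval):
            func()

    threading.Thread(target=loop, daemon=True).start()
    return stop

# Usage sketch: tick for roughly three seconds, then cancel.
cancel = repeat_every(1, lambda: print('tick ' + time.strftime('%H:%M:%S')))
time.sleep(3)
cancel.set()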
decorations.ts
/*--------------------------------------------------------------------------------------------- * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See License.txt in the project root for license information. *--------------------------------------------------------------------------------------------*/ 'use strict'; import 'vs/css!./decorations'; import { DynamicViewOverlay } from 'vs/editor/browser/view/dynamicViewOverlay'; import { Range } from 'vs/editor/common/core/range'; import { ViewContext } from 'vs/editor/common/view/viewContext'; import { RenderingContext, HorizontalRange } from 'vs/editor/common/view/renderingContext'; import { ViewModelDecoration } from 'vs/editor/common/viewModel/viewModel'; import * as viewEvents from 'vs/editor/common/view/viewEvents'; export class DecorationsOverlay extends DynamicViewOverlay { private _context: ViewContext; private _lineHeight: number; private _typicalHalfwidthCharacterWidth: number; private _renderResult: string[]; constructor(context: ViewContext) { super(); this._context = context; this._lineHeight = this._context.configuration.editor.lineHeight; this._typicalHalfwidthCharacterWidth = this._context.configuration.editor.fontInfo.typicalHalfwidthCharacterWidth; this._renderResult = null; this._context.addEventHandler(this); } public dispose(): void { this._context.removeEventHandler(this); this._context = null; this._renderResult = null; super.dispose(); } // --- begin event handlers public onConfigurationChanged(e: viewEvents.ViewConfigurationChangedEvent): boolean { if (e.lineHeight) { this._lineHeight = this._context.configuration.editor.lineHeight; } if (e.fontInfo) { this._typicalHalfwidthCharacterWidth = this._context.configuration.editor.fontInfo.typicalHalfwidthCharacterWidth; } return true; } public onDecorationsChanged(e: viewEvents.ViewDecorationsChangedEvent): boolean { return true; } public onFlushed(e: viewEvents.ViewFlushedEvent): boolean { return true; } public onLinesChanged(e: viewEvents.ViewLinesChangedEvent): boolean { return true; } public onLinesDeleted(e: viewEvents.ViewLinesDeletedEvent): boolean { return true; } public onLinesInserted(e: viewEvents.ViewLinesInsertedEvent): boolean { return true; } public onScrollChanged(e: viewEvents.ViewScrollChangedEvent): boolean { return e.scrollTopChanged || e.scrollWidthChanged; } public onZonesChanged(e: viewEvents.ViewZonesChangedEvent): boolean { return true; } // --- end event handlers public prepareRender(ctx: RenderingContext): void { let _decorations = ctx.getDecorationsInViewport(); // Keep only decorations with `className` let decorations: ViewModelDecoration[] = [], decorationsLen = 0; for (let i = 0, len = _decorations.length; i < len; i++) { let d = _decorations[i]; if (d.options.className) { decorations[decorationsLen++] = d; } } // Sort decorations for consistent render output decorations = decorations.sort((a, b) => { if (a.options.zIndex < b.options.zIndex) { return -1; } if (a.options.zIndex > b.options.zIndex) { return 1; } const aClassName = a.options.className; const bClassName = b.options.className; if (aClassName < bClassName) { return -1; } if (aClassName > bClassName) { return 1; } return Range.compareRangesUsingStarts(a.range, b.range); }); let visibleStartLineNumber = ctx.visibleRange.startLineNumber; let visibleEndLineNumber = ctx.visibleRange.endLineNumber; let output: string[] = []; for (let lineNumber = visibleStartLineNumber; lineNumber <= visibleEndLineNumber; lineNumber++) { let lineIndex = 
lineNumber - visibleStartLineNumber; output[lineIndex] = ''; } // Render first whole line decorations and then regular decorations this._renderWholeLineDecorations(ctx, decorations, output); this._renderNormalDecorations(ctx, decorations, output); this._renderResult = output; } private _renderWholeLineDecorations(ctx: RenderingContext, decorations: ViewModelDecoration[], output: string[]): void { let lineHeight = String(this._lineHeight); let visibleStartLineNumber = ctx.visibleRange.startLineNumber; let visibleEndLineNumber = ctx.visibleRange.endLineNumber; for (let i = 0, lenI = decorations.length; i < lenI; i++) { let d = decorations[i]; if (!d.options.isWholeLine) { continue; } let decorationOutput = ( '<div class="cdr ' + d.options.className + '" style="left:0;width:100%;height:' + lineHeight + 'px;"></div>' ); let startLineNumber = Math.max(d.range.startLineNumber, visibleStartLineNumber); let endLineNumber = Math.min(d.range.endLineNumber, visibleEndLineNumber); for (let j = startLineNumber; j <= endLineNumber; j++) { let lineIndex = j - visibleStartLineNumber; output[lineIndex] += decorationOutput; } } } private _renderNormalDecorations(ctx: RenderingContext, decorations: ViewModelDecoration[], output: string[]): void { const lineHeight = String(this._lineHeight); const visibleStartLineNumber = ctx.visibleRange.startLineNumber; let prevClassName: string = null; let prevShowIfCollapsed: boolean = false; let prevRange: Range = null; for (let i = 0, lenI = decorations.length; i < lenI; i++) { const d = decorations[i]; if (d.options.isWholeLine) { continue; } const className = d.options.className; const showIfCollapsed = d.options.showIfCollapsed; let range = d.range; if (showIfCollapsed && range.endColumn === 1 && range.endLineNumber !== range.startLineNumber) { range = new Range(range.startLineNumber, range.startColumn, range.endLineNumber - 1, this._context.model.getLineMaxColumn(range.endLineNumber - 1)); } if (prevClassName === className && prevShowIfCollapsed === showIfCollapsed && Range.areIntersectingOrTouching(prevRange, range)) { // merge into previous decoration prevRange = Range.plusRange(prevRange, range); continue; } // flush previous decoration if (prevClassName !== null) { this._renderNormalDecoration(ctx, prevRange, prevClassName, prevShowIfCollapsed, lineHeight, visibleStartLineNumber, output); } prevClassName = className; prevShowIfCollapsed = showIfCollapsed; prevRange = range; } if (prevClassName !== null) { this._renderNormalDecoration(ctx, prevRange, prevClassName, prevShowIfCollapsed, lineHeight, visibleStartLineNumber, output);
private _renderNormalDecoration(ctx: RenderingContext, range: Range, className: string, showIfCollapsed: boolean, lineHeight: string, visibleStartLineNumber: number, output: string[]): void { let linesVisibleRanges = ctx.linesVisibleRangesForRange(range, /*TODO@Alex*/className === 'findMatch'); if (!linesVisibleRanges) { return; } for (let j = 0, lenJ = linesVisibleRanges.length; j < lenJ; j++) { let lineVisibleRanges = linesVisibleRanges[j]; const lineIndex = lineVisibleRanges.lineNumber - visibleStartLineNumber; if (showIfCollapsed && lineVisibleRanges.ranges.length === 1) { const singleVisibleRange = lineVisibleRanges.ranges[0]; if (singleVisibleRange.width === 0) { // collapsed range case => make the decoration visible by faking its width lineVisibleRanges.ranges[0] = new HorizontalRange(singleVisibleRange.left, this._typicalHalfwidthCharacterWidth); } } for (let k = 0, lenK = lineVisibleRanges.ranges.length; k < lenK; k++) { const visibleRange = lineVisibleRanges.ranges[k]; const decorationOutput = ( '<div class="cdr ' + className + '" style="left:' + String(visibleRange.left) + 'px;width:' + String(visibleRange.width) + 'px;height:' + lineHeight + 'px;"></div>' ); output[lineIndex] += decorationOutput; } } } public render(startLineNumber: number, lineNumber: number): string { if (!this._renderResult) { return ''; } let lineIndex = lineNumber - startLineNumber; if (lineIndex < 0 || lineIndex >= this._renderResult.length) { return ''; } return this._renderResult[lineIndex]; } }
} }
gather-markers.ts
/** * Helpers for marking up CodeMirror editors. */ import * as py from "@andrewhead/python-program-analysis"; import { ICodeCellModel } from "@jupyterlab/cells"; import { NotebookPanel } from "@jupyterlab/notebook"; import { PanelLayout, Widget } from "@phosphor/widgets"; import { LineHandle } from "codemirror"; /* * jQuery only used for dynamically computing element height. Use Phosphor whenever possible as the * preferred user interface toolkit. */ import * as $ from "jquery"; import { CellOutput, DefSelection, EditorDef, GatherEventData, GatherModel, GatherModelEvent, IGatherObserver, OutputSelection } from "../model"; import { LabCell } from "../model/cell"; import { log } from "../util/log"; import { NotebookElementFinder } from "./element-finder"; /** * Class for a highlighted, clickable output. */ const OUTPUT_HIGHLIGHTED_CLASS = "jp-OutputArea-highlighted"; /** * Class for parent elements of a gather button in an output area. */ const GATHER_BUTTON_PARENT_CLASS = "jp-OutputArea-gather-button-parent"; /** * Class for a selected output. */ const OUTPUT_SELECTED_CLASS = "jp-OutputArea-selected"; /** * Class for a button that lets you gather an output. */ const OUTPUT_GATHER_BUTTON_CLASS = "jp-OutputArea-gatherbutton"; /** * Class for a label on a gather button on an output. */ const OUTPUT_GATHER_LABEL_CLASS = "jp-OutputArea-gatherlabel"; /** * Class for variable definition text. */ const DEFINITION_CLASS = "jp-InputArea-editor-nametext"; /** * Class for selected variable definition text. */ const DEFINITION_SELECTED_CLASS = "jp-InputArea-editor-nametext-selected"; /** * Class for a line holding a variable definition. */ const DEFINITION_LINE_SELECTED_CLASS = "jp-InputArea-editor-nameline-selected"; /** * Class for a line with a data dependency. */ const DEPENDENCY_CLASS = "jp-InputArea-editor-dependencyline"; /** * Class for a line with a data dependency in a dirty cell. */ const DIRTY_DEPENDENCY_CLASS = "jp-InputArea-editor-dirtydependencyline"; /** * Clear existing selections in the window. */ function clearSelectionsInWindow() { if (window && window.getSelection) { window.getSelection().removeAllRanges(); } else if (document.hasOwnProperty("selection")) { (document as any).selection.empty(); } } /** * Adds and manages text markers. */ export class MarkerManager implements IGatherObserver { private _model: GatherModel; private _elementFinder: NotebookElementFinder; private _defMarkers: DefMarker[] = []; private _defLineHandles: DefLineHandle[] = []; private _outputMarkers: OutputMarker[] = []; private _dependencyLineMarkers: DependencyLineMarker[] = []; /** * Construct a new marker manager. */ constructor(model: GatherModel, notebook: NotebookPanel) { this._model = model; this._model.addObserver(this); this._elementFinder = new NotebookElementFinder(notebook); /* * XXX(andrewhead): Sometimes in Chrome or Edge, "click" events get dropped when the click * occurs on the cell. Mouseup doesn't, so we use that here. */ notebook.content.node.addEventListener("mouseup", (event: MouseEvent) => { this.handleClick(event); }); } /** * Click-handler---pass on click event to markers. */ handleClick(event: MouseEvent) { this._defMarkers.forEach(marker => { marker.handleClick(event); }); } /** * Listen for changes to the gather model. */ onModelChange(eventType: GatherModelEvent, eventData: GatherEventData, model: GatherModel) { // When a cell is executed, search for definitions and output. 
if (eventType == GatherModelEvent.CELL_EXECUTION_LOGGED) { let cell = eventData as py.Cell; this.clearSelectablesForCell(cell); let editor = this._elementFinder.getEditor(cell); if (editor) { this.highlightDefs(editor, cell); } let outputElements = this._elementFinder.getOutputs(cell); this.highlightOutputs(cell, outputElements); } // When a cell is deleted or edited, delete all of its def markers. if (eventType == GatherModelEvent.CELL_DELETED || eventType == GatherModelEvent.CELL_EDITED) { let cell = eventData as py.Cell; this._updateDependenceHighlightsForCell(cell); this.clearSelectablesForCell(cell); } // When definitions are found, highlight them. if (eventType == GatherModelEvent.EDITOR_DEF_FOUND) { let editorDef = eventData as EditorDef; this.highlightDef(editorDef); } // When definitions are removed from the model, deselect and remove their markers. if (eventType == GatherModelEvent.EDITOR_DEF_REMOVED) { let editorDef = eventData as EditorDef; for (let i = this._defMarkers.length - 1; i >= 0; i--) { let defMarker = this._defMarkers[i]; if (defMarker.def == editorDef.def) { let defsToDeselect = this._model.selectedDefs.filter(d => d.editorDef == editorDef); for (let defToDeselect of defsToDeselect) { this._model.deselectDef(defToDeselect); } defMarker.marker.clear(); this._defMarkers.splice(i, 1); } } } // When outputs are found, highlight them. if (eventType == GatherModelEvent.OUTPUT_FOUND) { let output = eventData as CellOutput; this.highlightOutput(output); } // When outputs are removed from the model, deselect and remove their markers. if (eventType == GatherModelEvent.OUTPUT_REMOVED) { let output = eventData as CellOutput; for (let i = this._outputMarkers.length - 1; i >= 0; i--) { let outputMarker = this._outputMarkers[i]; if (outputMarker.cell == output.cell && outputMarker.outputIndex == output.outputIndex) { this._model.deselectOutput({ cell: output.cell, outputIndex: output.outputIndex }); outputMarker.destroy(); this._outputMarkers.splice(i, 1); } } } // Whenever a definition is selected, add a marker to its line. if (eventType == GatherModelEvent.DEF_SELECTED) { let defSelection = eventData as DefSelection; let editor = defSelection.editorDef.editor; let def = defSelection.editorDef.def; let lineHandle = editor.addLineClass( def.location.first_line - 1, "background", DEFINITION_LINE_SELECTED_CLASS ); this._defLineHandles.push({ def: def, lineHandle: lineHandle }); } // Whenever a definition is deselected from outside, unhighlight it. if (eventType == GatherModelEvent.DEF_DESELECTED) { let defSelection = eventData as DefSelection; this._defMarkers .filter(marker => { return ( defSelection.editorDef.def.location == marker.location && defSelection.cell.executionEventId == marker.cell.executionEventId ); }) .forEach(marker => marker.deselect()); let editorDef = defSelection.editorDef; for (let i = this._defLineHandles.length - 1; i >= 0; i--) { let defLineHandle = this._defLineHandles[i]; if (defLineHandle.def == editorDef.def) { editorDef.editor.removeLineClass( defLineHandle.lineHandle, "background", DEFINITION_LINE_SELECTED_CLASS ); } } } // Whenever an output is deselected from outside, unhighlight it. 
if (eventType == GatherModelEvent.OUTPUT_DESELECTED) { let outputSelection = eventData as OutputSelection; this._outputMarkers .filter(marker => { return ( marker.outputIndex == outputSelection.outputIndex && marker.cell.executionEventId == outputSelection.cell.executionEventId ); }) .forEach(marker => marker.deselect()); } // When the chosen slices change, update which lines are highlighted in the document. if ( eventType == GatherModelEvent.SLICE_SELECTED || eventType == GatherModelEvent.SLICE_DESELECTED ) { this._clearDependencyLineMarkers(); model.selectedSlices.forEach(sliceSelection => { this.highlightDependencies(sliceSelection.slice); }); } } highlightDef(editorDef: EditorDef) { let editor = editorDef.editor; let def = editorDef.def; let doc = editor.getDoc(); // Add marker for the definition symbol. let marker = doc.markText( { line: def.location.first_line - 1, ch: def.location.first_column }, { line: def.location.last_line - 1, ch: def.location.last_column }, { className: DEFINITION_CLASS } ); let defSelection = new DefSelection({ editorDef: editorDef, cell: editorDef.cell }); let clickHandler = (_: py.Cell, __: py.Location, selected: boolean, event: MouseEvent) => { if (selected) { if (!event.shiftKey) { this._model.deselectAll(); } this._model.selectDef(defSelection); } else { this._model.deselectDef(defSelection); } }; this._defMarkers.push( new DefMarker(marker, editor, def, def.location, def.node, editorDef.cell, clickHandler) ); } highlightOutput(output: CellOutput) { let selection = { cell: output.cell, outputIndex: output.outputIndex }; let outputMarker = new OutputMarker( output.element, output.outputIndex, output.cell, (selected, event: MouseEvent) => { if (selected) { if (!event.shiftKey) { this._model.deselectAll(); } this._model.selectOutput(selection); } else { this._model.deselectOutput(selection); } if (event.shiftKey) { // Don't select cells or text when multiple outputs are clicked on event.preventDefault(); event.stopPropagation(); clearSelectionsInWindow(); } } ); this._outputMarkers.push(outputMarker); } /** * Clear all def markers that belong to this editor. */ clearSelectablesForCell(cell: py.Cell) { this._model.removeEditorDefsForCell(cell.executionEventId); this._model.deselectOutputsForCell(cell.executionEventId); } /** * Highlight all of the definitions in an editor. */ highlightDefs(editor: CodeMirror.Editor, cell: py.Cell) { /** * Fetch the cell program instead of recomputing it, as it can stall the interface if we * analyze the code here. */ let cellProgram = this._model.getCellProgram(cell); if (cellProgram !== null && !cellProgram.hasError) { for (let ref of cellProgram.defs) { if (ref.type == py.SymbolType.VARIABLE) { this._model.addEditorDef({ def: ref, editor: editor, cell: cell }); } } } log("Highlighted definitions", { numActive: this._defMarkers.length }); } /** * Highlight a list of output elements. */ highlightOutputs(cell: py.Cell, outputElements: HTMLElement[]) { for (let i = 0; i < outputElements.length; i++) { let outputElement = outputElements[i]; let output = { cell: cell, element: outputElement, outputIndex: i }; this._model.addOutput(output); } log("Highlighted outputs", { numActive: this._outputMarkers.length }); } /** * Highlight dependencies in a cell at a set of locations. 
*/ highlightDependencies(slice: py.SlicedExecution) { let defLines: number[] = []; slice.cellSlices.forEach(cellSlice => { let loggedCell = cellSlice.cell; let sliceLocations = cellSlice.slice; let liveCellWidget = this._elementFinder.getCellWidget(loggedCell); let editor = this._elementFinder.getEditor(loggedCell); if (liveCellWidget && editor) { let liveCell = new LabCell(liveCellWidget.model as ICodeCellModel); let numLines = 0; // Batch the highlight operations for each cell to spend less time updating cell height. editor.operation(() => { sliceLocations.items.forEach((loc: py.Location) => { for ( let lineNumber = loc.first_line - 1; lineNumber <= loc.last_line - 1; lineNumber++ ) { numLines += 1; let styleClass = liveCell.dirty ? DIRTY_DEPENDENCY_CLASS : DEPENDENCY_CLASS; let lineHandle = editor.addLineClass(lineNumber, "background", styleClass); this._dependencyLineMarkers.push({ editor: editor, lineHandle: lineHandle }); } }); defLines.push(numLines); }); } }); log("Added lines for defs (may be overlapping)", { defLines }); } private _clearDependencyMarkersForLine( editor: CodeMirror.Editor, lineHandle: CodeMirror.LineHandle ) { editor.removeLineClass(lineHandle, "background", DEPENDENCY_CLASS); editor.removeLineClass(lineHandle, "background", DIRTY_DEPENDENCY_CLASS); } private _updateDependenceHighlightsForCell(cell: py.Cell) { let editor = this._elementFinder.getEditor(cell); let liveCellWidget = this._elementFinder.getCellWidget(cell); let liveCell = new LabCell(liveCellWidget.model as ICodeCellModel); this._dependencyLineMarkers .filter(marker => marker.editor == editor) .forEach(marker => { this._clearDependencyMarkersForLine(marker.editor, marker.lineHandle); let styleClass = liveCell.dirty ? DIRTY_DEPENDENCY_CLASS : DEPENDENCY_CLASS; marker.editor.addLineClass(marker.lineHandle, "background", styleClass); }); } private _clearDependencyLineMarkers() { log("Cleared all dependency line markers"); this._dependencyLineMarkers.forEach(marker => { this._clearDependencyMarkersForLine(marker.editor, marker.lineHandle); }); this._dependencyLineMarkers = []; } } type DependencyLineMarker = { editor: CodeMirror.Editor; lineHandle: CodeMirror.LineHandle; }; /** * Marker for an output. */ class OutputMarker { constructor( outputElement: HTMLElement, outputIndex: number, cell: py.Cell, onToggle: (selected: boolean, event: MouseEvent) => void ) { this._element = outputElement; this._element.classList.add(OUTPUT_HIGHLIGHTED_CLASS); this._addSelectionButton(); this.outputIndex = outputIndex; this.cell = cell; this._onToggle = onToggle; this._clickListener = (event: MouseEvent) => { let target = event.target as HTMLElement; // If the click is on a child of the output area (the actual content), then handle // that click event like normal without selecting the output. 
      if (
        !target ||
        !(
          target.classList.contains(OUTPUT_HIGHLIGHTED_CLASS) ||
          target.classList.contains(OUTPUT_GATHER_BUTTON_CLASS) ||
          target.classList.contains(OUTPUT_GATHER_LABEL_CLASS)
        )
      )
        return;
      if (this._onToggle) {
        this._toggleSelected();
        this._onToggle(this._selected, event);
      }
      log("Clicked on output area", { outputIndex, cell, toggledOn: this._selected });
    };
    this._element.addEventListener("click", this._clickListener);
  }

  private _relaxParentOverflowVisibility() {
    // Walk up from the output element, relaxing overflow on each ancestor until the
    // enclosing output area is reached (or the tree runs out of parents).
    let parentElement = this._element;
    while (parentElement != null) {
      parentElement.classList.add(GATHER_BUTTON_PARENT_CLASS);
      if (parentElement.classList.contains("jp-OutputArea")) {
        break;
      }
      parentElement = parentElement.parentElement;
    }
  }

  private _addSelectionButton() {
    this._gatherButton = new Widget({ node: document.createElement("div") });
    this._gatherButton.addClass(OUTPUT_GATHER_BUTTON_CLASS);
    this._gatherButton.layout = new PanelLayout();
    this._gatherLabel = new Widget({ node: document.createElement("p") });
    this._gatherLabel.addClass(OUTPUT_GATHER_LABEL_CLASS);
    this._gatherLabel.node.textContent = "Gather";
    (this._gatherButton.layout as PanelLayout).addWidget(this._gatherLabel);
    this._relaxParentOverflowVisibility();
    this._element.appendChild(this._gatherButton.node);
    const buttonHeight = -$(this._gatherButton.node).outerHeight();
    this._gatherButton.node.style["top"] = buttonHeight + "px";
  }

  private _toggleSelected() {
    if (this._selected) this.deselect();
    else this.select();
  }

  select() {
    this._selected = true;
    this._element.classList.add(OUTPUT_SELECTED_CLASS);
  }

  deselect() {
    this._selected = false;
    this._element.classList.remove(OUTPUT_SELECTED_CLASS);
  }

  destroy() {
    this.deselect();
    this._element.classList.remove(OUTPUT_HIGHLIGHTED_CLASS);
    this._element.removeEventListener("click", this._clickListener);
  }

  readonly outputIndex: number;
  readonly cell: py.Cell;
  private _element: HTMLElement;
  private _gatherButton: Widget;
  private _gatherLabel: Widget;
  private _clickListener: (_: MouseEvent) => void;
  private _onToggle: (selected: boolean, event: MouseEvent) => void;
  private _selected: boolean = false;
}

/**
 * Line handle for a definition line.
 */
type DefLineHandle = {
  def: py.Ref;
  lineHandle: LineHandle;
};

/**
 * Marker for a variable definition.
 */
class
{ constructor( marker: CodeMirror.TextMarker, editor: CodeMirror.Editor, def: py.Ref, location: py.Location, statement: py.SyntaxNode, cell: py.Cell, clickHandler: ( cell: py.Cell, selection: py.Location, selected: boolean, event: MouseEvent ) => void ) { this.marker = marker; this.def = def; this.editor = editor; this.location = location; this.statement = statement; this.cell = cell; this.clickHandler = clickHandler; } handleClick(event: MouseEvent) { let editor = this.editor; if (editor.getWrapperElement().contains(event.target as Node)) { // In Chrome, if you click in the top of an editor's text area, it will trigger this // event, and is considered as a click at the start of the box. This filter for // span elements filters out those spurious clicks. let target = event.target as HTMLElement; let badTarget = !target.tagName || target.tagName != "SPAN" || !target.classList.contains(DEFINITION_CLASS); if (badTarget) return; let clickPosition: CodeMirror.Position = editor.coordsChar({ left: event.clientX, top: event.clientY }); let editorMarkers = editor.getDoc().findMarksAt(clickPosition); if (editorMarkers.indexOf(this.marker) != -1) { if (this.clickHandler) { this.toggleSelected(); log("Clicked on definition", { toggledOn: this._selected, cell: this.cell }); this.clickHandler(this.cell, this.location, this._selected, event); } event.preventDefault(); } } } toggleSelected() { if (this._selected) this.deselect(); else if (!this._selected) this.select(); } select() { this._selected = true; let markerPos = this.marker.find(); this._selectionMarker = this.editor.getDoc().markText(markerPos.from, markerPos.to, { className: DEFINITION_SELECTED_CLASS }); } deselect() { this._selected = false; if (this._selectionMarker) { this._selectionMarker.clear(); this._selectionMarker = undefined; } } private _selected: boolean = false; private _selectionMarker: CodeMirror.TextMarker = undefined; readonly marker: CodeMirror.TextMarker; readonly editor: CodeMirror.Editor; readonly def: py.Ref; readonly location: py.Location; readonly statement: py.SyntaxNode; readonly cell: py.Cell; readonly clickHandler: ( cell: py.Cell, selection: py.Location, selected: boolean, event: MouseEvent ) => void; }
DefMarker
input_converter.py
# input_converter.py # author: Playinf # email: [email protected] import os import six import json import random import argparse import tensorflow as tf def load_vocab(filename): fd = open(filename, "r") count = 0 vocab = {} for line in fd: word = line.strip() vocab[word] = count count += 1 fd.close() return vocab def to_json(dictionary): """ Convert python dictionary to JSON format """ return json.dumps(dictionary) def to_dictionary(example): """ Convert JSON/tf.train.Example to python dictionary """ if isinstance(example, str): dictionary = json.loads(example) elif isinstance(example, tf.train.Example): dictionary = {} keys = example.features.feature.keys() values = example.features.feature.values() for (k, v) in zip(keys, values): int64_list = list(v.int64_list.value) float_list = list(v.float_list.value) bytes_list = list(v.bytes_list.value) if int64_list: dictionary[k] = int64_list elif float_list: dictionary[k] = float_list elif bytes_list: dictionary[k] = bytes_list else: raise ValueError("All lists are empty.") else: raise ValueError("Unsupported format") return dictionary def to_example(dictionary): """ Convert python dictionary to tf.train.Example """ features = {} for (k, v) in six.iteritems(dictionary): if not v: raise ValueError("Empty generated field: %s", str((k, v))) if isinstance(v[0], six.integer_types): int64_list = tf.train.Int64List(value=v) features[k] = tf.train.Feature(int64_list=int64_list) elif isinstance(v[0], float): float_list = tf.train.FloatList(value=v) features[k] = tf.train.Feature(float_list=float_list) elif isinstance(v[0], six.string_types): bytes_list = tf.train.BytesList(value=v) features[k] = tf.train.Feature(bytes_list=bytes_list) else: raise ValueError("Value is neither an int nor a float; " "v: %s type: %s" % (str(v[0]), str(type(v[0])))) return tf.train.Example(features=tf.train.Features(feature=features)) def read_records(filename): """ Read TensorFlow record """ reader = tf.python_io.tf_record_iterator(filename) records = [] for record in reader: records.append(record) if len(records) % 10000 == 0: tf.logging.info("read: %d", len(records)) return records def write_records(records, out_filename): """ Write to TensorFlow record """ writer = tf.python_io.TFRecordWriter(out_filename) for count, record in enumerate(records): writer.write(record) if count % 10000 == 0: tf.logging.info("write: %d", count) writer.close() def convert_record_to_json(pattern, output_name, output_dir, num_shards=1): """ Convert TensorFlow record to JSON format """ output_files = [] writers = [] for shard in xrange(num_shards): output_filename = "%s-%.5d-of-%.5d" % (output_name, shard, num_shards) output_file = os.path.join(output_dir, output_filename) output_files.append(output_file) writers.append(tf.gfile.GFile(output_file, "w")) filenames = tf.gfile.Glob(pattern) records = [] for filename in filenames: records.extend(read_records(filename)) counter, shard = 0, 0 for record in records: counter += 1 example = tf.train.Example() example.ParseFromString(record) features = to_dictionary(example) json_str = to_json(features) writers[shard].write(json_str + "\n") shard = (shard + 1) % num_shards for writer in writers: writer.close() # format: # pred-pos tokens ||| labels def convert_plain_to_json(name, vocabs, output_name, output_dir, num_shards, lower=True, shuffle=True): """ Convert plain SRL data to TensorFlow record """
with open(name) as fd: for line in fd: features, labels = line.strip().split("|||") features = features.strip().split(" ") labels = labels.strip().split(" ") pred_pos = features[0] inputs = features[1:] if lower: inputs = [item.lower() for item in inputs] inputs = [vocab_token[item] if item in vocab_token else unk for item in inputs] labels = [vocab_label[item] for item in labels] preds = [0 for _ in inputs] preds[int(pred_pos)] = 1 feature = { "inputs": inputs, "preds": preds, "targets": labels } records.append(feature) if shuffle: random.shuffle(records) writers = [] output_files = [] for shard in xrange(num_shards): output_filename = "%s-%.5d-of-%.5d" % (output_name, shard, num_shards) output_file = os.path.join(output_dir, output_filename) output_files.append(output_file) writers.append(tf.gfile.GFile(output_file, "w")) counter, shard = 0, 0 for record in records: counter += 1 features = record json_str = to_json(features) writers[shard].write(json_str + "\n") shard = (shard + 1) % num_shards for writer in writers: writer.close() # format: # pred-pos tokens ||| labels def convert_plain_to_record(name, vocabs, output_name, output_dir, num_shards, lower=True, shuffle=True): """ Convert plain SRL data to TensorFlow record """ vocab_token = load_vocab(vocabs[0]) vocab_label = load_vocab(vocabs[1]) records = [] unk = vocab_token["<unk>"] with open(name) as fd: for line in fd: features, labels = line.strip().split("|||") features = features.strip().split() labels = labels.strip().split() pred_pos = features[0] inputs = features[1:] if lower: inputs = [item.lower() for item in inputs] inputs = [vocab_token[item] if item in vocab_token else unk for item in inputs] labels = [vocab_label[item] for item in labels] preds = [0 for _ in inputs] preds[int(pred_pos)] = 1 feature = { "inputs": inputs, "preds": preds, "targets": labels } records.append(feature) if shuffle: random.shuffle(records) output_files = [] writers = [] for shard in xrange(num_shards): output_filename = "%s-%.5d-of-%.5d" % (output_name, shard, num_shards) output_file = os.path.join(output_dir, output_filename) output_files.append(output_file) writers.append(tf.python_io.TFRecordWriter(output_file)) counter, shard = 0, 0 for record in records: counter += 1 example = to_example(record) writers[shard].write(example.SerializeToString()) shard = (shard + 1) % num_shards for writer in writers: writer.close() def parse_args(): msg = "convert srl data to TensorFlow record format" usage = "srl_input_converter.py [<args>] [-h | --help]" parser = argparse.ArgumentParser(description=msg, usage=usage) msg = "path of source file" parser.add_argument("--input_path", required=True, type=str, help=msg) msg = "output name" parser.add_argument("--output_name", required=True, type=str, help=msg) msg = "output directory" parser.add_argument("--output_dir", required=True, type=str, help=msg) msg = "path of vocabulary" parser.add_argument("--vocab", type=str, nargs=2, help=msg) msg = "number of output shards" parser.add_argument("--num_shards", default=100, type=int, help=msg) msg = "shuffle inputs" parser.add_argument("--shuffle", action="store_true", help=msg) msg = "use lowercase" parser.add_argument("--lower", action="store_true", help=msg) return parser.parse_args() if __name__ == "__main__": args = parse_args() convert_plain_to_record(args.input_path, args.vocab, args.output_name, args.output_dir, args.num_shards, args.lower, args.shuffle)
vocab_token = load_vocab(vocabs[0]) vocab_label = load_vocab(vocabs[1]) records = [] unk = vocab_token["<unk>"]
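A small worked example (standalone; the toy vocabularies and the sample line are hypothetical, not data shipped with input_converter.py) of the per-line transformation that both convert_plain_to_json and convert_plain_to_record perform before a record is written out as JSON or as a tf.train.Example:

import json

# Toy vocabularies standing in for the files normally read by load_vocab().
vocab_token = {"<unk>": 0, "the": 1, "cat": 2, "sat": 3}
vocab_label = {"B-A0": 0, "I-A0": 1, "B-V": 2}
unk = vocab_token["<unk>"]

line = "2 The cat sat ||| B-A0 I-A0 B-V"
features, labels = line.strip().split("|||")
features = features.strip().split()
labels = labels.strip().split()

pred_pos, tokens = features[0], [w.lower() for w in features[1:]]
inputs = [vocab_token.get(w, unk) for w in tokens]
targets = [vocab_label[lab] for lab in labels]
preds = [0] * len(inputs)
preds[int(pred_pos)] = 1  # one-hot marker for the predicate position

record = {"inputs": inputs, "preds": preds, "targets": targets}
print(json.dumps(record))
# {"inputs": [1, 2, 3], "preds": [0, 0, 1], "targets": [0, 1, 2]}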
models.rs
#![doc = "generated by AutoRust 0.1.0"] #![allow(non_camel_case_types)] #![allow(unused_imports)] use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AbsoluteDeleteOption { #[serde(flatten)] pub delete_option: DeleteOption, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RecoveryPointsFilters { #[serde(rename = "restorePointDataStoreId", default, skip_serializing_if = "Option::is_none")] pub restore_point_data_store_id: Option<String>, #[serde(rename = "isVisible", default, skip_serializing_if = "Option::is_none")] pub is_visible: Option<bool>, #[serde(rename = "startDate", default, skip_serializing_if = "Option::is_none")] pub start_date: Option<String>, #[serde(rename = "endDate", default, skip_serializing_if = "Option::is_none")] pub end_date: Option<String>, #[serde(rename = "extendedInfo", default, skip_serializing_if = "Option::is_none")] pub extended_info: Option<bool>, #[serde(rename = "restorePointState", default, skip_serializing_if = "Option::is_none")] pub restore_point_state: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AdHocBackupRuleOptions { #[serde(rename = "ruleName")] pub rule_name: String, #[serde(rename = "triggerOption")] pub trigger_option: AdhocBackupTriggerOption, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AdhocBackupTriggerOption { #[serde(rename = "retentionTagOverride", default, skip_serializing_if = "Option::is_none")] pub retention_tag_override: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AdhocBasedTaggingCriteria { #[serde(rename = "tagInfo", default, skip_serializing_if = "Option::is_none")] pub tag_info: Option<RetentionTag>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AdhocBasedTriggerContext { #[serde(flatten)] pub trigger_context: TriggerContext, #[serde(rename = "taggingCriteria")] pub tagging_criteria: AdhocBasedTaggingCriteria, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupDiscreteRecoveryPoint { #[serde(flatten)] pub azure_backup_recovery_point: AzureBackupRecoveryPoint, #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")] pub friendly_name: Option<String>, #[serde(rename = "recoveryPointDataStoresDetails", default, skip_serializing_if = "Vec::is_empty")] pub recovery_point_data_stores_details: Vec<RecoveryPointDataStoreDetails>, #[serde(rename = "recoveryPointTime")] pub recovery_point_time: String, #[serde(rename = "policyName", default, skip_serializing_if = "Option::is_none")] pub policy_name: Option<String>, #[serde(rename = "policyVersion", default, skip_serializing_if = "Option::is_none")] pub policy_version: Option<String>, #[serde(rename = "recoveryPointId", default, skip_serializing_if = "Option::is_none")] pub recovery_point_id: Option<String>, #[serde(rename = "recoveryPointType", default, skip_serializing_if = "Option::is_none")] pub recovery_point_type: Option<String>, #[serde(rename = "retentionTagName", default, skip_serializing_if = "Option::is_none")] pub retention_tag_name: Option<String>, #[serde(rename = "retentionTagVersion", default, skip_serializing_if = "Option::is_none")] pub retention_tag_version: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupFindRestorableTimeRangesRequest { #[serde(rename = "sourceDataStoreType")] pub source_data_store_type: 
azure_backup_find_restorable_time_ranges_request::SourceDataStoreType, #[serde(rename = "startTime")] pub start_time: String, #[serde(rename = "endTime")] pub end_time: String, } pub mod azure_backup_find_restorable_time_ranges_request { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum SourceDataStoreType { OperationalStore, VaultStore,
} } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupFindRestorableTimeRangesRequestResource { #[serde(flatten)] pub dpp_worker_request: DppWorkerRequest, #[serde(default, skip_serializing_if = "Option::is_none")] pub content: Option<AzureBackupFindRestorableTimeRangesRequest>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupFindRestorableTimeRangesResponse { #[serde(rename = "restorableTimeRanges", default, skip_serializing_if = "Vec::is_empty")] pub restorable_time_ranges: Vec<RestorableTimeRange>, #[serde(rename = "objectType", default, skip_serializing_if = "Option::is_none")] pub object_type: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupFindRestorableTimeRangesResponseResource { #[serde(flatten)] pub dpp_resource: DppResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<AzureBackupFindRestorableTimeRangesResponse>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupJob { #[serde(rename = "activityID")] pub activity_id: String, #[serde(rename = "backupInstanceFriendlyName")] pub backup_instance_friendly_name: String, #[serde(rename = "backupInstanceId", skip_serializing)] pub backup_instance_id: Option<String>, #[serde(rename = "dataSourceId")] pub data_source_id: String, #[serde(rename = "dataSourceLocation")] pub data_source_location: String, #[serde(rename = "dataSourceName")] pub data_source_name: String, #[serde(rename = "dataSourceSetName")] pub data_source_set_name: String, #[serde(rename = "dataSourceType")] pub data_source_type: String, #[serde(default, skip_serializing_if = "Option::is_none")] pub duration: Option<String>, #[serde(rename = "endTime", skip_serializing)] pub end_time: Option<String>, #[serde(rename = "errorDetails", skip_serializing)] pub error_details: Vec<UserFacingError>, #[serde(rename = "extendedInfo", default, skip_serializing_if = "Option::is_none")] pub extended_info: Option<JobExtendedInfo>, #[serde(rename = "isUserTriggered")] pub is_user_triggered: bool, pub operation: String, #[serde(rename = "operationCategory")] pub operation_category: String, #[serde(rename = "policyId", skip_serializing)] pub policy_id: Option<String>, #[serde(rename = "policyName", skip_serializing)] pub policy_name: Option<String>, #[serde(rename = "progressEnabled")] pub progress_enabled: bool, #[serde(rename = "progressUrl", skip_serializing)] pub progress_url: Option<String>, #[serde(rename = "restoreType", skip_serializing)] pub restore_type: Option<String>, #[serde(rename = "sourceResourceGroup")] pub source_resource_group: String, #[serde(rename = "sourceSubscriptionID")] pub source_subscription_id: String, #[serde(rename = "startTime")] pub start_time: String, pub status: String, #[serde(rename = "subscriptionId")] pub subscription_id: String, #[serde(rename = "supportedActions")] pub supported_actions: Vec<String>, #[serde(rename = "vaultName")] pub vault_name: String, #[serde(default, skip_serializing_if = "Option::is_none")] pub etag: Option<String>, #[serde(rename = "sourceDataStoreName", default, skip_serializing_if = "Option::is_none")] pub source_data_store_name: Option<String>, #[serde(rename = "destinationDataStoreName", default, skip_serializing_if = "Option::is_none")] pub destination_data_store_name: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupJobResource { #[serde(flatten)] pub dpp_resource: DppResource, 
#[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<AzureBackupJob>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupJobResourceList { #[serde(flatten)] pub dpp_resource_list: DppResourceList, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<AzureBackupJobResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupParams { #[serde(flatten)] pub backup_parameters: BackupParameters, #[serde(rename = "backupType")] pub backup_type: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupRecoveryPoint { #[serde(rename = "objectType")] pub object_type: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupRecoveryPointBasedRestoreRequest { #[serde(flatten)] pub azure_backup_restore_request: AzureBackupRestoreRequest, #[serde(rename = "recoveryPointId")] pub recovery_point_id: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupRecoveryPointResource { #[serde(flatten)] pub dpp_resource: DppResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<AzureBackupRecoveryPoint>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupRecoveryPointResourceList { #[serde(flatten)] pub dpp_resource_list: DppResourceList, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<AzureBackupRecoveryPointResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupRehydrationRequest { #[serde(rename = "recoveryPointId")] pub recovery_point_id: String, #[serde(rename = "rehydrationPriority", default, skip_serializing_if = "Option::is_none")] pub rehydration_priority: Option<RehydrationPriority>, #[serde(rename = "rehydrationRetentionDuration")] pub rehydration_retention_duration: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupRestoreRequest { #[serde(rename = "objectType")] pub object_type: String, #[serde(rename = "restoreTargetInfo")] pub restore_target_info: RestoreTargetInfoBase, #[serde(rename = "sourceDataStoreType")] pub source_data_store_type: azure_backup_restore_request::SourceDataStoreType, } pub mod azure_backup_restore_request { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum SourceDataStoreType { ArchiveStore, SnapshotStore, VaultStore, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupRestoreWithRehydrationRequest { #[serde(flatten)] pub azure_backup_recovery_point_based_restore_request: AzureBackupRecoveryPointBasedRestoreRequest, #[serde(rename = "rehydrationPriority")] pub rehydration_priority: RehydrationPriority, #[serde(rename = "rehydrationRetentionDuration")] pub rehydration_retention_duration: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupRecoveryTimeBasedRestoreRequest { #[serde(flatten)] pub azure_backup_restore_request: AzureBackupRestoreRequest, #[serde(rename = "recoveryPointTime")] pub recovery_point_time: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupRule { #[serde(flatten)] pub base_policy_rule: BasePolicyRule, #[serde(rename = "backupParameters", default, skip_serializing_if = "Option::is_none")] pub backup_parameters: Option<BackupParameters>, #[serde(rename = "dataStore")] pub data_store: DataStoreInfoBase, pub trigger: TriggerContext, } 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureOperationalStoreParameters { #[serde(flatten)] pub data_store_parameters: DataStoreParameters, #[serde(rename = "resourceGroupId", default, skip_serializing_if = "Option::is_none")] pub resource_group_id: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureRetentionRule { #[serde(flatten)] pub base_policy_rule: BasePolicyRule, #[serde(rename = "isDefault", default, skip_serializing_if = "Option::is_none")] pub is_default: Option<bool>, pub lifecycles: Vec<SourceLifeCycle>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BackupCriteria { #[serde(rename = "objectType")] pub object_type: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BackupInstance { #[serde(rename = "friendlyName")] pub friendly_name: String, #[serde(rename = "dataSourceInfo")] pub data_source_info: Datasource, #[serde(rename = "dataSourceSetInfo", default, skip_serializing_if = "Option::is_none")] pub data_source_set_info: Option<DatasourceSet>, #[serde(rename = "policyInfo")] pub policy_info: PolicyInfo, #[serde(rename = "protectionStatus", default, skip_serializing_if = "Option::is_none")] pub protection_status: Option<ProtectionStatusDetails>, #[serde(rename = "currentProtectionState", skip_serializing)] pub current_protection_state: Option<backup_instance::CurrentProtectionState>, #[serde(rename = "protectionErrorDetails", default, skip_serializing_if = "Option::is_none")] pub protection_error_details: Option<UserFacingError>, #[serde(rename = "provisioningState", skip_serializing)] pub provisioning_state: Option<String>, #[serde(rename = "objectType")] pub object_type: String, } pub mod backup_instance { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum CurrentProtectionState { Invalid, NotProtected, ConfiguringProtection, ProtectionConfigured, BackupSchedulesSuspended, RetentionSchedulesSuspended, ProtectionStopped, ProtectionError, ConfiguringProtectionFailed, SoftDeleting, SoftDeleted, UpdatingProtection, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BackupInstanceResource { #[serde(flatten)] pub dpp_resource: DppResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<BackupInstance>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BackupInstanceResourceList { #[serde(flatten)] pub dpp_resource_list: DppResourceList, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<BackupInstanceResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BackupParameters { #[serde(rename = "objectType")] pub object_type: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BackupPolicy { #[serde(flatten)] pub base_backup_policy: BaseBackupPolicy, #[serde(rename = "policyRules")] pub policy_rules: Vec<BasePolicyRule>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BackupSchedule { #[serde(rename = "repeatingTimeIntervals")] pub repeating_time_intervals: Vec<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BackupVault { #[serde(rename = "provisioningState", skip_serializing)] pub provisioning_state: Option<backup_vault::ProvisioningState>, #[serde(rename = "storageSettings")] pub storage_settings: Vec<StorageSetting>, } pub mod backup_vault { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub 
enum ProvisioningState { Failed, Provisioning, Succeeded, Unknown, Updating, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BackupVaultResource { #[serde(flatten)] pub dpp_tracked_resource: DppTrackedResource, pub properties: BackupVault, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BackupVaultResourceList { #[serde(flatten)] pub dpp_resource_list: DppResourceList, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<BackupVaultResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BaseBackupPolicy { #[serde(rename = "datasourceTypes")] pub datasource_types: Vec<String>, #[serde(rename = "objectType")] pub object_type: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BaseBackupPolicyResource { #[serde(flatten)] pub dpp_resource: DppResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<BaseBackupPolicy>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BaseBackupPolicyResourceList { #[serde(flatten)] pub dpp_resource_list: DppResourceList, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<BaseBackupPolicyResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BasePolicyRule { pub name: String, #[serde(rename = "objectType")] pub object_type: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CheckNameAvailabilityRequest { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CheckNameAvailabilityResult { #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option<String>, #[serde(rename = "nameAvailable", default, skip_serializing_if = "Option::is_none")] pub name_available: Option<bool>, #[serde(default, skip_serializing_if = "Option::is_none")] pub reason: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ClientDiscoveryDisplay { #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub operation: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub provider: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub resource: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ClientDiscoveryForLogSpecification { #[serde(rename = "blobDuration", default, skip_serializing_if = "Option::is_none")] pub blob_duration: Option<String>, #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")] pub display_name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ClientDiscoveryForProperties { #[serde(rename = "serviceSpecification", default, skip_serializing_if = "Option::is_none")] pub service_specification: Option<ClientDiscoveryForServiceSpecification>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ClientDiscoveryForServiceSpecification { #[serde(rename = "logSpecifications", default, skip_serializing_if = "Vec::is_empty")] pub log_specifications: Vec<ClientDiscoveryForLogSpecification>, } #[derive(Clone, Debug, PartialEq, 
Serialize, Deserialize)] pub struct ClientDiscoveryResponse { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<ClientDiscoveryValueForSingleApi>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ClientDiscoveryValueForSingleApi { #[serde(default, skip_serializing_if = "Option::is_none")] pub display: Option<ClientDiscoveryDisplay>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "isDataAction", default, skip_serializing_if = "Option::is_none")] pub is_data_action: Option<bool>, #[serde(default, skip_serializing_if = "Option::is_none")] pub origin: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<ClientDiscoveryForProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CloudError { #[serde(default, skip_serializing_if = "Option::is_none")] pub error: Option<Error>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CopyOnExpiryOption { #[serde(flatten)] pub copy_option: CopyOption, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CopyOption { #[serde(rename = "objectType")] pub object_type: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CustomCopyOption { #[serde(flatten)] pub copy_option: CopyOption, #[serde(default, skip_serializing_if = "Option::is_none")] pub duration: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Datasource { #[serde(rename = "datasourceType", default, skip_serializing_if = "Option::is_none")] pub datasource_type: Option<String>, #[serde(rename = "objectType", default, skip_serializing_if = "Option::is_none")] pub object_type: Option<String>, #[serde(rename = "resourceID")] pub resource_id: String, #[serde(rename = "resourceLocation", default, skip_serializing_if = "Option::is_none")] pub resource_location: Option<String>, #[serde(rename = "resourceName", default, skip_serializing_if = "Option::is_none")] pub resource_name: Option<String>, #[serde(rename = "resourceType", default, skip_serializing_if = "Option::is_none")] pub resource_type: Option<String>, #[serde(rename = "resourceUri", default, skip_serializing_if = "Option::is_none")] pub resource_uri: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DatasourceSet { #[serde(rename = "datasourceType", default, skip_serializing_if = "Option::is_none")] pub datasource_type: Option<String>, #[serde(rename = "objectType", default, skip_serializing_if = "Option::is_none")] pub object_type: Option<String>, #[serde(rename = "resourceID")] pub resource_id: String, #[serde(rename = "resourceLocation", default, skip_serializing_if = "Option::is_none")] pub resource_location: Option<String>, #[serde(rename = "resourceName", default, skip_serializing_if = "Option::is_none")] pub resource_name: Option<String>, #[serde(rename = "resourceType", default, skip_serializing_if = "Option::is_none")] pub resource_type: Option<String>, #[serde(rename = "resourceUri", default, skip_serializing_if = "Option::is_none")] pub resource_uri: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DataStoreInfoBase { #[serde(rename = "dataStoreType")] pub data_store_type: data_store_info_base::DataStoreType, #[serde(rename = "objectType")] pub object_type: String, } pub mod 
data_store_info_base { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum DataStoreType { OperationalStore, VaultStore, ArchiveStore, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DataStoreParameters { #[serde(rename = "objectType")] pub object_type: String, #[serde(rename = "dataStoreType")] pub data_store_type: data_store_parameters::DataStoreType, } pub mod data_store_parameters { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum DataStoreType { OperationalStore, VaultStore, ArchiveStore, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Day { #[serde(default, skip_serializing_if = "Option::is_none")] pub date: Option<i32>, #[serde(rename = "isLast", default, skip_serializing_if = "Option::is_none")] pub is_last: Option<bool>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DeleteOption { pub duration: String, #[serde(rename = "objectType")] pub object_type: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DppIdentityDetails { #[serde(rename = "principalId", skip_serializing)] pub principal_id: Option<String>, #[serde(rename = "tenantId", skip_serializing)] pub tenant_id: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DppResource { #[serde(skip_serializing)] pub id: Option<String>, #[serde(skip_serializing)] pub name: Option<String>, #[serde(rename = "type", skip_serializing)] pub type_: Option<String>, #[serde(rename = "systemData", skip_serializing)] pub system_data: Option<SystemData>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DppResourceList { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DppTrackedResource { #[serde(rename = "eTag", default, skip_serializing_if = "Option::is_none")] pub e_tag: Option<String>, #[serde(skip_serializing)] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub identity: Option<DppIdentityDetails>, #[serde(default, skip_serializing_if = "Option::is_none")] pub location: Option<String>, #[serde(skip_serializing)] pub name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, #[serde(rename = "type", skip_serializing)] pub type_: Option<String>, #[serde(rename = "systemData", skip_serializing)] pub system_data: Option<SystemData>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SystemData { #[serde(rename = "createdBy", default, skip_serializing_if = "Option::is_none")] pub created_by: Option<String>, #[serde(rename = "createdByType", default, skip_serializing_if = "Option::is_none")] pub created_by_type: Option<system_data::CreatedByType>, #[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")] pub created_at: Option<String>, #[serde(rename = "lastModifiedBy", default, skip_serializing_if = "Option::is_none")] pub last_modified_by: Option<String>, #[serde(rename = "lastModifiedByType", default, skip_serializing_if = "Option::is_none")] pub last_modified_by_type: Option<system_data::LastModifiedByType>, #[serde(rename = "lastModifiedAt", default, skip_serializing_if = "Option::is_none")] pub last_modified_at: Option<String>, } pub mod system_data 
{ use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum CreatedByType { User, Application, ManagedIdentity, Key, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum LastModifiedByType { User, Application, ManagedIdentity, Key, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DppTrackedResourceList { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DppWorkerRequest { #[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")] pub subscription_id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub uri: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub headers: Option<serde_json::Value>, #[serde(rename = "supportedGroupVersions", default, skip_serializing_if = "Vec::is_empty")] pub supported_group_versions: Vec<String>, #[serde(rename = "cultureInfo", default, skip_serializing_if = "Option::is_none")] pub culture_info: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub parameters: Option<serde_json::Value>, #[serde(rename = "httpMethod", default, skip_serializing_if = "Option::is_none")] pub http_method: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Error { #[serde(rename = "additionalInfo", skip_serializing)] pub additional_info: Vec<ErrorAdditionalInfo>, #[serde(skip_serializing)] pub code: Option<String>, #[serde(skip_serializing)] pub details: Vec<Error>, #[serde(skip_serializing)] pub message: Option<String>, #[serde(skip_serializing)] pub target: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ErrorAdditionalInfo { #[serde(skip_serializing)] pub info: Option<serde_json::Value>, #[serde(rename = "type", skip_serializing)] pub type_: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ExportJobsResult { #[serde(rename = "blobUrl", skip_serializing)] pub blob_url: Option<String>, #[serde(rename = "blobSasKey", skip_serializing)] pub blob_sas_key: Option<String>, #[serde(rename = "excelFileBlobUrl", skip_serializing)] pub excel_file_blob_url: Option<String>, #[serde(rename = "excelFileBlobSasKey", skip_serializing)] pub excel_file_blob_sas_key: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct FeatureValidationRequest { #[serde(flatten)] pub feature_validation_request_base: FeatureValidationRequestBase, #[serde(rename = "featureType", default, skip_serializing_if = "Option::is_none")] pub feature_type: Option<feature_validation_request::FeatureType>, #[serde(rename = "featureName", default, skip_serializing_if = "Option::is_none")] pub feature_name: Option<String>, } pub mod feature_validation_request { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum FeatureType { Invalid, DataSourceType, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct FeatureValidationRequestBase { #[serde(rename = "objectType")] pub object_type: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct FeatureValidationResponse { #[serde(flatten)] pub feature_validation_response_base: FeatureValidationResponseBase, #[serde(rename = "featureType", default, skip_serializing_if = "Option::is_none")] pub feature_type: Option<feature_validation_response::FeatureType>, 
#[serde(default, skip_serializing_if = "Vec::is_empty")] pub features: Vec<SupportedFeature>, } pub mod feature_validation_response { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum FeatureType { Invalid, DataSourceType, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct FeatureValidationResponseBase { #[serde(rename = "objectType")] pub object_type: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ImmediateCopyOption { #[serde(flatten)] pub copy_option: CopyOption, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct InnerError { #[serde(rename = "additionalInfo", default, skip_serializing_if = "Option::is_none")] pub additional_info: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub code: Option<String>, #[serde(rename = "embeddedInnerError", default, skip_serializing_if = "Option::is_none")] pub embedded_inner_error: Option<InnerError>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ItemLevelRestoreCriteria { #[serde(rename = "objectType")] pub object_type: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ItemLevelRestoreTargetInfo { #[serde(flatten)] pub restore_target_info_base: RestoreTargetInfoBase, #[serde(rename = "restoreCriteria")] pub restore_criteria: Vec<ItemLevelRestoreCriteria>, #[serde(rename = "datasourceInfo")] pub datasource_info: Datasource, #[serde(rename = "datasourceSetInfo", default, skip_serializing_if = "Option::is_none")] pub datasource_set_info: Option<DatasourceSet>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JobExtendedInfo { #[serde(rename = "additionalDetails", default, skip_serializing_if = "Option::is_none")] pub additional_details: Option<serde_json::Value>, #[serde(rename = "backupInstanceState", skip_serializing)] pub backup_instance_state: Option<String>, #[serde(rename = "dataTransferredInBytes", skip_serializing)] pub data_transferred_in_bytes: Option<f64>, #[serde(rename = "recoveryDestination", skip_serializing)] pub recovery_destination: Option<String>, #[serde(rename = "sourceRecoverPoint", default, skip_serializing_if = "Option::is_none")] pub source_recover_point: Option<RestoreJobRecoveryPointDetails>, #[serde(rename = "subTasks", skip_serializing)] pub sub_tasks: Vec<JobSubTask>, #[serde(rename = "targetRecoverPoint", default, skip_serializing_if = "Option::is_none")] pub target_recover_point: Option<RestoreJobRecoveryPointDetails>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JobSubTask { #[serde(rename = "additionalDetails", default, skip_serializing_if = "Option::is_none")] pub additional_details: Option<serde_json::Value>, #[serde(rename = "taskId")] pub task_id: i32, #[serde(rename = "taskName")] pub task_name: String, #[serde(rename = "taskProgress", skip_serializing)] pub task_progress: Option<String>, #[serde(rename = "taskStatus")] pub task_status: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationExtendedInfo { #[serde(rename = "objectType", default, skip_serializing_if = "Option::is_none")] pub object_type: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationJobExtendedInfo { #[serde(flatten)] pub operation_extended_info: OperationExtendedInfo, #[serde(rename = "jobId", default, skip_serializing_if = "Option::is_none")] pub job_id: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, 
Deserialize)] pub struct OperationResource { #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")] pub end_time: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub error: Option<Error>, #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<OperationExtendedInfo>, #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")] pub start_time: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PatchResourceRequestInput { #[serde(default, skip_serializing_if = "Option::is_none")] pub identity: Option<DppIdentityDetails>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PolicyInfo { #[serde(rename = "policyId")] pub policy_id: String, #[serde(rename = "policyVersion", skip_serializing)] pub policy_version: Option<String>, #[serde(rename = "policyParameters", default, skip_serializing_if = "Option::is_none")] pub policy_parameters: Option<PolicyParameters>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PolicyParameters { #[serde(rename = "dataStoreParametersList", default, skip_serializing_if = "Vec::is_empty")] pub data_store_parameters_list: Vec<DataStoreParameters>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProtectionStatusDetails { #[serde(rename = "errorDetails", default, skip_serializing_if = "Option::is_none")] pub error_details: Option<UserFacingError>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<protection_status_details::Status>, } pub mod protection_status_details { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Status { ConfiguringProtection, ConfiguringProtectionFailed, ProtectionConfigured, ProtectionStopped, SoftDeleted, SoftDeleting, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RangeBasedItemLevelRestoreCriteria { #[serde(flatten)] pub item_level_restore_criteria: ItemLevelRestoreCriteria, #[serde(rename = "minMatchingValue", default, skip_serializing_if = "Option::is_none")] pub min_matching_value: Option<String>, #[serde(rename = "maxMatchingValue", default, skip_serializing_if = "Option::is_none")] pub max_matching_value: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RecoveryPointDataStoreDetails { #[serde(rename = "creationTime", default, skip_serializing_if = "Option::is_none")] pub creation_time: Option<String>, #[serde(rename = "expiryTime", default, skip_serializing_if = "Option::is_none")] pub expiry_time: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(rename = "metaData", default, skip_serializing_if = "Option::is_none")] pub meta_data: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub state: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub visible: Option<bool>, #[serde(rename = "rehydrationExpiryTime", skip_serializing)] pub rehydration_expiry_time: 
Option<String>, #[serde(rename = "rehydrationStatus", skip_serializing)] pub rehydration_status: Option<recovery_point_data_store_details::RehydrationStatus>, } pub mod recovery_point_data_store_details { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum RehydrationStatus { #[serde(rename = "CREATE_IN_PROGRESS")] CreateInProgress, #[serde(rename = "COMPLETED")] Completed, #[serde(rename = "DELETE_IN_PROGRESS")] DeleteInProgress, #[serde(rename = "DELETED")] Deleted, #[serde(rename = "FAILED")] Failed, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum RehydrationPriority { Invalid, High, Standard, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RestoreFilesTargetInfo { #[serde(flatten)] pub restore_target_info_base: RestoreTargetInfoBase, #[serde(rename = "targetDetails")] pub target_details: TargetDetails, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RestoreJobRecoveryPointDetails { #[serde(rename = "recoveryPointID", default, skip_serializing_if = "Option::is_none")] pub recovery_point_id: Option<String>, #[serde(rename = "recoveryPointTime", default, skip_serializing_if = "Option::is_none")] pub recovery_point_time: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RestoreTargetInfo { #[serde(flatten)] pub restore_target_info_base: RestoreTargetInfoBase, #[serde(rename = "datasourceInfo")] pub datasource_info: Datasource, #[serde(rename = "datasourceSetInfo", default, skip_serializing_if = "Option::is_none")] pub datasource_set_info: Option<DatasourceSet>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RestoreTargetInfoBase { #[serde(rename = "objectType")] pub object_type: String, #[serde(rename = "recoveryOption")] pub recovery_option: restore_target_info_base::RecoveryOption, #[serde(rename = "restoreLocation", default, skip_serializing_if = "Option::is_none")] pub restore_location: Option<String>, } pub mod restore_target_info_base { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum RecoveryOption { FailIfExists, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RestorableTimeRange { #[serde(rename = "startTime")] pub start_time: String, #[serde(rename = "endTime")] pub end_time: String, #[serde(rename = "objectType", default, skip_serializing_if = "Option::is_none")] pub object_type: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RetentionTag { #[serde(rename = "eTag", skip_serializing)] pub e_tag: Option<String>, #[serde(skip_serializing)] pub id: Option<String>, #[serde(rename = "tagName")] pub tag_name: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ScheduleBasedBackupCriteria { #[serde(flatten)] pub backup_criteria: BackupCriteria, #[serde(rename = "absoluteCriteria", default, skip_serializing_if = "Vec::is_empty")] pub absolute_criteria: Vec<String>, #[serde(rename = "daysOfMonth", default, skip_serializing_if = "Vec::is_empty")] pub days_of_month: Vec<Day>, #[serde(rename = "daysOfTheWeek", default, skip_serializing_if = "Vec::is_empty")] pub days_of_the_week: Vec<String>, #[serde(rename = "monthsOfYear", default, skip_serializing_if = "Vec::is_empty")] pub months_of_year: Vec<String>, #[serde(rename = "scheduleTimes", default, skip_serializing_if = "Vec::is_empty")] pub schedule_times: Vec<String>, #[serde(rename = "weeksOfTheMonth", default, skip_serializing_if = "Vec::is_empty")] 
pub weeks_of_the_month: Vec<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ScheduleBasedTriggerContext { #[serde(flatten)] pub trigger_context: TriggerContext, pub schedule: BackupSchedule, #[serde(rename = "taggingCriteria")] pub tagging_criteria: Vec<TaggingCriteria>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SourceLifeCycle { #[serde(rename = "deleteAfter")] pub delete_after: DeleteOption, #[serde(rename = "sourceDataStore")] pub source_data_store: DataStoreInfoBase, #[serde(rename = "targetDataStoreCopySettings", default, skip_serializing_if = "Vec::is_empty")] pub target_data_store_copy_settings: Vec<TargetCopySetting>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct StorageSetting { #[serde(rename = "datastoreType", default, skip_serializing_if = "Option::is_none")] pub datastore_type: Option<storage_setting::DatastoreType>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<storage_setting::Type>, } pub mod storage_setting { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum DatastoreType { ArchiveStore, SnapshotStore, VaultStore, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Type { GeoRedundant, LocallyRedundant, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SupportedFeature { #[serde(rename = "featureName", default, skip_serializing_if = "Option::is_none")] pub feature_name: Option<String>, #[serde(rename = "supportStatus", default, skip_serializing_if = "Option::is_none")] pub support_status: Option<supported_feature::SupportStatus>, #[serde(rename = "exposureControlledFeatures", default, skip_serializing_if = "Vec::is_empty")] pub exposure_controlled_features: Vec<String>, } pub mod supported_feature { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum SupportStatus { Invalid, NotSupported, AlphaPreview, PrivatePreview, PublicPreview, GenerallyAvailable, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TaggingCriteria { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub criteria: Vec<BackupCriteria>, #[serde(rename = "isDefault")] pub is_default: bool, #[serde(rename = "taggingPriority")] pub tagging_priority: i64, #[serde(rename = "tagInfo")] pub tag_info: RetentionTag, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TargetCopySetting { #[serde(rename = "copyAfter")] pub copy_after: CopyOption, #[serde(rename = "dataStore")] pub data_store: DataStoreInfoBase, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TargetDetails { #[serde(rename = "filePrefix")] pub file_prefix: String, #[serde(rename = "restoreTargetLocationType")] pub restore_target_location_type: target_details::RestoreTargetLocationType, pub url: String, } pub mod target_details { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum RestoreTargetLocationType { Invalid, AzureBlobs, AzureFiles, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TriggerBackupRequest { #[serde(rename = "backupRuleOptions")] pub backup_rule_options: AdHocBackupRuleOptions, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TriggerContext { #[serde(rename = "objectType")] pub object_type: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct UserFacingError { #[serde(default, skip_serializing_if = "Option::is_none")] 
pub code: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub details: Vec<UserFacingError>, #[serde(rename = "innerError", default, skip_serializing_if = "Option::is_none")] pub inner_error: Option<InnerError>, #[serde(rename = "isRetryable", default, skip_serializing_if = "Option::is_none")] pub is_retryable: Option<bool>, #[serde(rename = "isUserError", default, skip_serializing_if = "Option::is_none")] pub is_user_error: Option<bool>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option<String>, #[serde(rename = "recommendedAction", default, skip_serializing_if = "Vec::is_empty")] pub recommended_action: Vec<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub target: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ValidateForBackupRequest { #[serde(rename = "backupInstance")] pub backup_instance: BackupInstance, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ValidateRestoreRequestObject { #[serde(rename = "restoreRequestObject")] pub restore_request_object: AzureBackupRestoreRequest, }
ArchiveStore,
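The generated models above rely entirely on serde attributes for their wire format: rename supplies the service's camelCase (or reserved-word) field names, skip_serializing_if omits unset optional fields, and flatten folds base-type fields into the derived type. The following is a minimal round-trip sketch, assuming these definitions are in scope as a models module and serde_json is a dependency; the vault name and resource-type strings are illustrative only, not values taken from the generated crate.

// Minimal sketch, assuming the definitions above are available as `models`
// and serde_json is on the dependency list.
use models::CheckNameAvailabilityRequest;

fn main() {
    let req = CheckNameAvailabilityRequest {
        name: Some("my-backup-vault".to_string()),
        // Illustrative resource type string; not taken from the generated crate.
        type_: Some("Microsoft.DataProtection/BackupVaults".to_string()),
    };

    // `rename = "type"` restores the reserved keyword on the wire, and
    // `skip_serializing_if = "Option::is_none"` would drop either field if unset.
    let body = serde_json::to_string(&req).unwrap();
    assert_eq!(
        body,
        r#"{"name":"my-backup-vault","type":"Microsoft.DataProtection/BackupVaults"}"#
    );

    // Deserialization tolerates missing optional fields thanks to `default`.
    let parsed: CheckNameAvailabilityRequest =
        serde_json::from_str(r#"{"name":"other"}"#).unwrap();
    assert_eq!(parsed.type_, None);
}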
create.go
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"

	"github.com/golang/glog"
	"github.com/spf13/cobra"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/kops/cmd/kops/util"
	kopsapi "k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/pkg/apis/kops/v1alpha1"
	"k8s.io/kops/pkg/kopscodecs"
	"k8s.io/kops/upup/pkg/fi/cloudup"
	"k8s.io/kops/util/pkg/vfs"
	"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
	"k8s.io/kubernetes/pkg/kubectl/resource"
	"k8s.io/kubernetes/pkg/kubectl/util/i18n"
)

type CreateOptions struct {
	resource.FilenameOptions
}

var (
	createLong = templates.LongDesc(i18n.T(`
	Create a resource:` + validResources + `

	Create a cluster, instancegroup or secret using command line parameters,
	YAML configuration specification files, or stdin.
	(Note: secrets cannot be created from YAML config files yet).
	`))

	createExample = templates.Examples(i18n.T(`
	# Create a cluster from the configuration specification in a YAML file
	kops create -f my-cluster.yaml

	# Create secret from secret spec file
	kops create -f secret.yaml

	# Create an instancegroup based on the YAML passed into stdin.
	cat instancegroup.yaml | kops create -f -

	# Create a cluster in AWS
	kops create cluster --name=kubernetes-cluster.example.com \
		--state=s3://kops-state-1234 --zones=eu-west-1a \
		--node-count=2 --node-size=t2.micro --master-size=t2.micro \
		--dns-zone=example.com

	# Create an instancegroup for the k8s-cluster.example.com cluster.
	kops create ig --name=k8s-cluster.example.com node-example \
		--role node --subnet my-subnet-name

	# Create a new ssh public key called admin.
	kops create secret sshpublickey admin -i ~/.ssh/id_rsa.pub \
		--name k8s-cluster.example.com --state s3://example.com
	`))

	createShort = i18n.T("Create a resource by command line, filename or stdin.")
)

func NewCmdCreate(f *util.Factory, out io.Writer) *cobra.Command {
	options := &CreateOptions{}

	cmd := &cobra.Command{
		Use:     "create -f FILENAME",
		Short:   createShort,
		Long:    createLong,
		Example: createExample,
		Run: func(cmd *cobra.Command, args []string) {
			if cmdutil.IsFilenameSliceEmpty(options.Filenames) {
				cmd.Help()
				return
			}
			cmdutil.CheckErr(RunCreate(f, out, options))
		},
	}

	cmd.Flags().StringSliceVarP(&options.Filenames, "filename", "f", options.Filenames, "Filename to use to create the resource")
	//usage := "to use to create the resource"
	//cmdutil.AddFilenameOptionFlags(cmd, options, usage)
	cmd.MarkFlagRequired("filename")
	//cmdutil.AddValidateFlags(cmd)
	//cmdutil.AddOutputFlagsForMutation(cmd)
	//cmdutil.AddApplyAnnotationFlags(cmd)
	//cmdutil.AddRecordFlag(cmd)
	//cmdutil.AddInclude3rdPartyFlags(cmd)

	// create subcommands
	cmd.AddCommand(NewCmdCreateCluster(f, out))
	cmd.AddCommand(NewCmdCreateInstanceGroup(f, out))
	cmd.AddCommand(NewCmdCreateSecret(f, out))

	return cmd
}

func RunCreate(f *util.Factory, out io.Writer, c *CreateOptions) error {
	clientset, err := f.Clientset()
	if err != nil {
		return err
	}

	// Codecs provides access to encoding and decoding for the scheme
	codecs := kopscodecs.Codecs //serializer.NewCodecFactory(scheme)

	codec := codecs.UniversalDecoder(kopsapi.SchemeGroupVersion)

	var clusterName = ""
	//var cSpec = false
	var sb bytes.Buffer
	fmt.Fprintf(&sb, "\n")
	for _, f := range c.Filenames {
		var contents []byte
		if f == "-" {
			file := os.Stdin
			defer file.Close()
			buf := new(bytes.Buffer)
			buf.ReadFrom(file)
			contents = buf.Bytes()
		} else {
			contents, err = vfs.Context.ReadFile(f)
			if err != nil {
				return fmt.Errorf("error reading file %q: %v", f, err)
			}
		}
		// TODO: this does not support a JSON array
		sections := bytes.Split(bytes.Replace(contents, []byte("\r\n"), []byte("\n"), -1), []byte("\n---\n"))
		for _, section := range sections {
			defaults := &schema.GroupVersionKind{
				Group:   v1alpha1.SchemeGroupVersion.Group,
				Version: v1alpha1.SchemeGroupVersion.Version,
			}
			o, gvk, err := codec.Decode(section, defaults, nil)
			if err != nil {
				return fmt.Errorf("error parsing file %q: %v", f, err)
			}

			switch v := o.(type) {
			case *kopsapi.Cluster:
				// Adding a PerformAssignments() call here as the user might be trying to use
				// the new `-f` feature, with an old cluster definition.
				err = cloudup.PerformAssignments(v)
				if err != nil {
					return fmt.Errorf("error populating configuration: %v", err)
				}
				_, err = clientset.CreateCluster(v)
				if err != nil {
					if apierrors.IsAlreadyExists(err) {
						return fmt.Errorf("cluster %q already exists", v.ObjectMeta.Name)
					}
					return fmt.Errorf("error creating cluster: %v", err)
				} else {
					fmt.Fprintf(&sb, "Created cluster/%s\n", v.ObjectMeta.Name)
					//cSpec = true
				}

			case *kopsapi.InstanceGroup:
				clusterName = v.ObjectMeta.Labels[kopsapi.LabelClusterName]
				if clusterName == "" {
					return fmt.Errorf("must specify %q label with cluster name to create instanceGroup", kopsapi.LabelClusterName)
				}
				cluster, err := clientset.GetCluster(clusterName)
				if err != nil {
					return fmt.Errorf("error querying cluster %q: %v", clusterName, err)
				}
				if cluster == nil {
					return fmt.Errorf("cluster %q not found", clusterName)
				}
				_, err = clientset.InstanceGroupsFor(cluster).Create(v)
				if err != nil {
					if apierrors.IsAlreadyExists(err) {
						return fmt.Errorf("instanceGroup %q already exists", v.ObjectMeta.Name)
					}
					return fmt.Errorf("error creating instanceGroup: %v", err)
				} else {
					fmt.Fprintf(&sb, "Created instancegroup/%s\n", v.ObjectMeta.Name)
				}

			case *kopsapi.SSHCredential:
				clusterName = v.ObjectMeta.Labels[kopsapi.LabelClusterName]
				if clusterName == "" {
					return fmt.Errorf("must specify %q label with cluster name to create SSHCredential", kopsapi.LabelClusterName)
				}
				if v.Spec.PublicKey == ""
				cluster, err := clientset.GetCluster(clusterName)
				if err != nil {
					return err
				}
				sshCredentialStore, err := clientset.SSHCredentialStore(cluster)
				if err != nil {
					return err
				}
				sshKeyArr := []byte(v.Spec.PublicKey)
				err = sshCredentialStore.AddSSHPublicKey("admin", sshKeyArr)
				if err != nil {
					return err
				} else {
					fmt.Fprintf(&sb, "Added ssh credential\n")
				}

			default:
				glog.V(2).Infof("Type of object was %T", v)
				return fmt.Errorf("Unhandled kind %q in %s", gvk, f)
			}
		}
	}

	{
		// If there is a value in this sb, this should mean that we have something to deploy
		// so let's advise the user how to engage the cloud provider and deploy
		if sb.String() != "" {
			fmt.Fprintf(&sb, "\n")
			fmt.Fprintf(&sb, "To deploy these resources, run: kops update cluster %s --yes\n", clusterName)
			fmt.Fprintf(&sb, "\n")
		}

		_, err := out.Write(sb.Bytes())
		if err != nil {
			return fmt.Errorf("error writing to output: %v", err)
		}
	}

	return nil
}
{ return fmt.Errorf("spec.PublicKey is required") }
specialization-cross-crate-no-gate.rs
// run-pass
// Test that specialization works even if only the upstream crate enables it

// aux-build:specialization_cross_crate.rs

extern crate specialization_cross_crate;

use specialization_cross_crate::*;

fn main()
{
    assert!(0u8.foo() == "generic Clone");
    assert!(vec![0u8].foo() == "generic Vec");
    assert!(vec![0i32].foo() == "Vec<i32>");
    assert!(0i32.foo() == "i32");
    assert!(String::new().foo() == "String");
    assert!(((), 0).foo() == "generic pair");
    assert!(((), ()).foo() == "generic uniform pair");
    assert!((0u8, 0u32).foo() == "(u8, u32)");
    assert!((0u8, 0u8).foo() == "(u8, u8)");
}
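The upstream crate itself is not reproduced in this test file. The sketch below is a hypothetical illustration of the kind of impl hierarchy that yields strings such as "generic Clone", "generic Vec", and "Vec<i32>", assuming only the upstream crate enables the nightly specialization feature; the pair impls asserted above are omitted for brevity. The trait and method names mirror the calls in main, but the bodies are an assumption, not the actual specialization_cross_crate source.

// Hypothetical upstream-crate sketch (not the real specialization_cross_crate
// source): a blanket impl provides the fallback string, and progressively more
// specific impls override it. Only this crate needs the nightly gate.
#![feature(specialization)]

pub trait Foo {
    fn foo(&self) -> &'static str;
}

// Base case: anything Clone gets the generic answer.
impl<T: Clone> Foo for T {
    default fn foo(&self) -> &'static str { "generic Clone" }
}

// More specific: any Vec of Clone elements.
impl<T: Clone> Foo for Vec<T> {
    default fn foo(&self) -> &'static str { "generic Vec" }
}

// Fully concrete specializations.
impl Foo for Vec<i32> {
    fn foo(&self) -> &'static str { "Vec<i32>" }
}

impl Foo for i32 {
    fn foo(&self) -> &'static str { "i32" }
}

impl Foo for String {
    fn foo(&self) -> &'static str { "String" }
}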
test_zone_operations.py
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import mock import grpc from grpc.experimental import aio import math import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule from requests import Response from requests.sessions import Session from google import auth from google.api_core import client_options from google.api_core import exceptions from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async from google.auth import credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.compute_v1.services.zone_operations import ZoneOperationsClient from google.cloud.compute_v1.services.zone_operations import transports from google.cloud.compute_v1.types import compute from google.oauth2 import service_account def client_cert_source_callback(): return b"cert bytes", b"key bytes" # If default endpoint is localhost, then default mtls endpoint will be the same. # This method modifies the default endpoint so the client can produce a different # mtls endpoint for endpoint testing purposes. def modify_default_endpoint(client): return ( "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT ) def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" sandbox_endpoint = "example.sandbox.googleapis.com" sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" non_googleapi = "api.example.com" assert ZoneOperationsClient._get_default_mtls_endpoint(None) is None assert ( ZoneOperationsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint ) assert ( ZoneOperationsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint ) assert ( ZoneOperationsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint ) assert ( ZoneOperationsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint ) assert ( ZoneOperationsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi ) def test_zone_operations_client_from_service_account_info(): creds = credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} client = ZoneOperationsClient.from_service_account_info(info) assert client.transport._credentials == creds assert client.transport._host == "compute.googleapis.com:443" @pytest.mark.parametrize("client_class", [ZoneOperationsClient,]) def test_zone_operations_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds client = 
client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds assert client.transport._host == "compute.googleapis.com:443" def test_zone_operations_client_get_transport_class(): transport = ZoneOperationsClient.get_transport_class() available_transports = [ transports.ZoneOperationsRestTransport, ] assert transport in available_transports transport = ZoneOperationsClient.get_transport_class("rest") assert transport == transports.ZoneOperationsRestTransport @pytest.mark.parametrize( "client_class,transport_class,transport_name", [(ZoneOperationsClient, transports.ZoneOperationsRestTransport, "rest"),], ) @mock.patch.object( ZoneOperationsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ZoneOperationsClient), ) def test_zone_operations_client_client_options( client_class, transport_class, transport_name ): # Check that if channel is provided we won't create a new one. with mock.patch.object(ZoneOperationsClient, "get_transport_class") as gtc: transport = transport_class(credentials=credentials.AnonymousCredentials()) client = client_class(transport=transport) gtc.assert_not_called() # Check that if channel is provided via str we will create a new one. with mock.patch.object(ZoneOperationsClient, "get_transport_class") as gtc: client = client_class(transport=transport_name) gtc.assert_called() # Check the case api_endpoint is provided. options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "always". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): with pytest.raises(MutualTLSChannelError): client = client_class() # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): with pytest.raises(ValueError): client = client_class() # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,use_client_cert_env", [ (ZoneOperationsClient, transports.ZoneOperationsRestTransport, "rest", "true"), (ZoneOperationsClient, transports.ZoneOperationsRestTransport, "rest", "false"), ], ) @mock.patch.object( ZoneOperationsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ZoneOperationsClient), ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_zone_operations_client_mtls_env_auto( client_class, transport_class, transport_name, use_client_cert_env ): # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. # Check the case client_cert_source is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): options = client_options.ClientOptions( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options) if use_client_cert_env == "false": expected_client_cert_source = None expected_host = client.DEFAULT_ENDPOINT else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=client_cert_source_callback, ): if use_client_cert_env == "false": expected_host = client.DEFAULT_ENDPOINT expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT expected_client_cert_source = client_cert_source_callback patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) # Check the case client_cert_source and ADC client cert are not provided. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name", [(ZoneOperationsClient, transports.ZoneOperationsRestTransport, "rest"),], ) def test_zone_operations_client_client_options_scopes( client_class, transport_class, transport_name ): # Check the case scopes are provided. options = client_options.ClientOptions(scopes=["1", "2"],) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name", [(ZoneOperationsClient, transports.ZoneOperationsRestTransport, "rest"),], ) def test_zone_operations_client_client_options_credentials_file( client_class, transport_class, transport_name ): # Check the case credentials file is provided. options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) def test_delete_rest( transport: str = "rest", request_type=compute.DeleteZoneOperationRequest ): client = ZoneOperationsClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # Designate an appropriate value for the returned response. return_value = compute.DeleteZoneOperationResponse() # Wrap the value into a proper Response obj json_return_value = compute.DeleteZoneOperationResponse.to_json(return_value) response_value = Response() response_value.status_code = 200 response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value response = client.delete(request) # Establish that the response is the type that we expect. assert isinstance(response, compute.DeleteZoneOperationResponse) def test_delete_rest_from_dict(): test_delete_rest(request_type=dict) def test_delete_rest_flattened(): client = ZoneOperationsClient(credentials=credentials.AnonymousCredentials(),) # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # Designate an appropriate value for the returned response. 
return_value = compute.DeleteZoneOperationResponse() # Wrap the value into a proper Response obj json_return_value = compute.DeleteZoneOperationResponse.to_json(return_value) response_value = Response() response_value.status_code = 200 response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.delete( project="project_value", zone="zone_value", operation="operation_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, http_call, http_params = req.mock_calls[0] body = http_params.get("json") assert "project_value" in http_call[1] + str(body) assert "zone_value" in http_call[1] + str(body) assert "operation_value" in http_call[1] + str(body) def test_delete_rest_flattened_error(): client = ZoneOperationsClient(credentials=credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.delete( compute.DeleteZoneOperationRequest(), project="project_value", zone="zone_value", operation="operation_value", ) def test_get_rest( transport: str = "rest", request_type=compute.GetZoneOperationRequest ): client = ZoneOperationsClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # Designate an appropriate value for the returned response. return_value = compute.Operation( client_operation_id="client_operation_id_value", creation_timestamp="creation_timestamp_value", description="description_value", end_time="end_time_value", error=compute.Error(errors=[compute.Errors(code="code_value")]), http_error_message="http_error_message_value", http_error_status_code=2374, id="id_value", insert_time="insert_time_value", kind="kind_value", name="name_value", operation_type="operation_type_value", progress=885, region="region_value", self_link="self_link_value", start_time="start_time_value", status=compute.Operation.Status.DONE, status_message="status_message_value", target_id="target_id_value", target_link="target_link_value", user="user_value", warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)], zone="zone_value", ) # Wrap the value into a proper Response obj json_return_value = compute.Operation.to_json(return_value) response_value = Response() response_value.status_code = 200 response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value response = client.get(request) # Establish that the response is the type that we expect. 
assert isinstance(response, compute.Operation) assert response.client_operation_id == "client_operation_id_value" assert response.creation_timestamp == "creation_timestamp_value" assert response.description == "description_value" assert response.end_time == "end_time_value" assert response.error == compute.Error(errors=[compute.Errors(code="code_value")]) assert response.http_error_message == "http_error_message_value" assert response.http_error_status_code == 2374 assert response.id == "id_value" assert response.insert_time == "insert_time_value" assert response.kind == "kind_value" assert response.name == "name_value" assert response.operation_type == "operation_type_value" assert response.progress == 885 assert response.region == "region_value" assert response.self_link == "self_link_value" assert response.start_time == "start_time_value" assert response.status == compute.Operation.Status.DONE assert response.status_message == "status_message_value" assert response.target_id == "target_id_value" assert response.target_link == "target_link_value" assert response.user == "user_value" assert response.warnings == [ compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED) ] assert response.zone == "zone_value" def test_get_rest_from_dict(): test_get_rest(request_type=dict) def test_get_rest_flattened(): client = ZoneOperationsClient(credentials=credentials.AnonymousCredentials(),) # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # Designate an appropriate value for the returned response. return_value = compute.Operation() # Wrap the value into a proper Response obj json_return_value = compute.Operation.to_json(return_value) response_value = Response() response_value.status_code = 200 response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get( project="project_value", zone="zone_value", operation="operation_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, http_call, http_params = req.mock_calls[0] body = http_params.get("json") assert "project_value" in http_call[1] + str(body) assert "zone_value" in http_call[1] + str(body) assert "operation_value" in http_call[1] + str(body) def test_get_rest_flattened_error(): client = ZoneOperationsClient(credentials=credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.get( compute.GetZoneOperationRequest(), project="project_value", zone="zone_value", operation="operation_value", ) def test_list_rest( transport: str = "rest", request_type=compute.ListZoneOperationsRequest ): client = ZoneOperationsClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # Designate an appropriate value for the returned response. 
return_value = compute.OperationList( id="id_value", items=[compute.Operation(client_operation_id="client_operation_id_value")], kind="kind_value", next_page_token="next_page_token_value", self_link="self_link_value", warning=compute.Warning(code=compute.Warning.Code.CLEANUP_FAILED), ) # Wrap the value into a proper Response obj json_return_value = compute.OperationList.to_json(return_value) response_value = Response() response_value.status_code = 200 response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value response = client.list(request) assert response.raw_page is response # Establish that the response is the type that we expect. assert isinstance(response, compute.OperationList) assert response.id == "id_value" assert response.items == [ compute.Operation(client_operation_id="client_operation_id_value") ] assert response.kind == "kind_value" assert response.next_page_token == "next_page_token_value" assert response.self_link == "self_link_value" assert response.warning == compute.Warning(code=compute.Warning.Code.CLEANUP_FAILED) def test_list_rest_from_dict(): test_list_rest(request_type=dict) def test_list_rest_flattened(): client = ZoneOperationsClient(credentials=credentials.AnonymousCredentials(),) # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # Designate an appropriate value for the returned response. return_value = compute.OperationList() # Wrap the value into a proper Response obj json_return_value = compute.OperationList.to_json(return_value) response_value = Response() response_value.status_code = 200 response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.list( project="project_value", zone="zone_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, http_call, http_params = req.mock_calls[0] body = http_params.get("json") assert "project_value" in http_call[1] + str(body) assert "zone_value" in http_call[1] + str(body) def test_list_rest_flattened_error(): client = ZoneOperationsClient(credentials=credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.list( compute.ListZoneOperationsRequest(), project="project_value", zone="zone_value", ) def test_wait_rest( transport: str = "rest", request_type=compute.WaitZoneOperationRequest ): client = ZoneOperationsClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # Designate an appropriate value for the returned response. 
return_value = compute.Operation( client_operation_id="client_operation_id_value", creation_timestamp="creation_timestamp_value", description="description_value", end_time="end_time_value", error=compute.Error(errors=[compute.Errors(code="code_value")]), http_error_message="http_error_message_value", http_error_status_code=2374, id="id_value", insert_time="insert_time_value", kind="kind_value", name="name_value", operation_type="operation_type_value", progress=885, region="region_value", self_link="self_link_value", start_time="start_time_value", status=compute.Operation.Status.DONE, status_message="status_message_value", target_id="target_id_value", target_link="target_link_value", user="user_value", warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)], zone="zone_value", ) # Wrap the value into a proper Response obj json_return_value = compute.Operation.to_json(return_value) response_value = Response() response_value.status_code = 200 response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value response = client.wait(request) # Establish that the response is the type that we expect. assert isinstance(response, compute.Operation) assert response.client_operation_id == "client_operation_id_value" assert response.creation_timestamp == "creation_timestamp_value" assert response.description == "description_value" assert response.end_time == "end_time_value" assert response.error == compute.Error(errors=[compute.Errors(code="code_value")]) assert response.http_error_message == "http_error_message_value" assert response.http_error_status_code == 2374 assert response.id == "id_value" assert response.insert_time == "insert_time_value" assert response.kind == "kind_value" assert response.name == "name_value" assert response.operation_type == "operation_type_value" assert response.progress == 885 assert response.region == "region_value" assert response.self_link == "self_link_value" assert response.start_time == "start_time_value" assert response.status == compute.Operation.Status.DONE assert response.status_message == "status_message_value" assert response.target_id == "target_id_value" assert response.target_link == "target_link_value" assert response.user == "user_value" assert response.warnings == [ compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED) ] assert response.zone == "zone_value" def test_wait_rest_from_dict(): test_wait_rest(request_type=dict) def test_wait_rest_flattened(): client = ZoneOperationsClient(credentials=credentials.AnonymousCredentials(),) # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # Designate an appropriate value for the returned response. return_value = compute.Operation() # Wrap the value into a proper Response obj json_return_value = compute.Operation.to_json(return_value) response_value = Response() response_value.status_code = 200 response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.wait( project="project_value", zone="zone_value", operation="operation_value", ) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, http_call, http_params = req.mock_calls[0] body = http_params.get("json") assert "project_value" in http_call[1] + str(body) assert "zone_value" in http_call[1] + str(body) assert "operation_value" in http_call[1] + str(body) def test_wait_rest_flattened_error(): client = ZoneOperationsClient(credentials=credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.wait( compute.WaitZoneOperationRequest(), project="project_value", zone="zone_value", operation="operation_value", ) def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.ZoneOperationsRestTransport( credentials=credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = ZoneOperationsClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # It is an error to provide a credentials file and a transport instance. transport = transports.ZoneOperationsRestTransport( credentials=credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = ZoneOperationsClient( client_options={"credentials_file": "credentials.json"}, transport=transport, ) # It is an error to provide scopes and a transport instance. transport = transports.ZoneOperationsRestTransport( credentials=credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = ZoneOperationsClient( client_options={"scopes": ["1", "2"]}, transport=transport, ) def test_transport_instance(): # A client may be instantiated with a custom transport instance. transport = transports.ZoneOperationsRestTransport( credentials=credentials.AnonymousCredentials(), ) client = ZoneOperationsClient(transport=transport) assert client.transport is transport @pytest.mark.parametrize("transport_class", [transports.ZoneOperationsRestTransport,]) def test_transport_adc(transport_class): # Test default credentials are used if not provided. with mock.patch.object(auth, "default") as adc: adc.return_value = (credentials.AnonymousCredentials(), None) transport_class() adc.assert_called_once() def test_zone_operations_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error with pytest.raises(exceptions.DuplicateCredentialArgs): transport = transports.ZoneOperationsTransport( credentials=credentials.AnonymousCredentials(), credentials_file="credentials.json", ) def test_zone_operations_base_transport(): # Instantiate the base transport. with mock.patch( "google.cloud.compute_v1.services.zone_operations.transports.ZoneOperationsTransport.__init__" ) as Transport: Transport.return_value = None transport = transports.ZoneOperationsTransport( credentials=credentials.AnonymousCredentials(), ) # Every method on the transport should just blindly # raise NotImplementedError. 
methods = ( "delete", "get", "list", "wait", ) for method in methods: with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) def test_zone_operations_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object( auth, "load_credentials_from_file" ) as load_creds, mock.patch( "google.cloud.compute_v1.services.zone_operations.transports.ZoneOperationsTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None load_creds.return_value = (credentials.AnonymousCredentials(), None) transport = transports.ZoneOperationsTransport( credentials_file="credentials.json", quota_project_id="octopus", ) load_creds.assert_called_once_with( "credentials.json", scopes=( "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/cloud-platform", ), quota_project_id="octopus", ) def test_zone_operations_base_transport_with_adc(): # Test the default credentials are used if credentials and credentials_file are None. with mock.patch.object(auth, "default") as adc, mock.patch( "google.cloud.compute_v1.services.zone_operations.transports.ZoneOperationsTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None adc.return_value = (credentials.AnonymousCredentials(), None) transport = transports.ZoneOperationsTransport() adc.assert_called_once() def test_zone_operations_auth_adc(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(auth, "default") as adc: adc.return_value = (credentials.AnonymousCredentials(), None) ZoneOperationsClient() adc.assert_called_once_with( scopes=( "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/cloud-platform", ), quota_project_id=None, ) def test_zone_operations_http_transport_client_cert_source_for_mtls(): cred = credentials.AnonymousCredentials() with mock.patch( "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" ) as mock_configure_mtls_channel: transports.ZoneOperationsRestTransport( credentials=cred, client_cert_source_for_mtls=client_cert_source_callback ) mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) def test_zone_operations_host_no_port(): client = ZoneOperationsClient( credentials=credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="compute.googleapis.com" ), ) assert client.transport._host == "compute.googleapis.com:443" def test_zone_operations_host_with_port(): client = ZoneOperationsClient( credentials=credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="compute.googleapis.com:8000" ), ) assert client.transport._host == "compute.googleapis.com:8000" def test_common_billing_account_path(): billing_account = "squid" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) actual = ZoneOperationsClient.common_billing_account_path(billing_account) assert expected == actual def test_parse_common_billing_account_path(): expected = { "billing_account": "clam", } path = ZoneOperationsClient.common_billing_account_path(**expected) # Check that the path construction is reversible. 
actual = ZoneOperationsClient.parse_common_billing_account_path(path) assert expected == actual def test_common_folder_path(): folder = "whelk" expected = "folders/{folder}".format(folder=folder,) actual = ZoneOperationsClient.common_folder_path(folder) assert expected == actual def test_parse_common_folder_path(): expected = { "folder": "octopus", } path = ZoneOperationsClient.common_folder_path(**expected) # Check that the path construction is reversible. actual = ZoneOperationsClient.parse_common_folder_path(path) assert expected == actual def test_common_organization_path(): organization = "oyster" expected = "organizations/{organization}".format(organization=organization,) actual = ZoneOperationsClient.common_organization_path(organization) assert expected == actual def test_parse_common_organization_path(): expected = { "organization": "nudibranch", } path = ZoneOperationsClient.common_organization_path(**expected) # Check that the path construction is reversible. actual = ZoneOperationsClient.parse_common_organization_path(path) assert expected == actual def test_common_project_path(): project = "cuttlefish" expected = "projects/{project}".format(project=project,) actual = ZoneOperationsClient.common_project_path(project) assert expected == actual def test_parse_common_project_path():
expected = { "project": "mussel", } path = ZoneOperationsClient.common_project_path(**expected) # Check that the path construction is reversible. actual = ZoneOperationsClient.parse_common_project_path(path) assert expected == actual def test_common_location_path(): project = "winkle" location = "nautilus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) actual = ZoneOperationsClient.common_location_path(project, location) assert expected == actual def test_parse_common_location_path(): expected = { "project": "scallop", "location": "abalone", } path = ZoneOperationsClient.common_location_path(**expected) # Check that the path construction is reversible. actual = ZoneOperationsClient.parse_common_location_path(path) assert expected == actual def test_client_withDEFAULT_CLIENT_INFO(): client_info = gapic_v1.client_info.ClientInfo() with mock.patch.object( transports.ZoneOperationsTransport, "_prep_wrapped_messages" ) as prep: client = ZoneOperationsClient( credentials=credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) with mock.patch.object( transports.ZoneOperationsTransport, "_prep_wrapped_messages" ) as prep: transport_class = ZoneOperationsClient.get_transport_class() transport = transport_class( credentials=credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info)
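For orientation, a minimal distillation of the REST-mocking pattern the zone-operations tests above repeat: the transport's underlying requests Session is patched so no real HTTP call is made, and a canned compute.Operation is served back as the JSON body. The API calls mirror the test code itself; the import paths are assumptions inferred from the module names visible in those tests and may differ between library versions.

from unittest import mock

from requests import Response
from requests.sessions import Session

from google.auth import credentials
from google.cloud.compute_v1.services.zone_operations import ZoneOperationsClient
from google.cloud.compute_v1.types import compute


def demo_wait_with_mocked_rest():
    client = ZoneOperationsClient(credentials=credentials.AnonymousCredentials())
    # Patch the underlying requests Session so no real HTTP request is sent.
    with mock.patch.object(Session, "request") as req:
        canned = compute.Operation(name="name_value", status=compute.Operation.Status.DONE)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = compute.Operation.to_json(canned).encode("UTF-8")
        req.return_value = response_value
        op = client.wait(project="project_value", zone="zone_value", operation="operation_value")
    assert op.name == "name_value"
    assert op.status == compute.Operation.Status.DONE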
functions_1.js
var searchData=
[ ['graph',['Graph',['../classlitegraph_1_1Graph.html#a12cc129bd0eb148e5703d810a825318b',1,'litegraph::Graph']]] ];
shared_adv_deflector_shields.py
#### NOTICE: THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES from swgpy.object import * def create(kernel):
result = Intangible() result.template = "object/draft_schematic/space/shields/shared_adv_deflector_shields.iff" result.attribute_template_id = -1 result.stfName("string_id_table","") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
rna_star_index_builder.py
#!/usr/bin/env python

import argparse
import json


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config-file')
    parser.add_argument('--value')
    parser.add_argument('--dbkey')
    parser.add_argument('--name')
    parser.add_argument('--subdir')
    parser.add_argument('--data-table')
    parser.add_argument('--with-gene-model', action='store_true')
    parser.add_argument('--index-version')
    args = parser.parse_args()

    if args.dbkey in [None, '', '?']:
        raise Exception(
            '"%s" is not a valid dbkey. You must specify a valid dbkey.' % (args.dbkey)
        )

    with_gene_model = "0"
    if args.with_gene_model:
        with_gene_model = "1"

    data_manager_dict = {
        'data_tables': {
            args.data_table: [
                {
                    "value": args.value,
                    "dbkey": args.dbkey,
                    "name": args.name,
                    "path": args.subdir,
                    "with_gene_model": with_gene_model,
                    "version": args.index_version
                }
            ]
        }
    }

    # Write the data-manager JSON and close the file handle explicitly.
    with open(args.config_file, 'w') as config_file:
        json.dump(data_manager_dict, config_file, sort_keys=True)
if __name__ == "__main__": main()
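For reference, a sketch of what the script above writes. Given a hypothetical invocation (every value below is made up for illustration), the resulting data-table JSON follows the dict built in main():

# Hypothetical command line:
#   python rna_star_index_builder.py --config-file out.json --value hg38 \
#       --dbkey hg38 --name "hg38 STAR index" --subdir hg38/star \
#       --data-table rnastar_index2 --with-gene-model --index-version 2.7.4a
#
# out.json would then contain:
expected = {
    "data_tables": {
        "rnastar_index2": [
            {
                "dbkey": "hg38",
                "name": "hg38 STAR index",
                "path": "hg38/star",
                "value": "hg38",
                "version": "2.7.4a",
                "with_gene_model": "1",
            }
        ]
    }
}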
arco.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Arco CLI

Usage:
  arco (new | n) -t <title> -g <tag> -f <filename>
  arco (generate | g)
  arco (deploy | d)
  arco -h | --help
  arco -v | --version

Subcommands:
  new        Create a new blank page
  generate   Generate pages
  deploy     Deployment for github

Options:
  -h, --help     Help information
  -v, --version  Show version
  -g <tag>       Specify the tag
  -t <title>     Specify the new page title
  -f <filename>  Specify the new page filename
"""

import os
import json
import markdown
from docopt import docopt


class Utils(object):

    def mkdir(self, path):
        "Create a folder"
        os.mkdir(path)
        print(f'INFO: Folder {path} created!')

    def read_file(self, file):
        "Return the content of a text file"
        with open(file, 'r') as f:
            return f.read()

    def write_file(self, file, content):
        "Write text to a file"
        path = os.path.split(file)[0]  # split file path
        if path != '':
            # if the folder does not exist, create it
            is_directory = os.path.isdir(path)
            if not is_directory:
                self.mkdir(path)
        with open(file, 'wt') as f:
            f.write(content)
        print(f"INFO: File {file} modified!")

    def gen_html(self, src):
        "Return html generated from markdown"
        exts = ['markdown.extensions.extra', 'markdown.extensions.codehilite',
                'markdown.extensions.tables', 'markdown.extensions.toc',
                'markdown.extensions.footnotes']
        html = markdown.markdown(src, extensions=exts)
        return html


class Arco(object):

    def
(self, utils):
        self.utils = utils
        self.config = self.load_config()

    def load_config(self):
        "Load 'config.json' then return a dict"
        raw = self.utils.read_file('config.json')
        data = json.loads(raw)
        print('INFO: Config loaded!')
        return data

    def new_page(self, title, tag, file):
        "Generate a new markdown page"
        text = f"TITLE: {title}\nTAG: {tag}\n"
        self.utils.write_file(f'markdown/{file}', text)

    def load_md(self, file):
        "Return the title and tag of a markdown file"
        with open(file, 'r') as f:
            lines = f.readlines()
        # split title and tag
        title = lines[0].split("TITLE: ")[1].strip()
        tag = lines[1].split("TAG: ")[1].strip()
        content = ''.join(lines[2:])
        return title, tag, content

    def gen_tag_list(self):
        "Return a dict mapping each tag to its [filename, title] pairs"
        tags = {}
        for md in os.listdir('markdown'):
            title, tag, _ = self.load_md(f'markdown/{md}')
            if tag not in tags.keys():
                tags[tag] = []
            item = []
            item.append(md)
            item.append(title)
            tags[tag].append(item)
        return tags

    def gen_page(self):
        "Generate html from each markdown file"
        root = self.config['root']
        year = self.config['year']
        author = self.config['author']
        url = self.config['url']
        if 'blog' not in os.listdir():
            self.utils.mkdir('blog')
        for md in os.listdir('markdown'):
            title, tag, raw_content = self.load_md(f'markdown/{md}')
            file_name = md.split('.')[0]
            content = self.utils.gen_html(raw_content)
            html = self.utils.read_file('template/page.html')
            html = html.format(title, root, root, content, url, year, author)
            self.utils.write_file(f'blog/{tag}/{file_name}.html', html)
            print(f'INFO: File {file_name}.html generated!')

    def gen_index(self):
        html = self.utils.read_file('template/index.html')
        title = self.config['title']
        root = self.config['root']
        year = self.config['year']
        author = self.config['author']
        tags = self.gen_tag_list()
        group = []
        for tag, pages in tags.items():
            group.append('## ' + tag)
            for page in pages:
                link = r"%s%s/%s.html" % (root, tag, page[0].split('.')[0])
                item = r"- [%s](%s)" % (page[1], link)
                group.append(item)
        raw_links = '\n'.join(group)
        content = self.utils.gen_html(raw_links)
        html = html.format(title, root, title, content, year, author)
        self.utils.write_file('blog/index.html', html)
        print('INFO: File index.html generated!')


if __name__ == "__main__":
    args = docopt(__doc__, version='Arco b0.2')
    utils = Utils()
    arco = Arco(utils)
    if args['new'] or args['n']:
        arco.new_page(args['-t'], args['-g'], args['-f'])
    if args['generate'] or args['g']:
        arco.gen_page()
        arco.gen_index()
        os.system('cp -r ./template/static/ ./blog/')
__init__
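As a standalone sketch of the markdown-to-HTML step Utils.gen_html performs above (same extension list; the exact HTML python-markdown emits can vary slightly between versions, so the output shown is approximate):

import markdown

exts = ['markdown.extensions.extra', 'markdown.extensions.codehilite',
        'markdown.extensions.tables', 'markdown.extensions.toc',
        'markdown.extensions.footnotes']
html = markdown.markdown("# Hello\n\nSome *markdown* text.", extensions=exts)
# With the toc extension enabled, headings gain id attributes, e.g.:
# '<h1 id="hello">Hello</h1>\n<p>Some <em>markdown</em> text.</p>'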
api_unix.go
// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build darwin linux freebsd // +build cgo package api // #cgo darwin LDFLAGS: -lodbc // #cgo linux LDFLAGS: -lodbc // #cgo freebsd LDFLAGS: -L /usr/local/lib -lodbc // #cgo freebsd CFLAGS: -I/usr/local/include /* #include <sql.h> */ // #include <sqlext.h> // #include <stdint.h> /* SQLRETURN sqlSetEnvUIntPtrAttr(SQLHENV environmentHandle, SQLINTEGER attribute, uintptr_t valuePtr, SQLINTEGER stringLength) { return SQLSetEnvAttr(environmentHandle, attribute, (SQLPOINTER)valuePtr, stringLength); } SQLRETURN sqlSetConnectUIntPtrAttr(SQLHDBC connectionHandle, SQLINTEGER attribute, uintptr_t valuePtr, SQLINTEGER stringLength) { return SQLSetConnectAttr(connectionHandle, attribute, (SQLPOINTER)valuePtr, stringLength); } */ import "C" const ( SQL_OV_ODBC3 = uintptr(C.SQL_OV_ODBC3) SQL_ATTR_ODBC_VERSION = C.SQL_ATTR_ODBC_VERSION SQL_DRIVER_NOPROMPT = C.SQL_DRIVER_NOPROMPT SQL_HANDLE_ENV = C.SQL_HANDLE_ENV SQL_HANDLE_DBC = C.SQL_HANDLE_DBC SQL_HANDLE_STMT = C.SQL_HANDLE_STMT SQL_SUCCESS = C.SQL_SUCCESS SQL_SUCCESS_WITH_INFO = C.SQL_SUCCESS_WITH_INFO SQL_INVALID_HANDLE = C.SQL_INVALID_HANDLE SQL_NO_DATA = C.SQL_NO_DATA SQL_NO_TOTAL = C.SQL_NO_TOTAL SQL_NTS = C.SQL_NTS SQL_MAX_MESSAGE_LENGTH = C.SQL_MAX_MESSAGE_LENGTH SQL_NULL_HANDLE = uintptr(C.SQL_NULL_HANDLE) SQL_NULL_HENV = uintptr(C.SQL_NULL_HENV) SQL_NULL_HDBC = uintptr(C.SQL_NULL_HDBC) SQL_NULL_HSTMT = uintptr(C.SQL_NULL_HSTMT) SQL_PARAM_INPUT = C.SQL_PARAM_INPUT SQL_NULL_DATA = C.SQL_NULL_DATA SQL_DATA_AT_EXEC = C.SQL_DATA_AT_EXEC SQL_UNKNOWN_TYPE = C.SQL_UNKNOWN_TYPE SQL_CHAR = C.SQL_CHAR SQL_NUMERIC = C.SQL_NUMERIC SQL_DECIMAL = C.SQL_DECIMAL SQL_INTEGER = C.SQL_INTEGER SQL_SMALLINT = C.SQL_SMALLINT SQL_FLOAT = C.SQL_FLOAT SQL_REAL = C.SQL_REAL SQL_DOUBLE = C.SQL_DOUBLE
SQL_VARCHAR = C.SQL_VARCHAR SQL_TYPE_DATE = C.SQL_TYPE_DATE SQL_TYPE_TIME = C.SQL_TYPE_TIME SQL_TYPE_TIMESTAMP = C.SQL_TYPE_TIMESTAMP SQL_TIMESTAMP = C.SQL_TIMESTAMP SQL_LONGVARCHAR = C.SQL_LONGVARCHAR SQL_BINARY = C.SQL_BINARY SQL_VARBINARY = C.SQL_VARBINARY SQL_LONGVARBINARY = C.SQL_LONGVARBINARY SQL_BIGINT = C.SQL_BIGINT SQL_TINYINT = C.SQL_TINYINT SQL_BIT = C.SQL_BIT SQL_WCHAR = C.SQL_WCHAR SQL_WVARCHAR = C.SQL_WVARCHAR SQL_WLONGVARCHAR = C.SQL_WLONGVARCHAR SQL_GUID = C.SQL_GUID SQL_SIGNED_OFFSET = C.SQL_SIGNED_OFFSET SQL_UNSIGNED_OFFSET = C.SQL_UNSIGNED_OFFSET // TODO(lukemauldin): Not defined in sqlext.h. Using windows value, but it is not supported. SQL_SS_XML = -152 SQL_SS_TIME2 = -154 SQL_C_CHAR = C.SQL_C_CHAR SQL_C_LONG = C.SQL_C_LONG SQL_C_SHORT = C.SQL_C_SHORT SQL_C_FLOAT = C.SQL_C_FLOAT SQL_C_DOUBLE = C.SQL_C_DOUBLE SQL_C_NUMERIC = C.SQL_C_NUMERIC SQL_C_DATE = C.SQL_C_DATE SQL_C_TIME = C.SQL_C_TIME SQL_C_TYPE_TIMESTAMP = C.SQL_C_TYPE_TIMESTAMP SQL_C_TIMESTAMP = C.SQL_C_TIMESTAMP SQL_C_BINARY = C.SQL_C_BINARY SQL_C_BIT = C.SQL_C_BIT SQL_C_WCHAR = C.SQL_C_WCHAR SQL_C_DEFAULT = C.SQL_C_DEFAULT SQL_C_SBIGINT = C.SQL_C_SBIGINT SQL_C_UBIGINT = C.SQL_C_UBIGINT SQL_C_GUID = C.SQL_C_GUID SQL_COMMIT = C.SQL_COMMIT SQL_ROLLBACK = C.SQL_ROLLBACK SQL_AUTOCOMMIT = C.SQL_AUTOCOMMIT SQL_ATTR_AUTOCOMMIT = C.SQL_ATTR_AUTOCOMMIT SQL_AUTOCOMMIT_OFF = C.SQL_AUTOCOMMIT_OFF SQL_AUTOCOMMIT_ON = C.SQL_AUTOCOMMIT_ON SQL_AUTOCOMMIT_DEFAULT = C.SQL_AUTOCOMMIT_DEFAULT SQL_IS_UINTEGER = C.SQL_IS_UINTEGER //Connection pooling SQL_ATTR_CONNECTION_POOLING = C.SQL_ATTR_CONNECTION_POOLING SQL_ATTR_CP_MATCH = C.SQL_ATTR_CP_MATCH SQL_CP_OFF = uintptr(C.SQL_CP_OFF) SQL_CP_ONE_PER_DRIVER = uintptr(C.SQL_CP_ONE_PER_DRIVER) SQL_CP_ONE_PER_HENV = uintptr(C.SQL_CP_ONE_PER_HENV) SQL_CP_DEFAULT = SQL_CP_OFF SQL_CP_STRICT_MATCH = uintptr(C.SQL_CP_STRICT_MATCH) SQL_CP_RELAXED_MATCH = uintptr(C.SQL_CP_RELAXED_MATCH) ) type ( SQLHANDLE C.SQLHANDLE SQLHENV C.SQLHENV SQLHDBC C.SQLHDBC SQLHSTMT C.SQLHSTMT SQLHWND uintptr SQLWCHAR C.SQLWCHAR SQLSCHAR C.SQLSCHAR SQLSMALLINT C.SQLSMALLINT SQLUSMALLINT C.SQLUSMALLINT SQLINTEGER C.SQLINTEGER SQLUINTEGER C.SQLUINTEGER SQLPOINTER C.SQLPOINTER SQLRETURN C.SQLRETURN SQLLEN C.SQLLEN SQLULEN C.SQLULEN SQLGUID C.SQLGUID ) func SQLSetEnvUIntPtrAttr(environmentHandle SQLHENV, attribute SQLINTEGER, valuePtr uintptr, stringLength SQLINTEGER) (ret SQLRETURN) { r := C.sqlSetEnvUIntPtrAttr(C.SQLHENV(environmentHandle), C.SQLINTEGER(attribute), C.uintptr_t(valuePtr), C.SQLINTEGER(stringLength)) return SQLRETURN(r) } func SQLSetConnectUIntPtrAttr(connectionHandle SQLHDBC, attribute SQLINTEGER, valuePtr uintptr, stringLength SQLINTEGER) (ret SQLRETURN) { r := C.sqlSetConnectUIntPtrAttr(C.SQLHDBC(connectionHandle), C.SQLINTEGER(attribute), C.uintptr_t(valuePtr), C.SQLINTEGER(stringLength)) return SQLRETURN(r) }
SQL_DATETIME = C.SQL_DATETIME SQL_DATE = C.SQL_DATE SQL_TIME = C.SQL_TIME
compiler.rs
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ use crate::build_project::{build_project, build_schema, commit_project}; use crate::compiler_state::{ArtifactMapKind, CompilerState, ProjectName}; use crate::config::Config; use crate::errors::{BuildProjectError, Error, Result}; use crate::graphql_asts::GraphQLAsts; use crate::{source_for_location, watchman::FileSource}; use common::{Diagnostic, PerfLogEvent, PerfLogger}; use futures::future::join_all; use graphql_cli::DiagnosticPrinter; use log::{error, info}; use rayon::prelude::*; use schema::Schema; use std::{collections::HashMap, sync::Arc}; use tokio::task; pub struct Compiler<TPerfLogger> where TPerfLogger: PerfLogger + 'static, { config: Arc<Config>, perf_logger: Arc<TPerfLogger>, } impl<TPerfLogger: PerfLogger> Compiler<TPerfLogger> { pub fn new(config: Config, perf_logger: Arc<TPerfLogger>) -> Self { Self { config: Arc::new(config), perf_logger, } } pub async fn
(self) -> Result<CompilerState> { let setup_event = self.perf_logger.create_event("compiler_setup"); let file_source = FileSource::connect(&self.config, &setup_event).await?; let mut compiler_state = file_source .query(&setup_event, self.perf_logger.as_ref()) .await?; self.build_projects(&mut compiler_state, &setup_event) .await?; self.perf_logger.complete_event(setup_event); self.config.artifact_writer.finalize()?; Ok(compiler_state) } pub fn build_schemas( &self, compiler_state: &CompilerState, setup_event: &impl PerfLogEvent, ) -> HashMap<ProjectName, Arc<Schema>> { let timer = setup_event.start("build_schemas"); let mut schemas = HashMap::new(); for project_config in self.config.enabled_projects() { let schema = build_schema(compiler_state, project_config); schemas.insert(project_config.name, Arc::new(schema)); } setup_event.stop(timer); schemas } pub async fn watch(&self) -> Result<()> { let setup_event = self.perf_logger.create_event("compiler_setup"); let file_source = FileSource::connect(&self.config, &setup_event).await?; let (mut compiler_state, mut subscription) = file_source .subscribe(&setup_event, self.perf_logger.as_ref()) .await?; if let Err(err) = self.build_projects(&mut compiler_state, &setup_event).await { if let Error::BuildProjectsErrors { .. } = err { error!("Compilation failed, see errors above."); } else { error!("{}", err); } } self.perf_logger.complete_event(setup_event); info!("[watch-mode] Compilation completed."); loop { if let Some(file_source_changes) = subscription.next_change().await? { let incremental_build_event = self.perf_logger.create_event("incremental_build_event"); let incremental_build_time = incremental_build_event.start("incremental_build_time"); // TODO Single change to file in VSCode sometimes produces // 2 watchman change events for the same file info!("[watch-mode] Change detected."); let had_new_changes = compiler_state.merge_file_source_changes( &self.config, &file_source_changes, &incremental_build_event, self.perf_logger.as_ref(), )?; if had_new_changes { info!("[watch-mode] Start compiling..."); if let Err(err) = self .build_projects(&mut compiler_state, &incremental_build_event) .await { if let Error::BuildProjectsErrors { .. 
} = err { error!("Compilation failed, see errors above."); } else { error!("{}", err); } } incremental_build_event.stop(incremental_build_time); info!("[watch-mode] Compilation completed."); } else { incremental_build_event.stop(incremental_build_time); info!("[watch-mode] No re-compilation required."); } self.perf_logger.complete_event(incremental_build_event); // We probably don't want the messages queue to grow indefinitely // and we need to flush then, as the check/build is completed self.perf_logger.flush(); } } } async fn build_projects( &self, compiler_state: &mut CompilerState, setup_event: &impl PerfLogEvent, ) -> Result<()> { let result = build_projects( Arc::clone(&self.config), Arc::clone(&self.perf_logger), setup_event, compiler_state, ) .await; match result { Ok(()) => { compiler_state.complete_compilation(); Ok(()) } Err(error) => { match &error { Error::DiagnosticsError { errors } => { for diagnostic in errors { self.print_diagnostic(diagnostic); } } Error::BuildProjectsErrors { errors } => { for error in errors { self.print_project_error(error); } } _ => {} } Err(error) } } } fn print_project_error(&self, error: &BuildProjectError) { if let BuildProjectError::ValidationErrors { errors } = error { for diagnostic in errors { self.print_diagnostic(diagnostic); } }; } fn print_diagnostic(&self, diagnostic: &Diagnostic) { let printer = DiagnosticPrinter::new(|source_location| { source_for_location(&self.config.root_dir, source_location).map(|source| source.text) }); error!("{}", printer.diagnostic_to_string(diagnostic)); } } async fn build_projects<TPerfLogger: PerfLogger + 'static>( config: Arc<Config>, perf_logger: Arc<TPerfLogger>, setup_event: &impl PerfLogEvent, compiler_state: &mut CompilerState, ) -> Result<()> { let mut graphql_asts = setup_event.time("parse_sources_time", || { GraphQLAsts::from_graphql_sources_map(&compiler_state.graphql_sources) })?; let build_results: Vec<_> = config .par_enabled_projects() .filter(|project_config| compiler_state.project_has_pending_changes(project_config.name)) .map(|project_config| { build_project( &config, project_config, compiler_state, &graphql_asts, Arc::clone(&perf_logger), ) }) .collect(); let mut results = Vec::new(); let mut errors = Vec::new(); for result in build_results { match result { Ok(result) => results.push(result), Err(error) => errors.push(error), } } if errors.is_empty() { let mut handles = Vec::new(); for (project_name, schema, programs, artifacts) in results { let config = Arc::clone(&config); let perf_logger = Arc::clone(&perf_logger); let artifact_map = compiler_state .artifacts .get(&project_name) .cloned() .unwrap_or_else(|| Arc::new(ArtifactMapKind::Unconnected(Default::default()))); let removed_definition_names = graphql_asts .remove(&project_name) .expect("Expect GraphQLAsts to exist.") .removed_definition_names; handles.push(task::spawn(async move { let project_config = &config.projects[&project_name]; Ok(( project_name, commit_project( &config, project_config, perf_logger, &schema, programs, artifacts, artifact_map, removed_definition_names, ) .await?, )) })); } for commit_result in join_all(handles).await { match commit_result.unwrap() { Ok((project_name, next_artifact_map)) => { let next_artifact_map = Arc::new(ArtifactMapKind::Mapping(next_artifact_map)); compiler_state .artifacts .insert(project_name, next_artifact_map); } Err(error) => { errors.push(error); } } } }; if errors.is_empty() { Ok(()) } else { Err(Error::BuildProjectsErrors { errors }) } }
compile
main.rs
fn main()
{ let keypair = ds4auth::DS4Key::embedded().unwrap(); let nonce = [0u8; 256]; let signature = keypair.sign(&nonce).unwrap(); assert!(signature.validate(&nonce)); }
insegment_test.go
package matchers import ( "reflect" "testing" "github.com/splitio/go-client/splitio/service/dtos" "github.com/splitio/go-client/splitio/storage/mutexmap" "github.com/splitio/go-toolkit/datastructures/set" "github.com/splitio/go-toolkit/injection" "github.com/splitio/go-toolkit/logging" ) func TestInSegmentMatcher(t *testing.T)
{ logger := logging.NewLogger(&logging.LoggerOptions{}) dto := &dtos.MatcherDTO{ MatcherType: "IN_SEGMENT", UserDefinedSegment: &dtos.UserDefinedSegmentMatcherDataDTO{ SegmentName: "segmentito", }, } segmentKeys := set.NewSet() segmentKeys.Add("item1", "item2") segmentStorage := mutexmap.NewMMSegmentStorage() segmentStorage.Put("segmentito", segmentKeys, 123) ctx := injection.NewContext() ctx.AddDependency("segmentStorage", segmentStorage) matcher, err := BuildMatcher(dto, ctx, logger) if err != nil { t.Error("There should be no errors when building the matcher") t.Error(err) } matcherType := reflect.TypeOf(matcher).String() if matcherType != "*matchers.InSegmentMatcher" { t.Errorf("Incorrect matcher constructed. Should be *matchers.InSegmentMatcher and was %s", matcherType) } if !matcher.Match("item1", nil, nil) { t.Error("Should match a key present in the segment") } if matcher.Match("item7", nil, nil) { t.Error("Should not match a key not present in the segment") } segmentStorage.Remove("segmentito") if matcher.Match("item1", nil, nil) { t.Error("Should return false for a nonexistent segment") } }
_sizemode.py
import _plotly_utils.basevalidators class
(_plotly_utils.basevalidators.EnumeratedValidator): def __init__( self, plotly_name="sizemode", parent_name="scattercarpet.marker", **kwargs ): super(SizemodeValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "calc"), values=kwargs.pop("values", ["diameter", "area"]), **kwargs, )
SizemodeValidator
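For context on the attribute this validator guards: marker.sizemode chooses whether the values in marker.size are treated as diameters or as areas when plotly sizes the markers. A small illustrative trace follows (coordinates and sizes are made up; a scattercarpet trace is normally paired with a go.Carpet trace that defines the a/b coordinate system):

import plotly.graph_objects as go

trace = go.Scattercarpet(
    a=[4.0, 4.5, 5.0],
    b=[1.5, 2.5, 3.5],
    mode="markers",
    # "area" makes bubble sizes proportional to the data, "diameter" is linear.
    marker=dict(size=[10, 20, 40], sizemode="area", sizeref=2),
)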
test_endpoints_scoped.py
import pytest from sanic import Sanic from sanic.response import json, text from sanic_jwt import exceptions, Initialize from sanic_jwt.decorators import protected, scoped class User: def __init__(self, id, username, password, scopes): self.id = id self.username = username self.password = password self.scopes = scopes def to_dict(self): return { "user_id": self.id, "username": self.username, "scopes": self.scopes, } @property def user_id(self): raise Exception("you shall not call me") users = [ User(1, "user1", "abcxyz", ["user"]), User(2, "user2", "abcxyz", ["user", "admin"]), User(3, "user3", "abcxyz", ["user:read"]), User(4, "user4", "abcxyz", ["client1"]), User(5, "user5", "abcxyz", ["admin"]), User(6, "user6", "abcxyz", None), User(7, "user7", "abcxyz", ["foo:bar"]), ] username_table = {u.username: u for u in users} userid_table = {u.id: u for u in users} async def authenticate(request, *args, **kwargs): username = request.json.get("username", None) password = request.json.get("password", None) if not username or not password: raise exceptions.AuthenticationFailed("Missing username or password.") user = username_table.get(username, None) if user is None: raise exceptions.AuthenticationFailed("User not found.") if password != user.password: raise exceptions.AuthenticationFailed("Password is incorrect.") return user async def retrieve_user(request, payload, *args, **kwargs): if payload: user_id = payload.get("user_id", None) if user_id is not None: return userid_table.get(user_id) else:
async def my_scope_extender(user, *args, **kwargs): return user.scopes def my_scope_override(*args, **kwargs): return False def my_destructure_scopes(scopes, *args, **kwargs): return scopes.replace("|", ":") @pytest.yield_fixture def app_with_scopes_base(): sanic_app = Sanic() @sanic_app.route("/") async def test(request): return json({"hello": "world"}) @sanic_app.route("/protected") @protected() async def protected_route(request): return json({"protected": True, "scoped": False}) @sanic_app.route("/protected/scoped/1") @protected() @scoped("user") async def protected_route1(request): return json({"protected": True, "scoped": True}) @sanic_app.route("/protected/scoped/2") @protected() @scoped("user:read") async def protected_route2(request): return json({"protected": True, "scoped": True}) @sanic_app.route("/protected/scoped/3") @protected() @scoped(["user", "admin"]) async def protected_route3(request): return json({"protected": True, "scoped": True}) @sanic_app.route("/protected/scoped/4") @protected() @scoped(["user", "admin"], False) async def protected_route4(request): return json({"protected": True, "scoped": True}) @sanic_app.route("/protected/scoped/5") @scoped("user") async def protected_route5(request): return json({"protected": True, "scoped": True}) @sanic_app.route("/protected/scoped/6/<id>") @scoped(lambda *args, **kwargs: "user") async def protected_route6(request, id): return json({"protected": True, "scoped": True, "id": id}) def client_id_scope(request, *args, **kwargs): return "client" + kwargs.get("id") @sanic_app.route("/protected/scoped/7/<id>") @scoped(client_id_scope) async def protected_route7(request, id): return json({"protected": True, "scoped": True, "id": id}) @sanic_app.route("/protected/scoped/8") @protected() @scoped(["user:read", "admin"], False) async def protected_route8(request): return json({"protected": True, "scoped": True}) async def client_id_async_scope(request, *args, **kwargs): return "client" + kwargs.get("id") @sanic_app.route("/protected/scoped/9/<id>") @scoped(client_id_async_scope) async def protected_route9(request, id): return json({"protected": True, "scoped": True, "id": id}) yield sanic_app @pytest.yield_fixture def app_with_scopes(app_with_scopes_base): sanicjwt = Initialize( app_with_scopes_base, authenticate=authenticate, retrieve_user=retrieve_user, add_scopes_to_payload=my_scope_extender, ) yield (app_with_scopes_base, sanicjwt) @pytest.yield_fixture def app_with_scopes_override(app_with_scopes_base): sanicjwt = Initialize( app_with_scopes_base, authenticate=authenticate, retrieve_user=retrieve_user, add_scopes_to_payload=my_scope_extender, override_scope_validator=my_scope_override, ) yield (app_with_scopes_base, sanicjwt) @pytest.yield_fixture def app_with_scopes_destructure(app_with_scopes_base): sanicjwt = Initialize( app_with_scopes_base, authenticate=authenticate, retrieve_user=retrieve_user, add_scopes_to_payload=my_scope_extender, destructure_scopes=my_destructure_scopes, ) yield (app_with_scopes_base, sanicjwt) class TestEndpointsSync(object): @pytest.yield_fixture def user1(self, app_with_scopes): sanic_app, _ = app_with_scopes _, response = sanic_app.test_client.post( "/auth", json={"username": "user1", "password": "abcxyz"} ) assert response.status == 200 yield response @pytest.yield_fixture def user2(self, app_with_scopes): sanic_app, _ = app_with_scopes _, response = sanic_app.test_client.post( "/auth", json={"username": "user2", "password": "abcxyz"} ) assert response.status == 200 yield response 
@pytest.yield_fixture def user3(self, app_with_scopes): sanic_app, _ = app_with_scopes _, response = sanic_app.test_client.post( "/auth", json={"username": "user3", "password": "abcxyz"} ) assert response.status == 200 yield response @pytest.yield_fixture def user4(self, app_with_scopes): sanic_app, _ = app_with_scopes _, response = sanic_app.test_client.post( "/auth", json={"username": "user4", "password": "abcxyz"} ) assert response.status == 200 yield response @pytest.yield_fixture def user5(self, app_with_scopes): sanic_app, _ = app_with_scopes _, response = sanic_app.test_client.post( "/auth", json={"username": "user5", "password": "abcxyz"} ) assert response.status == 200 yield response @pytest.yield_fixture def user6(self, app_with_scopes): sanic_app, _ = app_with_scopes _, response = sanic_app.test_client.post( "/auth", json={"username": "user6", "password": "abcxyz"} ) assert response.status == 200 yield response def test_scopes_anonymous_user(self, app_with_scopes): sanic_app, _ = app_with_scopes _, response = sanic_app.test_client.get("/") assert response.status == 200 assert response.json.get("hello") == "world" _, response = sanic_app.test_client.get("/auth/me") assert response.status == 401 assert response.json.get("exception") == "Unauthorized" assert "Authorization header not present." in response.json.get( "reasons" ) _, response = sanic_app.test_client.get("/protected") assert response.status == 401 assert response.json.get("exception") == "Unauthorized" assert "Authorization header not present." in response.json.get( "reasons" ) _, response = sanic_app.test_client.get("/protected/scoped/1") assert response.status == 401 assert response.json.get("exception") == "Unauthorized" assert "Authorization header not present." in response.json.get( "reasons" ) _, response = sanic_app.test_client.get("/protected/scoped/2") assert response.status == 401 assert response.json.get("exception") == "Unauthorized" assert "Authorization header not present." in response.json.get( "reasons" ) _, response = sanic_app.test_client.get("/protected/scoped/3") assert response.status == 401 assert response.json.get("exception") == "Unauthorized" assert "Authorization header not present." in response.json.get( "reasons" ) _, response = sanic_app.test_client.get("/protected/scoped/4") assert response.status == 401 assert response.json.get("exception") == "Unauthorized" assert "Authorization header not present." in response.json.get( "reasons" ) _, response = sanic_app.test_client.get("/protected/scoped/5") assert response.status == 401 assert response.json.get("exception") == "Unauthorized" assert "Authorization header not present." in response.json.get( "reasons" ) _, response = sanic_app.test_client.get("/protected/scoped/6/1") assert response.status == 401 assert response.json.get("exception") == "Unauthorized" assert "Authorization header not present." in response.json.get( "reasons" ) _, response = sanic_app.test_client.get("/protected/scoped/6/foo") assert response.status == 401 assert response.json.get("exception") == "Unauthorized" assert "Authorization header not present." in response.json.get( "reasons" ) _, response = sanic_app.test_client.get("/protected/scoped/7/1") assert response.status == 401 assert response.json.get("exception") == "Unauthorized" assert "Authorization header not present." 
in response.json.get( "reasons" ) _, response = sanic_app.test_client.get("/protected/scoped/7/foo") assert response.status == 401 assert response.json.get("exception") == "Unauthorized" assert "Authorization header not present." in response.json.get( "reasons" ) _, response = sanic_app.test_client.get("/protected/scoped/8") assert response.status == 401 assert response.json.get("exception") == "Unauthorized" assert "Authorization header not present." in response.json.get( "reasons" ) _, response = sanic_app.test_client.get("/protected/scoped/9/1") assert response.status == 401 assert response.json.get("exception") == "Unauthorized" assert "Authorization header not present." in response.json.get( "reasons" ) _, response = sanic_app.test_client.get("/protected/scoped/9/foo") assert response.status == 401 assert response.json.get("exception") == "Unauthorized" assert "Authorization header not present." in response.json.get( "reasons" ) def test_scopes_user1(self, app_with_scopes, user1): sanic_app, sanicjwt = app_with_scopes access_token = user1.json.get( sanicjwt.config.access_token_name(), None ) _, response = sanic_app.test_client.get("/") assert response.status == 200 assert response.json.get("hello") == "world" _, response = sanic_app.test_client.get( "/auth/me", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("me").get("user_id") == 1 assert response.json.get("me").get("username") == "user1" assert response.json.get("me").get("scopes") == ["user"] _, response = sanic_app.test_client.get( "/protected", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is False _, response = sanic_app.test_client.get( "/protected/scoped/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True _, response = sanic_app.test_client.get( "/protected/scoped/2", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True _, response = sanic_app.test_client.get( "/protected/scoped/3", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." 
in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/4", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True _, response = sanic_app.test_client.get( "/protected/scoped/5", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True _, response = sanic_app.test_client.get( "/protected/scoped/6/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True assert response.json.get("id") == "1" _, response = sanic_app.test_client.get( "/protected/scoped/6/foo", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True assert response.json.get("id") == "foo" _, response = sanic_app.test_client.get( "/protected/scoped/7/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/8", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True _, response = sanic_app.test_client.get( "/protected/scoped/9/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." 
in response.json.get("reasons") def test_scopes_user2(self, app_with_scopes, user2): sanic_app, sanicjwt = app_with_scopes access_token = user2.json.get( sanicjwt.config.access_token_name(), None ) _, response = sanic_app.test_client.get("/") assert response.status == 200 assert response.json.get("hello") == "world" _, response = sanic_app.test_client.get( "/auth/me", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("me").get("user_id") == 2 assert response.json.get("me").get("username") == "user2" assert response.json.get("me").get("scopes") == ["user", "admin"] _, response = sanic_app.test_client.get( "/protected", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is False _, response = sanic_app.test_client.get( "/protected/scoped/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True _, response = sanic_app.test_client.get( "/protected/scoped/2", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True _, response = sanic_app.test_client.get( "/protected/scoped/3", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True _, response = sanic_app.test_client.get( "/protected/scoped/4", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True _, response = sanic_app.test_client.get( "/protected/scoped/5", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True _, response = sanic_app.test_client.get( "/protected/scoped/6/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True assert response.json.get("id") == "1" _, response = sanic_app.test_client.get( "/protected/scoped/6/foo", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True assert response.json.get("id") == "foo" _, response = sanic_app.test_client.get( "/protected/scoped/7/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/8", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True _, response = sanic_app.test_client.get( "/protected/scoped/9/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert "Invalid scope." 
in response.json.get("reasons") def test_scopes_user3(self, app_with_scopes, user3): sanic_app, sanicjwt = app_with_scopes access_token = user3.json.get( sanicjwt.config.access_token_name(), None ) _, response = sanic_app.test_client.get("/") assert response.status == 200 assert response.json.get("hello") == "world" _, response = sanic_app.test_client.get( "/auth/me", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("me").get("user_id") == 3 assert response.json.get("me").get("username") == "user3" assert response.json.get("me").get("scopes") == ["user:read"] _, response = sanic_app.test_client.get( "/protected", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is False _, response = sanic_app.test_client.get( "/protected/scoped/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/2", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True _, response = sanic_app.test_client.get( "/protected/scoped/3", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/4", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/5", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/6/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/6/foo", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/7/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/8", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True _, response = sanic_app.test_client.get( "/protected/scoped/9/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." 
in response.json.get("reasons") def test_scopes_user4(self, app_with_scopes, user4): sanic_app, sanicjwt = app_with_scopes access_token = user4.json.get( sanicjwt.config.access_token_name(), None ) _, response = sanic_app.test_client.get("/") assert response.status == 200 assert response.json.get("hello") == "world" _, response = sanic_app.test_client.get( "/auth/me", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("me").get("user_id") == 4 assert response.json.get("me").get("username") == "user4" assert response.json.get("me").get("scopes") == ["client1"] _, response = sanic_app.test_client.get( "/protected", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is False _, response = sanic_app.test_client.get( "/protected/scoped/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/2", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/3", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/4", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/5", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/6/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/6/foo", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/7/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True assert response.json.get("id") == "1" _, response = sanic_app.test_client.get( "/protected/scoped/7/foo", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." 
in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/8", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/9/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True assert response.json.get("id") == "1" _, response = sanic_app.test_client.get( "/protected/scoped/9/foo", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") def test_scopes_user5(self, app_with_scopes, user5): sanic_app, sanicjwt = app_with_scopes access_token = user5.json.get( sanicjwt.config.access_token_name(), None ) _, response = sanic_app.test_client.get("/") assert response.status == 200 assert response.json.get("hello") == "world" _, response = sanic_app.test_client.get( "/auth/me", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("me").get("user_id") == 5 assert response.json.get("me").get("username") == "user5" assert response.json.get("me").get("scopes") == ["admin"] _, response = sanic_app.test_client.get( "/protected", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is False _, response = sanic_app.test_client.get( "/protected/scoped/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/2", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/3", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/4", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True _, response = sanic_app.test_client.get( "/protected/scoped/5", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/6/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/6/foo", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." 
in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/7/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/8", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is True _, response = sanic_app.test_client.get( "/protected/scoped/9/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") def test_scopes_user6(self, app_with_scopes, user6): sanic_app, sanicjwt = app_with_scopes access_token = user6.json.get( sanicjwt.config.access_token_name(), None ) _, response = sanic_app.test_client.get("/") assert response.status == 200 assert response.json.get("hello") == "world" _, response = sanic_app.test_client.get( "/auth/me", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("me").get("user_id") == 6 assert response.json.get("me").get("username") == "user6" assert response.json.get("me").get("scopes") is None _, response = sanic_app.test_client.get( "/protected", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("protected") is True assert response.json.get("scoped") is False _, response = sanic_app.test_client.get( "/protected/scoped/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/2", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/3", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/4", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/5", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/6/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/6/foo", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." 
in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/7/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/8", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") _, response = sanic_app.test_client.get( "/protected/scoped/9/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") def test_no_user_scopes(app_with_scopes): sanic_app, sanicjwt = app_with_scopes sanicjwt.config.scopes_enabled.update(False) _, response = sanic_app.test_client.post( "/auth", json={"username": "user1", "password": "abcxyz"} ) assert response.status == 200 access_token = response.json.get(sanicjwt.config.access_token_name(), None) sanicjwt.config.scopes_enabled.update(True) _, response = sanic_app.test_client.get( "/protected/scoped/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." in response.json.get("reasons") def test_scoped_option(app_with_scopes): sanic_app, sanicjwt = app_with_scopes @sanic_app.route("/protected/scoped/1", methods=["OPTIONS"]) @scoped("user") async def scoped_optoin_route(request): return text("", status=204) _, response = sanic_app.test_client.post( "/auth", json={"username": "user1", "password": "abcxyz"} ) assert response.status == 200 access_token = response.json.get(sanicjwt.config.access_token_name(), None) _, response = sanic_app.test_client.options( "/protected/scoped/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 204 _, response = sanic_app.test_client.options("/protected/scoped/1") assert response.status == 204 def test_scoped_sync_method(app_with_scopes): sanic_app, sanicjwt = app_with_scopes @sanic_app.route("/protected/scoped_sync") @sanicjwt.scoped("user") def scoped_sync_route(request): return json({"async": False}) _, response = sanic_app.test_client.post( "/auth", json={"username": "user1", "password": "abcxyz"} ) assert response.status == 200 access_token = response.json.get(sanicjwt.config.access_token_name(), None) _, response = sanic_app.test_client.get( "/protected/scoped_sync", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200 assert response.json.get("async") is False def test_scoped_with_override(app_with_scopes_override): sanic_app, sanicjwt = app_with_scopes_override _, response = sanic_app.test_client.post( "/auth", json={"username": "user1", "password": "abcxyz"} ) assert response.status == 200 access_token = response.json.get(sanicjwt.config.access_token_name(), None) _, response = sanic_app.test_client.get( "/protected/scoped/1", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 403 assert response.json.get("exception") == "Unauthorized" assert "Invalid scope." 
in response.json.get("reasons") def test_scoped_with_destructure(app_with_scopes_destructure): sanic_app, sanicjwt = app_with_scopes_destructure @sanic_app.route("/protected/compiled_scopes") @sanicjwt.scoped("foo|bar") def scoped_sync_route(request): return json({"async": False}) _, response = sanic_app.test_client.post( "/auth", json={"username": "user7", "password": "abcxyz"} ) assert response.status == 200 access_token = response.json.get(sanicjwt.config.access_token_name(), None) _, response = sanic_app.test_client.get( "/protected/compiled_scopes", headers={"Authorization": "Bearer {}".format(access_token)}, ) assert response.status == 200
return None
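The scope tests above all hinge on how a required scope is matched against the scopes carried in the token. The snippet below is a deliberately simplified, stand-alone sketch of that matching idea (a bare namespace like "user" satisfies namespaced requirements such as "user:read", but not the other way around); it is not sanic-jwt's actual implementation, and the function name and semantics are assumptions for illustration only.

def scope_accepts(required: str, held: str) -> bool:
    """Return True if a held scope satisfies a required scope (simplified model)."""
    req_ns, _, req_actions = required.partition(":")
    held_ns, _, held_actions = held.partition(":")
    if req_ns != held_ns:
        return False
    if not held_actions:      # a bare namespace grants every action in it
        return True
    if not req_actions:       # a bare requirement needs a bare grant
        return False
    return set(req_actions.split()) <= set(held_actions.split())

assert scope_accepts("user:read", "user") is True
assert scope_accepts("user", "user:read") is False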
Client.py
# vim:fileencoding=utf-8:ts=2:sw=2:expandtab import json import mimetypes import traceback from datetime import datetime, timezone, timedelta from collections import OrderedDict from AppStruct.Util import aadict from AppStruct.Security import RandomHex from ..Base import GetSession, S3, SQS from . import AWS class AWSConfig(object): def __init__(self, ConfigDict): # Run all assertions # Assert that required keys exist assert "keyprefix" in ConfigDict and isinstance(ConfigDict["keyprefix"], str) assert "input_bucket" in ConfigDict and isinstance(ConfigDict["input_bucket"], str) assert "output_bucket" in ConfigDict and isinstance(ConfigDict["output_bucket"], str) # Check sqs assert "sqs" in ConfigDict and isinstance(ConfigDict["sqs"], dict) assert "queueurl" in ConfigDict["sqs"] and isinstance(ConfigDict["sqs"]["queueurl"], str) # Check user assert "user" in ConfigDict and isinstance(ConfigDict["user"], dict) assert "access_key_id" in ConfigDict["user"] and isinstance(ConfigDict["user"]["access_key_id"], str) assert "secret_key" in ConfigDict["user"] and isinstance(ConfigDict["user"]["secret_key"], str) # Now prepare attributes self.AccessKey = ConfigDict["user"]["access_key_id"] self.SecretKey = ConfigDict["user"]["secret_key"] self.QueueUrl = ConfigDict["sqs"]["queueurl"] self.KeyPrefix = ConfigDict["keyprefix"] self.InputBucket = ConfigDict["input_bucket"] self.OutputBucket = ConfigDict["output_bucket"] class Client(object): SchemaVersion = '1.0.0' def __init__(self, *, Config, Schema): self.Schema = Schema # TODO: this should be converted to it's own object that validates the incoming dict self.Config = AWSConfig(Config) # One of the things to do here, because this should only run once per thread init, # is to check the Version of the Schema in the current DB connection and verify # that our code and the DB schema are in sync. SchemaVersion = App.DB.Value(''' SELECT "Version" FROM "AWS"."Release" ''' ) if SchemaVersion != self.SchemaVersion: raise TypeError("DocStruct schema version does not match DocStruct code version. 
Please make sure database is upgraded before running App.") @property def Session(self): try: session = self._session except AttributeError: session = self._session = GetSession( AccessKey=self.Config.AccessKey, SecretKey=self.Config.SecretKey ) return session def GetBucketAndKeyFromArn(self, Arn): return S3.GetBucketAndKeyFromArn(Arn) def GetInputBucketUrl(self): return S3.GetBucketUrl(self.Config.InputBucket) def GetOutputBucketUrl(self): return S3.GetBucketUrl(self.Config.OutputBucket) ############################################################################### def S3_PrepareUpload(self, *, FileInfo, RemoteAddr, CreateUser, OwnerHint="nobody", expiresin=3600): filesize = FileInfo["FileSize"] filename = FileInfo["FileName"] filetype = FileInfo["FileType"] mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream" esid = (datetime.now().strftime('%Y%m%d%H%M%S') + RandomHex())[0:64] expiresat = datetime.utcnow() + timedelta(seconds=expiresin) key = "{0}/{1}".format(self.Config.KeyPrefix, esid) # Get the policy dict d = S3.GetFormParameters( self.Session, self.Config.InputBucket, key, algo="HMAC-SHA1", contenttype=mimetype, expiration=expiresat ) # Before we return, we need to create a file record tzpolicyex = expiresat.replace(tzinfo=timezone(timedelta(seconds=0))) s3file = AWS.S3_File.Create( S3_File_ESID=esid, CreateAddr=RemoteAddr, CreateUser=CreateUser, OwnerHint=OwnerHint, Input_FileName=filename, Input_Size=filesize, Input_ContentType=mimetype, Input_Type=filetype, Input_Expiration=tzpolicyex ) d["S3_File_MNID"] = s3file.S3_File_MNID d["S3_File_ESID"] = s3file.S3_File_ESID return d ############################################################################### def S3_UploadStarted(self, FileInfo): # Get the file record by S3_File_ESID s3file = AWS.S3_File.FindByESID(S3_File_ESID=FileInfo['S3_File_ESID']) # Now we can update the File record s3file.MarkAsStarted() return s3file ############################################################################### def S3_UploadComplete(self, FileInfo):
############################################################################### def S3_VideoStatus(self, S3_File_MNID): s3file = AWS.S3_File(S3_File_MNID) RVAL = aadict() RVAL.S3_File_MNID = s3file.S3_File_MNID if s3file.IsTranscoded: RVAL.Ready = True RVAL.Status = 'Video Processing Complete' elif s3file.Input_Error: RVAL.Ready = False RVAL.Status = 'Error Encountered During Video Processing' elif s3file.Input_EndTime is None: RVAL.Ready = False RVAL.Status = 'File Is Not Uploaded Yet or Error Encountered During Upload' else: RVAL.Ready = False RVAL.Status = 'Video Processing...' RVAL.FileName = s3file.Input_FileName return RVAL ############################################################################### def S3_TranscodeStatusCheck(self, S3_File_ESID): s3file = AWS.S3_File.FindByESID(S3_File_ESID=S3_File_ESID) key = s3file.Input_Arn.split(':')[-1].replace("{0}/".format(self.Config.OutputBucket), "").replace('input.dat', 'output.json') # check if the output file is available yet jdict = S3.GetJSON( session=self.Session, bucket=self.Config.OutputBucket, key=key ) if not jdict: #TODO: let's check and see if too long has passed, we will update this with an error message return None # Get the job state state = jdict['state'] # On complete we will create and save the output versions if state == "COMPLETED": s3file.AddVersions(AWSResponse=jdict, OutputBucket=self.Config.OutputBucket) elif state == 'ERROR': s3file.Input_Error = json.dumps(jdict) s3file.IsTranscoded = False s3file.Save() return s3file ############################################################################### def S3_SignedUrlForFile(self, S3_File, expiresin=10800): bucket, key = self.GetBucketAndKeyFromArn(S3_File.Input_Arn) return S3.GetSignedUrl(self.Session, bucket, key, expiresin) ############################################################################### def S3_ServeVideoVersionMap(self, S3_File_MNID, expiresin=10800): """ Create the URIs for serving different video versions of this file Returns OrderedDict { VideoVersion: aadict(src = ..., type=...) ... } """ s3file = AWS.S3_File(S3_File_MNID) return self.S3_ServeVideoVersionMapForS3File(s3file) ############################################################################### def S3_ServeVideoVersionMapForS3File(self, s3file, expiresin=10800): """ Create the URIs for serving different video versions of this file Returns OrderedDict { VideoVersion: aadict(src = ..., type=...) ... 
} """ # If there was an error, we can just return None if s3file.Input_Error: return None # File may have been updated in the TranscoderStatusCheck call if not s3file.IsTranscoded: return None OutputMap = OrderedDict() for version in s3file.GetVideoVersionList(): bucket, key = self.GetBucketAndKeyFromArn(version["Arn"]) OutputMap[version.VideoVersion] = aadict( src = S3.GetSignedUrl(self.Session, bucket, key, expiresin), type = version.HTML_Type, ) # App.Log(version) return OutputMap or None ############################################################################### def S3_File_Poll(self): numfiles = 0 numerrors = 0 numtranscoded = 0 for f in AWS.S3_File.ListPending(): try: print("checking status for S3_File_MNID={0}, S3_File_ESID={1}".format(f.S3_File_MNID, f.S3_File_ESID)) self.S3_TranscodeStatusCheck(f.S3_File_ESID) except Exception as e: numerrors += 1 print("ERROR: (checking status for <{0}>)".format(f.S3_File_MNID)) traceback.print_exc() print() f.Input_Error = str(e) f.Save() else: numtranscoded += 1 finally: numfiles+=1 return numfiles, numerrors, numtranscoded ############################################################################### def Get_Transcoded_S3_File_From_ACRM_File(self, File_MNID): ''' This function either returns an S3_File object or None To find the S3_File object, it looks up the Hash from the ACRM.File table and then pads it with "0", and uses that as an S3_File_ESID. ''' DB = App.DB try: FileInfo = DB.Row(''' SELECT "File_MNID", "FileName", "ContentType", "Size", "Hash" FROM "ACRM"."File" WHERE True AND "File_MNID" = $File_MNID ''', File_MNID = File_MNID ) except DB.NotOneFound: raise ValueError("File_MNID {0} not found.".format(File_MNID)) # Create the ESID using YYYYMMDDHHIISS<hash>0000000000 # Instead of actual values for YYYYMMDDHHIISS, just use zeros. S3_File_ESID = '00000000000000' + FileInfo.Hash + '0000000000' # If it already exists in S3_File, then exit try: s3file = AWS.S3_File.FindByESID(S3_File_ESID=S3_File_ESID) except DB.NotOneFound: return None # is it transcoded? if not s3file.IsTranscoded: return None return s3file ############################################################################### def S3_UploadFromACRM(self, File_MNID, *, Input_Type='Video', Overwrite=False): DB = App.DB FS = App.FS try: FileInfo = DB.Row(''' SELECT "File_MNID", "FileName", "ContentType", "Size", "Hash" FROM "ACRM"."File" WHERE True AND "File_MNID" = $File_MNID ''', File_MNID = File_MNID ) except DB.NotOneFound: raise ValueError("File_MNID {0} not found.".format(File_MNID)) # Create the ESID using YYYYMMDDHHIISS<hash>0000000000 # Instead of actual values for YYYYMMDDHHIISS, just use zeros. 
S3_File_ESID = '00000000000000' + FileInfo.Hash + '0000000000' # If it already exists in S3_File, then exit try: f = AWS.S3_File.FindByESID(S3_File_ESID=S3_File_ESID) if Overwrite: f.Delete() else: return f # Otherwise, let's continue except DB.NotOneFound: pass # Ensure the hash exists in FileStruct if FileInfo.Hash not in FS: raise ValueError("FileHash '{0}' not found in FileStruct!".format(FileInfo.Hash)) # Here we need to upload the file to S3 and trigger a transcoding job # First create some variables we will need to create the S3_File record expiresat = datetime.utcnow().replace(tzinfo=timezone(timedelta(seconds=0))) key = "{0}/{1}/input.dat".format(self.Config.KeyPrefix, S3_File_ESID) # Now we have all the info to create the S3_File record s3file = AWS.S3_File.Create( S3_File_ESID = S3_File_ESID, CreateAddr = "127.0.0.1", CreateUser = -1, OwnerHint = "nobody", Input_FileName = FileInfo.FileName, Input_Size = FileInfo.Size, Input_ContentType = FileInfo.ContentType, Input_Type = Input_Type, Input_Expiration = expiresat ) # We are about to start upload self.S3_UploadStarted({'S3_File_ESID': S3_File_ESID}) # Upload the file to S3 with open(FS[FileInfo.Hash].Path, "rb") as fp: S3.PutObject(session=self.Session, bucket=self.Config.InputBucket, key=key, content=fp, type_=FileInfo.ContentType) # Since file has uploaded we can mark it as ended. # NOTE: this also posts the SQS message to transcode file self.S3_UploadComplete({ "Key": key, "Bucket": self.Config.InputBucket, "S3_File_ESID": S3_File_ESID, }) return s3file ############################################################################### def S3_File_RetriggerJob(self, S3_File_MNID): DB = App.DB # Try to get the file info try: f = AWS.S3_File(S3_File_MNID) except DB.NotOneFound: print("Could not find S3_File<{0}>".format(S3_File_MNID)) return # Save for later Bucket, Key = self.GetBucketAndKeyFromArn(f.Input_Arn) # Delete all related info with DB.Transaction(): if f.Input_Type == 'Document': AWS.S3_File_Document.DeleteAllRecords(S3_File_MNID) elif f.Input_Type == 'Video': AWS.S3_File_Video.DeleteAllRecords(S3_File_MNID) elif f.Input_Type.endswith('Image'): AWS.S3_File_Image.DeleteAllRecords(S3_File_MNID) # Delete all file info f.IsTranscoded = False f.JobSpecification = "{}" f.Input_Arn = "" f.Input_JobArn = "" f.Input_Error = "" f.Save() # Now we retrigger self.S3_UploadComplete({ "Key": Key, "Bucket": self.Config.InputBucket, "S3_File_ESID": f.S3_File_ESID, })
bucketname = FileInfo['Bucket'] key = FileInfo['Key'] objectarn = "arn:aws:s3:::{0}/{1}".format(bucketname, key) # Get the File object from DB s3file = AWS.S3_File.FindByESID(S3_File_ESID=FileInfo['S3_File_ESID']) s3file.Input_Arn = objectarn # Prepare job parameters jobparams = s3file.PrepareJobParameters(self) if s3file.Input_Type != 'Simple' else '' if jobparams: jobspec = jobparams.ToJSON() # Post message to SQS message = SQS.PostMessage(self.Session, self.Config.QueueUrl, jobspec) jobarn = "" else: jobspec = '{}' message = aadict({"message_id": ""}) jobarn = None # Now we can mark the file as finished uploading. # NOTE: we pass in an empty string for the JobArn so that the pending queries still work s3file.MarkAsEnded(JobArn=jobarn, ObjectArn=objectarn, JobSpecification=jobspec) # That's it. We're ready to return the message id return {"Message": message.message_id}
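S3_PrepareUpload above builds browser-upload form parameters through the project's S3 helper. For comparison, here is a rough boto3-only sketch of the same idea using generate_presigned_post; the bucket, key and content type are placeholders, and this is not the project's actual helper.

import boto3

def presigned_upload_form(bucket, key, content_type, expires_in=3600):
    """Return a dict with 'url' and 'fields' for a browser POST upload."""
    s3 = boto3.client("s3")
    return s3.generate_presigned_post(
        Bucket=bucket,
        Key=key,
        Fields={"Content-Type": content_type},
        Conditions=[{"Content-Type": content_type}],
        ExpiresIn=expires_in,
    )

# Example (placeholder names):
# form = presigned_upload_form("example-input-bucket", "prefix/esid/input.dat", "video/mp4")
# form["url"] and form["fields"] are then handed to the browser-side uploader.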
plot.py
import os from collections import OrderedDict import matplotlib.pyplot as plt import pandas _ramachandran_densities = pandas.read_csv( 'data/rama500-general.data', skiprows=6, delimiter=' ', names=['phi', 'psi', 'value'] ) """ DSSP output: H = α-helix B = residue in isolated β-bridge E = extended strand, participates in β ladder G = 3-helix (310 helix) I = 5 helix (π-helix) T = hydrogen bonded turn S = bend Colors extracted from rcsb.org. """ DSSP_to_color = { 'H': '#ED6161', 'B': '#CCA200', 'E': '#FFFB00', 'G': '#FFC2C2', 'I': '#900000', 'T': '#990099', 'S': '#0000FF', '-': 'black', } def ramachandran_surface(): """
ef ramachandran(torsion_angles, fragment, target_pdb=None, output_writer=None, output_dir=None): """ Plot ramachandran of a set of torsion angles for a given fragment :param torsion_angles: Dictionary with torsion angles phi and psi :param fragment: Fragment identifier, used for displaying purposes """ target_pdb = None plt.figure() ramachandran_surface() plt.title('Ramachandran plot for ' + fragment) plt.scatter( x=torsion_angles['phi'], y=torsion_angles['psi'], s=[1.05 ** x for x in torsion_angles['identity']], c=[DSSP_to_color[ss] for ss in torsion_angles['central_ss']], marker='o', alpha=0.5, ) if target_pdb and (target_pdb in list(torsion_angles['pdb'])): i = list(torsion_angles['pdb']).index(target_pdb) plt.scatter( x=torsion_angles['phi'][i], y=torsion_angles['psi'][i], marker='D', c='red', s=50 ) if output_writer: output_writer.savefig(dpi=150) if output_dir: plt.savefig( os.path.join(output_dir, 'ramachandran', fragment + '.svg'), format='svg', dpi=300 ) plt.close()
Plot density surface for generic ramachandran """ fontsize = 18 ticks = [-180, -90, 0, 90, 180] plt.contourf( list(OrderedDict.fromkeys(_ramachandran_densities['phi'])), list(OrderedDict.fromkeys(_ramachandran_densities['psi'])), _ramachandran_densities['value'].values.reshape(180, 180).T, levels=[0, 0.0005, 0.02, 1], colors=['#FFFFFF', '#B3E8FF', '#7FD9FF'] ) plt.xlabel('$\phi$', fontsize=fontsize) plt.ylabel('$\psi$', fontsize=fontsize) plt.xticks(ticks) plt.yticks(ticks) plt.tick_params(direction="out") plt.margins(0.05) ax = plt.axes() ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.spines['left'].set_smart_bounds(True) ax.spines['bottom'].set_smart_bounds(True) ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') d
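For reference, a minimal way to exercise the ramachandran() helper above with made-up torsion data; the dictionary keys mirror what the function reads, and all values here are illustrative only.

torsion_angles = {
    'phi': [-60.0, -120.0, 55.0],
    'psi': [-45.0, 130.0, 40.0],
    'identity': [80, 60, 40],          # used to scale marker size
    'central_ss': ['H', 'E', 'T'],     # DSSP codes mapped to colors
    'pdb': ['1abc', '2xyz', '3foo'],
}
# ramachandran(torsion_angles, fragment='frag_001', output_dir='plots')
# (output_dir must already contain a 'ramachandran' subdirectory for the SVG save.)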
autofix_lib_test.py
import os import subprocess from unittest import mock import pytest from pre_commit.constants import VERSION as PRE_COMMIT_VERSION import testing.git from all_repos import autofix_lib from all_repos import clone from all_repos import git from all_repos.config import load_config @pytest.mark.parametrize( ('cli_repos', 'expected'), ( (None, ['found_repo']), ([], []), (['cli_repo'], ['cli_repo']), ), ) def test_filter_repos(file_config, cli_repos, expected):
def test_assert_importable_is_importable(): autofix_lib.assert_importable('pre_commit', install='pre-commit') def test_assert_importable_not_importable(): with pytest.raises(SystemExit) as excinfo: autofix_lib.assert_importable('watmodule', install='wat') msg, = excinfo.value.args assert msg == ( 'This tool requires the `watmodule` module to be installed.\n' 'Try installing it via `pip install wat`.' ) def test_require_version_new_enough(): autofix_lib.require_version_gte('pre-commit', '0.17.0') def test_require_version_not_new_enough(): with pytest.raises(SystemExit) as excinfo: autofix_lib.require_version_gte('pre-commit', '999') msg, = excinfo.value.args assert msg == ( f'This tool requires the `pre-commit` package is at least version ' f'999. The currently installed version is {PRE_COMMIT_VERSION}.\n\n' f'Try `pip install --upgrade pre-commit`' ) def test_run(capfd): autofix_lib.run('echo', 'h"i') out, _ = capfd.readouterr() assert out == ( '$ echo \'h"i\'\n' 'h"i\n' ) def test_cwd(tmpdir): orig = os.getcwd() with autofix_lib.cwd(tmpdir): assert os.getcwd() == tmpdir assert os.getcwd() == orig def test_repo_context_success(file_config_files, capsys): expected_rev = testing.git.revparse(file_config_files.dir1) with autofix_lib.repo_context( str(file_config_files.output_dir.join('repo1')), use_color=False, ): assert testing.git.revparse('.') == expected_rev assert git.remote('.') == file_config_files.dir1 out, err = capsys.readouterr() assert err == '' assert 'Errored' not in out def test_repo_context_errors(file_config_files, capsys): with autofix_lib.repo_context( str(file_config_files.output_dir.join('repo1')), use_color=False, ): assert False out, err = capsys.readouterr() assert 'Errored' in out assert 'assert False' in err def test_interactive_control_c(mock_input, capfd): mock_input.set_side_effect(KeyboardInterrupt) with pytest.raises(SystemExit): autofix_lib._interactive_check(use_color=False) out, _ = capfd.readouterr() assert out == ( '***Looks good [y,n,s,q,?]? ^C\n' 'Goodbye!\n' ) def test_interactive_eof(mock_input, capfd): mock_input.set_side_effect(EOFError) with pytest.raises(SystemExit): autofix_lib._interactive_check(use_color=False) out, _ = capfd.readouterr() assert out == ( '***Looks good [y,n,s,q,?]? ^D\n' 'Goodbye!\n' ) def test_interactive_quit(mock_input, capfd): mock_input.set_side_effect('q') with pytest.raises(SystemExit): autofix_lib._interactive_check(use_color=False) out, _ = capfd.readouterr() assert out == ( '***Looks good [y,n,s,q,?]? <<q\n' 'Goodbye!\n' ) def test_interactive_yes(mock_input, capfd): mock_input.set_side_effect('y') assert autofix_lib._interactive_check(use_color=False) is True out, _ = capfd.readouterr() assert out == '***Looks good [y,n,s,q,?]? <<y\n' def test_interactive_no(mock_input, capfd): mock_input.set_side_effect('n') assert autofix_lib._interactive_check(use_color=False) is False out, _ = capfd.readouterr() assert out == '***Looks good [y,n,s,q,?]? <<n\n' def test_interactive_shell(mock_input, capfd): mock_input.set_side_effect('s', 'n') with mock.patch.dict(os.environ, {'SHELL': 'echo'}): assert autofix_lib._interactive_check(use_color=False) is False out, _ = capfd.readouterr() assert out == ( '***Looks good [y,n,s,q,?]? <<s\n' 'Opening an interactive shell, type `exit` to continue.\n' 'Any modifications will be committed.\n' # A newline from echo '\n' '***Looks good [y,n,s,q,?]? 
<<n\n' ) def test_interactive_help(mock_input, capfd): mock_input.set_side_effect('?', 'n') assert autofix_lib._interactive_check(use_color=False) is False out, _ = capfd.readouterr() assert out == ( '***Looks good [y,n,s,q,?]? <<?\n' 'y (yes): yes it looks good, commit and continue.\n' 'n (no): no, do not commit this repository.\n' 's (shell): open an interactive shell in the repo.\n' 'q (quit, ^C): early exit from the autofixer.\n' '? (help): show this help message.\n' '***Looks good [y,n,s,q,?]? <<n\n' ) def test_interactive_garbage(mock_input, capfd): mock_input.set_side_effect('garbage', 'n') assert autofix_lib._interactive_check(use_color=False) is False out, _ = capfd.readouterr() assert out == ( '***Looks good [y,n,s,q,?]? <<garbage\n' 'Unexpected input: garbage\n' 'y (yes): yes it looks good, commit and continue.\n' 'n (no): no, do not commit this repository.\n' 's (shell): open an interactive shell in the repo.\n' 'q (quit, ^C): early exit from the autofixer.\n' '? (help): show this help message.\n' '***Looks good [y,n,s,q,?]? <<n\n' ) def lower_case_f(): f_contents = open('f').read() with open('f', 'w') as f: f.write(f_contents.lower()) def failing_check_fix(): raise AssertionError('nope!') def test_fix_dry_run_no_change(file_config_files, capfd): autofix_lib.fix( ( str(file_config_files.output_dir.join('repo1')), str(file_config_files.output_dir.join('repo2')), ), apply_fix=lower_case_f, config=load_config(file_config_files.cfg), commit=autofix_lib.Commit('message!', 'test-branch', None), autofix_settings=autofix_lib.AutofixSettings( jobs=1, color=False, limit=None, dry_run=True, interactive=False, ), ) out, err = capfd.readouterr() assert err == '' assert 'Errored' not in out # Showed the diff of what would have happened assert '-OHAI\n+ohai\n' in out assert '-OHELLO\n+ohello\n' in out # Didn't actually perform any changes assert file_config_files.dir1.join('f').read() == 'OHAI\n' assert file_config_files.dir2.join('f').read() == 'OHELLO\n' def test_fix_with_limit(file_config_files, capfd): autofix_lib.fix( ( str(file_config_files.output_dir.join('repo1')), str(file_config_files.output_dir.join('repo2')), ), apply_fix=lower_case_f, config=load_config(file_config_files.cfg), commit=autofix_lib.Commit('message!', 'test-branch', None), autofix_settings=autofix_lib.AutofixSettings( jobs=1, color=False, limit=1, dry_run=True, interactive=False, ), ) out, err = capfd.readouterr() assert err == '' assert 'Errored' not in out # Should still see the diff from the first repository assert '-OHAI\n+ohai\n' in out assert '-OHELLO\n+ohello\n' not in out def test_fix_interactive(file_config_files, capfd, mock_input): mock_input.set_side_effect('y', 'n') autofix_lib.fix( ( str(file_config_files.output_dir.join('repo1')), str(file_config_files.output_dir.join('repo2')), ), apply_fix=lower_case_f, config=load_config(file_config_files.cfg), commit=autofix_lib.Commit('message!', 'test-branch', None), autofix_settings=autofix_lib.AutofixSettings( jobs=1, color=False, limit=None, dry_run=False, interactive=True, ), ) assert file_config_files.dir1.join('f').read() == 'ohai\n' assert file_config_files.dir2.join('f').read() == 'OHELLO\n' def test_autofix_makes_commits(file_config_files, capfd): autofix_lib.fix( ( str(file_config_files.output_dir.join('repo1')), str(file_config_files.output_dir.join('repo2')), ), apply_fix=lower_case_f, config=load_config(file_config_files.cfg), commit=autofix_lib.Commit('message!', 'test-branch', 'A B <[email protected]>'), 
autofix_settings=autofix_lib.AutofixSettings( jobs=1, color=False, limit=None, dry_run=False, interactive=False, ), ) out, err = capfd.readouterr() assert err == '' assert 'Errored' not in out assert file_config_files.dir1.join('f').read() == 'ohai\n' assert file_config_files.dir2.join('f').read() == 'ohello\n' # The branch name should be what we specified last_commit_msg = subprocess.check_output(( 'git', '-C', file_config_files.dir1, 'log', '--format=%s', '--first-parent', '-1', )).decode() assert last_commit_msg == "Merge branch 'all-repos_autofix_test-branch'\n" # We should see a commit from the autofix change we made commit = subprocess.check_output(( 'git', '-C', file_config_files.dir1, 'log', '--patch', '--grep', 'message!', '--format=%an %ae\n%B', )).decode() assert commit.startswith( 'A B [email protected]\n' 'message!\n' '\n' 'Committed via https://github.com/asottile/all-repos\n', ) assert commit.endswith('-OHAI\n+ohai\n') def test_fix_failing_check_no_changes(file_config_files, capfd): autofix_lib.fix( ( str(file_config_files.output_dir.join('repo1')), str(file_config_files.output_dir.join('repo2')), ), apply_fix=lower_case_f, check_fix=failing_check_fix, config=load_config(file_config_files.cfg), commit=autofix_lib.Commit('message!', 'test-branch', None), autofix_settings=autofix_lib.AutofixSettings( jobs=1, color=False, limit=None, dry_run=False, interactive=False, ), ) out, err = capfd.readouterr() assert 'nope!' in err assert out.count('Errored') == 2 # An error while checking should not allow the changes assert file_config_files.dir1.join('f').read() == 'OHAI\n' assert file_config_files.dir2.join('f').read() == 'OHELLO\n' def test_noop_does_not_commit(file_config_files): rev_before1 = testing.git.revparse(file_config_files.dir1) rev_before2 = testing.git.revparse(file_config_files.dir2) autofix_lib.fix( ( str(file_config_files.output_dir.join('repo1')), str(file_config_files.output_dir.join('repo2')), ), apply_fix=lambda: None, config=load_config(file_config_files.cfg), commit=autofix_lib.Commit('message!', 'test-branch', None), autofix_settings=autofix_lib.AutofixSettings( jobs=1, color=False, limit=None, dry_run=False, interactive=False, ), ) rev_after1 = testing.git.revparse(file_config_files.dir1) rev_after2 = testing.git.revparse(file_config_files.dir2) assert (rev_before1, rev_before2) == (rev_after1, rev_after2) def test_fix_non_default_branch(file_config_non_default): clone.main(('--config-filename', str(file_config_non_default.cfg))) autofix_lib.fix( ( str(file_config_non_default.output_dir.join('repo1')), ), apply_fix=lower_case_f, config=load_config(file_config_non_default.cfg), commit=autofix_lib.Commit('message!', 'test-branch', 'A B <[email protected]>'), autofix_settings=autofix_lib.AutofixSettings( jobs=1, color=False, limit=None, dry_run=False, interactive=False, ), ) assert file_config_non_default.dir1.join('f').read() == 'ohai\n'
ret = autofix_lib.filter_repos( file_config, cli_repos, lambda _: ['found_repo'], ) assert ret == expected
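Outside the test-suite, autofix_lib.fix() is driven the same way the tests above call it. The sketch below mirrors that call pattern with placeholder paths and a placeholder config filename; it is an illustration, not a ready-made fixer.

from all_repos import autofix_lib
from all_repos.config import load_config

def apply_fix():
    # Runs with the cloned repository as the working directory.
    with open('README.md', 'a') as f:
        f.write('\n<!-- touched by autofixer -->\n')

# autofix_lib.fix(
#     ('/tmp/all-repos-output/repo1',),                      # placeholder repo path
#     apply_fix=apply_fix,
#     config=load_config('all-repos.json'),                  # placeholder config file
#     commit=autofix_lib.Commit('add marker', 'add-marker-branch', None),
#     autofix_settings=autofix_lib.AutofixSettings(
#         jobs=1, color=False, limit=None, dry_run=True, interactive=False,
#     ),
# )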
win32.rs
pub struct
(); impl MemoryGenerator { }
MemoryGenerator
icons.go
package theme import "github.com/fyne-io/fyne" type darkLightResource struct { dark *fyne.StaticResource light *fyne.StaticResource } func (res *darkLightResource) Name() string { if fyne.GetSettings().Theme() == "light"
return res.dark.StaticName } func (res *darkLightResource) Content() []byte { if fyne.GetSettings().Theme() == "light" { return res.light.StaticContent } return res.dark.StaticContent } func (res *darkLightResource) CachePath() string { if fyne.GetSettings().Theme() == "light" { return res.light.CachePath() } return res.dark.CachePath() } var cancel, confirm, checked, unchecked *darkLightResource var contentCut, contentCopy, contentPaste *darkLightResource var info, question, warning *darkLightResource func init() { cancel = &darkLightResource{cancelDark, cancelLight} confirm = &darkLightResource{checkDark, checkLight} checked = &darkLightResource{checkboxDark, checkboxLight} unchecked = &darkLightResource{checkboxblankDark, checkboxblankLight} contentCut = &darkLightResource{contentcutDark, contentcutLight} contentCopy = &darkLightResource{contentcopyDark, contentcopyLight} contentPaste = &darkLightResource{contentpasteDark, contentpasteLight} info = &darkLightResource{infoDark, infoLight} question = &darkLightResource{questionDark, questionLight} warning = &darkLightResource{warningDark, warningLight} } // FyneLogo returns a resource containing the Fyne logo func FyneLogo() fyne.Resource { return fynelogo } // CancelIcon returns a resource containing the standard cancel icon for the current theme func CancelIcon() fyne.Resource { return cancel } // ConfirmIcon returns a resource containing the standard confirm icon for the current theme func ConfirmIcon() fyne.Resource { return confirm } // CheckedIcon returns a resource containing the standard checkbox icon for the current theme func CheckedIcon() fyne.Resource { return checked } // UncheckedIcon returns a resource containing the standard checkbox unchecked icon for the current theme func UncheckedIcon() fyne.Resource { return unchecked } // CutIcon returns a resource containing the standard content cut icon for the current theme func CutIcon() fyne.Resource { return contentCut } // CopyIcon returns a resource containing the standard content copy icon for the current theme func CopyIcon() fyne.Resource { return contentCopy } // PasteIcon returns a resource containing the standard content paste icon for the current theme func PasteIcon() fyne.Resource { return contentPaste } // InfoIcon returns a resource containing the standard dialog info icon for the current theme func InfoIcon() fyne.Resource { return info } // QuestionIcon returns a resource containing the standard dialog question icon for the current theme func QuestionIcon() fyne.Resource { return question } // WarningIcon returns a resource containing the standard dialog warning icon for the current theme func WarningIcon() fyne.Resource { return warning }
{ return res.light.StaticName }
config_test.go
/* * Copyright 2020 The Dragonfly Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package config import ( "os" "testing" "time" "github.com/mitchellh/mapstructure" testifyassert "github.com/stretchr/testify/assert" "gopkg.in/yaml.v3" ) func TestConfig_Load(t *testing.T)
{ assert := testifyassert.New(t) config := &Config{ Server: &ServerConfig{ IP: "127.0.0.1", Host: "foo", Port: 8002, ListenLimit: 1000, CacheDir: "foo", LogDir: "bar", }, Scheduler: &SchedulerConfig{ Algorithm: "default", BackSourceCount: 3, RetryLimit: 10, RetryInterval: 1 * time.Second, GC: &GCConfig{ PeerGCInterval: 1 * time.Minute, PeerTTL: 5 * time.Minute, TaskGCInterval: 1 * time.Minute, TaskTTL: 10 * time.Minute, }, }, DynConfig: &DynConfig{ RefreshInterval: 5 * time.Minute, CDNDir: "foo", }, Host: &HostConfig{ IDC: "foo", NetTopology: "bar", Location: "baz", }, Manager: &ManagerConfig{ Enable: true, Addr: "127.0.0.1:65003", SchedulerClusterID: 1, KeepAlive: KeepAliveConfig{ Interval: 5 * time.Second, }, }, Job: &JobConfig{ Enable: true, GlobalWorkerNum: 1, SchedulerWorkerNum: 1, LocalWorkerNum: 5, Redis: &RedisConfig{ Host: "127.0.0.1", Port: 6379, Password: "foo", BrokerDB: 1, BackendDB: 2, }, }, Metrics: &MetricsConfig{ Enable: false, Addr: ":8000", EnablePeerHost: false, }, } schedulerConfigYAML := &Config{} contentYAML, _ := os.ReadFile("./testdata/scheduler.yaml") var dataYAML map[string]interface{} if err := yaml.Unmarshal(contentYAML, &dataYAML); err != nil { t.Fatal(err) } if err := mapstructure.Decode(dataYAML, &schedulerConfigYAML); err != nil { t.Fatal(err) } assert.EqualValues(config, schedulerConfigYAML) }
plugin_embeddedfile.py
#!/usr/bin/env python """ Modified by CSE to fit ASSEMBLYLINE service """ from pdf_id.pdfid.pdfid import cPluginParent, AddPlugin # 2014/10/13 class cPDFiDEmbeddedFile(cPluginParent): # onlyValidPDF = True name = 'EmbeddedFile plugin' def
(self, oPDFiD, options): self.oPDFiD = oPDFiD def Score(self): if '/EmbeddedFile' in self.oPDFiD.keywords and self.oPDFiD.keywords['/EmbeddedFile'].count > 0: if self.oPDFiD.keywords['/EmbeddedFile'].hexcode > 0: return 1.0 else: return 0.9 else: return 0.0 AddPlugin(cPDFiDEmbeddedFile)
__init__
interface.rs
// Take a look at the license at the top of the repository in the LICENSE file. use super::{InitializingType, Property}; use crate::translate::*; use crate::{IsA, Object, ObjectExt, SignalFlags, StaticType, Type, Value}; use std::borrow::Borrow; use std::marker; use std::mem; impl<T: ObjectInterface> InitializingType<T> { /// Adds an interface prerequisite for `I` to the type. /// /// All implementors of the interface must be a subclass of `I` or implement the interface `I`. pub fn add_prerequisite<I: StaticType>(&mut self)
} /// Macro for boilerplate of [`ObjectInterface`] implementations. /// /// [`ObjectInterface`]: subclass/types/trait.ObjectInterface.html #[macro_export] macro_rules! object_interface { () => { fn get_type() -> $crate::Type { static ONCE: std::sync::Once = std::sync::Once::new(); static mut TYPE: $crate::Type = $crate::Type::Invalid; ONCE.call_once(|| { let type_ = $crate::subclass::register_interface::<Self>(); unsafe { TYPE = type_; } }); unsafe { assert_ne!(TYPE, $crate::Type::Invalid); TYPE } } }; } /// The central trait for defining a `GObject` interface. /// /// Links together the type name and the interface struct for type registration and allows to hook /// into various steps of the type registration and initialization. /// /// This must only be implemented on `#[repr(C)]` structs and have `gobject_ffi::GTypeInterface` as /// the first field. /// /// See [`register_interface`] for registering an implementation of this trait /// with the type system. /// /// [`register_interface`]: fn.register_interface.html pub trait ObjectInterface: Sized + 'static { /// `GObject` type name. /// /// This must be unique in the whole process. const NAME: &'static str; /// Returns the `glib::Type` ID of the interface. /// /// This will register the type with the type system on the first call and is usually generated /// by the [`object_interface!`] macro. /// /// [`object_interface!`]: ../../macro.object_interface.html fn get_type() -> Type; /// Additional type initialization. /// /// This is called right after the type was registered and allows /// interfaces to do additional type-specific initialization, e.g. /// for adding prerequisites. /// /// Optional fn type_init(_type_: &mut InitializingType<Self>) {} /// Interface initialization. /// /// This is called after `type_init` and before the first implementor /// of the interface is created. Interfaces can use this to do interface- /// specific initialization, e.g. for installing properties or signals /// on the interface, and for setting default implementations of interface /// functions. /// /// Optional fn interface_init(&mut self) {} } pub trait ObjectInterfaceExt: ObjectInterface { /// Get interface from an instance. /// /// This will panic if `obj` does not implement the interface. fn from_instance<T: IsA<Object>>(obj: &T) -> &Self { assert!(obj.as_ref().get_type().is_a(&Self::get_type())); unsafe { let klass = (*(obj.as_ptr() as *const gobject_ffi::GTypeInstance)).g_class; let interface = gobject_ffi::g_type_interface_peek(klass as *mut _, Self::get_type().to_glib()); assert!(!interface.is_null()); &*(interface as *const Self) } } /// Install properties on the interface. /// /// All implementors of the interface must provide these properties. fn install_properties<'a, T: Borrow<Property<'a>>>(&mut self, properties: &[T]) { if properties.is_empty() { return; } for property in properties { let property = property.borrow(); let pspec = (property.1)(property.0); unsafe { gobject_ffi::g_object_interface_install_property( self as *mut Self as *mut _, pspec.to_glib_none().0, ); } } } /// Add a new signal to the interface. /// /// This can be emitted later by `glib::Object::emit` and external code /// can connect to the signal to get notified about emissions. fn add_signal(&mut self, name: &str, flags: SignalFlags, arg_types: &[Type], ret_type: Type) { unsafe { super::types::add_signal( *(self as *mut _ as *mut ffi::GType), name, flags, arg_types, ret_type, ); } } /// Add a new signal with class handler to the interface. 
/// /// This can be emitted later by `glib::Object::emit` and external code /// can connect to the signal to get notified about emissions. /// /// The class handler will be called during the signal emission at the corresponding stage. fn add_signal_with_class_handler<F>( &mut self, name: &str, flags: SignalFlags, arg_types: &[Type], ret_type: Type, class_handler: F, ) where F: Fn(&super::SignalClassHandlerToken, &[Value]) -> Option<Value> + Send + Sync + 'static, { unsafe { super::types::add_signal_with_class_handler( *(self as *mut _ as *mut ffi::GType), name, flags, arg_types, ret_type, class_handler, ); } } /// Add a new signal with accumulator to the interface. /// /// This can be emitted later by `glib::Object::emit` and external code /// can connect to the signal to get notified about emissions. /// /// The accumulator function is used for accumulating the return values of /// multiple signal handlers. The new value is passed as second argument and /// should be combined with the old value in the first argument. If no further /// signal handlers should be called, `false` should be returned. fn add_signal_with_accumulator<F>( &mut self, name: &str, flags: SignalFlags, arg_types: &[Type], ret_type: Type, accumulator: F, ) where F: Fn(&super::SignalInvocationHint, &mut Value, &Value) -> bool + Send + Sync + 'static, { unsafe { super::types::add_signal_with_accumulator( *(self as *mut _ as *mut ffi::GType), name, flags, arg_types, ret_type, accumulator, ); } } /// Add a new signal with accumulator and class handler to the interface. /// /// This can be emitted later by `glib::Object::emit` and external code /// can connect to the signal to get notified about emissions. /// /// The accumulator function is used for accumulating the return values of /// multiple signal handlers. The new value is passed as second argument and /// should be combined with the old value in the first argument. If no further /// signal handlers should be called, `false` should be returned. /// /// The class handler will be called during the signal emission at the corresponding stage. fn add_signal_with_class_handler_and_accumulator<F, G>( &mut self, name: &str, flags: SignalFlags, arg_types: &[Type], ret_type: Type, class_handler: F, accumulator: G, ) where F: Fn(&super::SignalClassHandlerToken, &[Value]) -> Option<Value> + Send + Sync + 'static, G: Fn(&super::SignalInvocationHint, &mut Value, &Value) -> bool + Send + Sync + 'static, { unsafe { super::types::add_signal_with_class_handler_and_accumulator( *(self as *mut _ as *mut ffi::GType), name, flags, arg_types, ret_type, class_handler, accumulator, ); } } } impl<T: ObjectInterface> ObjectInterfaceExt for T {} unsafe extern "C" fn interface_init<T: ObjectInterface>( klass: ffi::gpointer, _klass_data: ffi::gpointer, ) { let iface = &mut *(klass as *mut T); iface.interface_init(); } /// Register a `glib::Type` ID for `T`. /// /// This must be called only once and will panic on a second call. /// /// The [`object_interface!`] macro will create a `get_type()` function around this, which will /// ensure that it's only ever called once. 
/// /// [`object_interface!`]: ../../macro.object_interface.html pub fn register_interface<T: ObjectInterface>() -> Type { unsafe { use std::ffi::CString; let type_name = CString::new(T::NAME).unwrap(); assert_eq!( gobject_ffi::g_type_from_name(type_name.as_ptr()), gobject_ffi::G_TYPE_INVALID ); let type_ = from_glib(gobject_ffi::g_type_register_static_simple( Type::BaseInterface.to_glib(), type_name.as_ptr(), mem::size_of::<T>() as u32, Some(interface_init::<T>), 0, None, 0, )); T::type_init(&mut InitializingType::<T>(type_, marker::PhantomData)); type_ } }
{ unsafe { gobject_ffi::g_type_interface_add_prerequisite( self.0.to_glib(), I::static_type().to_glib(), ) } }
main.go
package main import ( "fmt" "sync" ) // multiple goroutines operate on the global variable concurrently var ( x int64 wg sync.WaitGroup lock sync.Mutex // mutex ) func add() { for i := 0; i < 5000; i++ { lock.Lock() // acquire the lock x = x + 1 lock.Unlock() // release the lock } wg.Done() } func main() { wg.Add(2) go add() go add()
wg.Wait() fmt.Println(x) }
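The Go snippet above serializes increments of a shared counter with a mutex. The same pattern, sketched with Python's threading primitives for comparison:

import threading

x = 0
lock = threading.Lock()

def add():
    global x
    for _ in range(5000):
        with lock:          # acquire/release around the shared counter
            x += 1

threads = [threading.Thread(target=add) for _ in range(2)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(x)  # 10000 every time, because the increment is protected by the lock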
check_tlc_schemas.py
import boto3 from pyspark.sql import SparkSession s3 = boto3.resource('s3') nyc_tlc = s3.Bucket('nyc-tlc') spark = SparkSession.builder \ .appName('check_tlc_schemas') \ .getOrCreate() for obj in nyc_tlc.objects.all(): key = obj.key if key.startswith('trip data/') and key.endswith('.csv'): path = 's3a://nyc-tlc/' + key csv_df = spark.read.csv( path = path, header = True, inferSchema = True, enforceSchema = False,
ignoreTrailingWhiteSpace = True, samplingRatio = 0.1 ) print(path) csv_df.printSchema()
ignoreLeadingWhiteSpace = True,
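Because inferSchema forces Spark to sample the data, supplying an explicit schema is usually faster when the columns are known. The column names below are assumptions for illustration, not the actual TLC trip-data layout.

from pyspark.sql.types import StructType, StructField, StringType, TimestampType, DoubleType

example_schema = StructType([
    StructField('vendor_id', StringType(), True),
    StructField('pickup_datetime', TimestampType(), True),
    StructField('dropoff_datetime', TimestampType(), True),
    StructField('trip_distance', DoubleType(), True),
])
# csv_df = spark.read.csv(path, header=True, schema=example_schema)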
log.py
#!/usr/bin/env python3 # coding=utf-8 import datetime import logging import functools import os import traceback import inspect if "logs" in os.listdir('../'): pass else: os.mkdir('../logs') now = datetime.datetime.now().strftime('%Y-%m-%d_%H_%M_%S') _log_fp = "../logs/" + now + ".log" logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S', filename=_log_fp, filemode='w') _console = logging.StreamHandler() _console.setLevel(logging.INFO) formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s') _console.setFormatter(formatter) LOGGER = logging.getLogger('czb test') LOGGER.addHandler(_console)
return_value = None @functools.wraps(method) def inner(*args, **kwargs): start = datetime.datetime.now() try: nonlocal return_value return_value = method(*args, **kwargs) except Exception: e = traceback.format_exc() LOGGER.error('Exception:{}'.format(e)) finally: pass end = datetime.datetime.now() delta = end - start LOGGER.info('Called function {};\n positional args: {}\n keyword args: {},\n return value: {} ;\n' .format(inspect.stack()[1][3], str(args), str(kwargs), return_value)) LOGGER.warning('Called function {};\n start time {};\n elapsed {} ;\n' .format(inspect.stack()[1][3], start, delta, return_value)) return return_value return inner
def logged(method): """Create a logging decorator that records the decorated function's arguments and This is rather poor code; the logging module should be replaced with CLog """
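A tighter sketch of the same decorator idea, logging arguments, return value and elapsed time while re-raising exceptions instead of swallowing them; this illustrates the pattern rather than being a drop-in replacement for the module above.

import datetime
import functools
import logging

def logged_call(method):
    @functools.wraps(method)
    def inner(*args, **kwargs):
        start = datetime.datetime.now()
        try:
            result = method(*args, **kwargs)
        except Exception:
            logging.exception('%s raised', method.__name__)
            raise
        elapsed = datetime.datetime.now() - start
        logging.info('%s(args=%s, kwargs=%s) -> %r in %s',
                     method.__name__, args, kwargs, result, elapsed)
        return result
    return inner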
util.rs
Hacspec Developers Typechecker and compiler for the Hacspec subset of Rust USAGE: cargo hacspec [FLAGS] [OPTIONS] <CRATE> FLAGS: -v Verbosity --init Creates a '<output>_template' file along with the output --update Merges changes into output file based on the template file OPTIONS: -o <FILE> Name of the F* (.fst), Easycrypt (.ec), or Coq (.v) output file ARGS: CRATE The crate to analyse "; #[allow(dead_code)] pub(crate) fn check_vec<T>(v: Vec<Result<T, ()>>) -> Result<Vec<T>, ()> { if v.iter().all(|t| t.is_ok()) { Ok(v.into_iter().map(|t| t.unwrap()).collect()) } else { Err(()) } }
pub(crate) const APP_USAGE: &'static str = "Hacspec 0.1.0
packager.ts
/** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ import * as fs from 'fs'; import * as path from 'path'; import * as shx from 'shelljs'; function main(args: string[]): number { // Exit immediately when encountering an error. shx.set('-e'); // Keep track of whether an error has occured so that we can return an appropriate exit code. let errorHasOccured = false; // This utility expects all of its arguments to be specified in a params file generated by // bazel (see https://docs.bazel.build/versions/master/skylark/lib/Args.html#use_param_file). const paramFilePath = args[0]; // Bazel params may be surrounded with quotes function unquoteParameter(s: string) { return s.replace(/^'(.*)'$/, '$1'); } // Parameters are specified in the file one per line. const params = fs.readFileSync(paramFilePath, 'utf-8').split('\n').map(unquoteParameter); const [ // Output directory for the npm package. out, // The package segment of the ng_package rule's label (e.g. 'package/common'). srcDir, // The bazel-bin dir joined with the srcDir (e.g. 'bazel-bin/package.common'). // This is the intended output location for package artifacts. binDir, // The bazel-genfiles dir joined with the srcDir (e.g. 'bazel-bin/package.common'). genfilesDir, // JSON data mapping each entry point to the generated bundle index and // flat module metadata, for example // {"@angular/core": { // "index": "bazel-bin/packages/core/core.js", // "typings": "bazel-bin/packages/core/core.d.ts", // "metadata": "bazel-bin/packages/core/core.metadata.json" // }, // ... // } modulesManifestArg, // Path to the package's README.md. readmeMd, // List of rolled-up flat ES2015 modules fesm2015Arg, // List of rolled-up flat ES5 modules fesm5Arg, // List of individual ES2015 modules esm2015Arg, // List of individual ES5 modules esm5Arg, // List of all UMD bundles generated by rollup. bundlesArg, // List of all files in the ng_package rule's srcs. srcsArg, // List of all type definitions that need to packaged into the ng_package. typeDefinitionsArg, // List of all files in the ng_package rule's data. dataArg, // Path to the package's LICENSE. licenseFile, // List of all dts bundles generated by the API extractor. dtsBundleArg, // The dts bundle file suffix example: '.bundle.d.ts' dtsBundleFileSuffix, ] = params; const fesm2015 = fesm2015Arg.split(',').filter(s => !!s); const fesm5 = fesm5Arg.split(',').filter(s => !!s); const esm2015 = esm2015Arg.split(',').filter(s => !!s); const esm5 = esm5Arg.split(',').filter(s => !!s); const bundles = bundlesArg.split(',').filter(s => !!s); const typeDefinitions = typeDefinitionsArg.split(',').filter(s => !!s); const srcs = srcsArg.split(',').filter(s => !!s); const dataFiles: string[] = dataArg.split(',').filter(s => !!s); const modulesManifest = JSON.parse(modulesManifestArg); const dtsBundles: string[] = dtsBundleArg.split(',').filter(s => !!s); /** * List of known `package.json` fields which provide information about * supported package formats and their associated entry paths. */ const knownFormatPackageJsonFields = ['main', 'fesm2015', 'esm2015', 'typings', 'module', 'es2015']; if (readmeMd) { copyFile(readmeMd, out); } /** * Writes a file into the package based on its input path, relativizing to the package path. * @param inputPath Path to the file in the input tree. * @param fileContent Content of the file. 
*/ function writeFileFromInputPath(inputPath: string, fileContent: string|Buffer) { // We want the relative path from the given file to its ancestor "root" directory. // This root depends on whether the file lives in the source tree (srcDir) as a basic file // input to ng_package, the bin output tree (binDir) as the output of another rule, or // the genfiles output tree (genfilesDir) as the output of a genrule. let rootDir: string; if (inputPath.includes(binDir)) { rootDir = binDir; } else if (inputPath.includes(genfilesDir)) { rootDir = genfilesDir; } else { rootDir = srcDir; } const outputPath = path.join(out, path.relative(rootDir, inputPath)); // Always ensure that the target directory exists. shx.mkdir('-p', path.dirname(outputPath)); fs.writeFileSync(outputPath, fileContent); } /** * Copies a file into the package based on its input path, relativizing to the package path. * @param inputPath a path relative to the binDir, typically from a file in the deps[] */ function copyFileFromInputPath(inputPath: string) { writeFileFromInputPath(inputPath, fs.readFileSync(inputPath)); } /** * Relativize the path where a file is written. * @param file a path containing a re-rooted segment like .esm5 * @param suffix the re-rooted directory * @param outDir path where we copy the file, relative to the out */ function writeEsmFile(file: string, suffix: string, outDir: string) { function relPath(file: string, suffix: string) { if (suffix) { // Note that the specified file path is always using the posix path delimiter. const root = suffix ? file.substr(0, file.lastIndexOf(`${suffix}/`) + suffix.length + 1) : binDir; return path.dirname(path.relative(path.join(root, srcDir), file)); } else { return path.dirname(path.relative(binDir, file)); } } const rel = relPath(file, suffix); if (!rel.startsWith('..')) { copyFile(file, path.join(out, outDir), rel); } } esm2015.forEach(file => writeEsmFile(file, '', 'esm2015')); bundles.forEach(bundle => { copyFile(bundle, out, 'bundles'); }); fesm2015.forEach(file => { copyFile(file, out, 'fesm2015'); }); // Copy all type definitions into the package. This is necessary so that developers can use // the package with type definitions. typeDefinitions.forEach(f => writeFileFromInputPath(f, readTypingsAndStripAmdModule(f))); // Copy all `data` files into the package. These are files that aren't built by the ng_package // rule, but instead are just straight copied into the package, e.g. global CSS assets. dataFiles.forEach(f => copyFileFromInputPath(f)); // Iterate through the entry point modules // We do this first because we also record new paths for the esm5 and esm2015 copies // of the index JS file, which we need to amend the package.json. Object.keys(modulesManifest).forEach(moduleName => { const moduleFiles = modulesManifest[moduleName]; const relative = path.relative(binDir, moduleFiles['index']); moduleFiles['esm5_index'] = path.join(binDir, 'esm5', relative); moduleFiles['esm2015_index'] = path.join(binDir, 'esm2015', relative); // Metadata file is optional as entry-points can be also built // with the "ts_library" rule. 
const metadataFile = moduleFiles['metadata']; if (!metadataFile) { return; } const typingsOutFile = moduleFiles['typings']; // We only support all modules within a package to be dts bundled // ie: if @angular/common/http has flat dts, so should @angular/common if (dtsBundles.length) { const metadataContent = rewireMetadata(metadataFile, typingsOutFile); writeFileFromInputPath(metadataFile, metadataContent); } else { copyFileFromInputPath(metadataFile); } }); const licenseBanner = licenseFile ? fs.readFileSync(licenseFile, 'utf-8') : ''; dtsBundles.forEach(bundleFile => { const cleanDistPath = bundleFile.replace(dtsBundleFileSuffix, '.d.ts'); // API extractor will not dedupe license comments from various files // this will remove all the license comments and append the license banner. const content = licenseBanner + '\n' + readTypingsAndStripAmdModule(bundleFile) .replace(/(\/\*\*\s+\*\s\@license(((?!\*\/).|\s)*)\*\/)/gm, ''); writeFileFromInputPath(cleanDistPath, content); }); // Root package name (e.g. '@angular/common'), captures as we iterate through sources below. let rootPackageName = ''; const packagesWithExistingPackageJson = new Set<string>(); for (const src of srcs) { if (src.includes(binDir) || src.includes(genfilesDir)) { errorHasOccured = true; console.error( 'The "srcs" for ng_package should not include output of other rules. Found:\n' + ` ${src}`); } let content = fs.readFileSync(src, 'utf-8'); // Modify package.json files as necessary for publishing if (path.basename(src) === 'package.json') { const packageJson = JSON.parse(content); content = amendPackageJson(src, packageJson, false); const packageName = packageJson['name']; packagesWithExistingPackageJson.add(packageName); // Keep track of the root package name, e.g. "@angular/common". We assume that the // root name will be shortest because secondary entry-points will append to it // (e.g. "@angular/common/http"). if (!rootPackageName || packageName.length < rootPackageName.length) { rootPackageName = packageJson['name']; } } writeFileFromInputPath(src, content); } // Generate extra files for secondary entry-points. Object.keys(modulesManifest).forEach(entryPointPackageName => { const entryPointName = entryPointPackageName.substr(rootPackageName.length + 1); if (!entryPointName) return; const metadataFilePath = modulesManifest[entryPointPackageName]['metadata']; if (metadataFilePath) { createMetadataReexportFile( entryPointName, modulesManifest[entryPointPackageName]['metadata'], entryPointPackageName); } createTypingsReexportFile( entryPointName, licenseBanner, modulesManifest[entryPointPackageName]['typings']); if (!packagesWithExistingPackageJson.has(entryPointPackageName)) { createEntryPointPackageJson(entryPointName, entryPointPackageName); } }); return errorHasOccured ? 1 : 0; /** * Convert a binDir-relative path to srcDir-relative * @param from path to a file under the srcDir, like packages/core/testing/package.json * @param file path to a file under the binDir, like bazel-bin/core/testing/generated.js */ function srcDirRelative(from: string, file: string) { const result = normalizeSeparators( path.relative(path.dirname(from), path.join(srcDir, path.relative(binDir, file)))); if (result.startsWith('..')) return result; return `./${result}`; } function copyFile(file: string, baseDir: string, relative = '.') { const dir = path.join(baseDir, relative); // output file is .js if the input file is .mjs const outFile = path.posix.join( dir, path.basename(file.endsWith('.mjs') ? 
file.replace(/\.mjs$/, '.js') : file)); shx.mkdir('-p', dir); shx.cp(file, outFile); // Double-underscore is used to escape forward slash in FESM filenames. // See ng_package.bzl: // fesm_output_filename = entry_point.replace("/", "__") // We need to unescape these. if (outFile.indexOf('__') >= 0) { const outputPath = path.join(dir, ...path.basename(outFile).split('__')); shx.mkdir('-p', path.dirname(outputPath)); shx.mv(path.join(dir, path.basename(file)), outputPath); // if we are renaming the .js file, we'll also need to update the sourceMappingURL in the file if (outFile.endsWith('.js')) { shx.chmod('+w', outputPath); shx.sed('-i', `${path.basename(file)}.map`, `${path.basename(outputPath)}.map`, outputPath); } } } /** * Inserts or edits properties into the package.json file(s) in the package so that * they point to all the right generated artifacts. * * @param packageJson The path to the package.json file. * @param parsedPackage Parsed package.json content * @param isGeneratedPackageJson Whether the passed package.json has been generated. */ function amendPackageJson( packageJson: string, parsedPackage: {[key: string]: string}, isGeneratedPackageJson: boolean) { const packageName = parsedPackage['name']; const moduleData = modulesManifest[packageName]; // If a package json file has been discovered that does not match any // module in the manifest, we report a warning as most likely the target // is configured incorrectly (e.g. missing `module_name` attribute). if (!moduleData) { // Ideally we should throw here, as we got an entry point that doesn't // have flat module metadata / bundle index, so it may have been an // ng_module that's missing a module_name attribute. // However, @angular/compiler can't be an ng_module, as it's the internals // of the ngc compiler, yet we want to build an ng_package for it. // So ignore package.json files when we are missing data. console.error('WARNING: no module metadata for package', packageName); console.error(' Not updating the package.json file to point to it'); console.error( ' The ng_module for this package is possibly missing the module_name attribute '); return JSON.stringify(parsedPackage, null, 2); } // If we guessed the index paths for a module, and it contains an explicit `package.json` // file that already sets format properties, we skip automatic insertion of format // properties but report a warning in case properties have been set by accident. if (moduleData.guessedPaths && !isGeneratedPackageJson && hasExplicitFormatProperties(parsedPackage)) { console.error('WARNING: `package.json` explicitly sets format properties (like `main`).'); console.error( ' Skipping automatic insertion of format properties as explicit ' + 'format properties are set.'); console.error(' Ignore this warning if explicit properties are set intentionally.'); return JSON.stringify(parsedPackage, null, 2); } // Derive the paths to the files from the hard-coded names we gave them. // TODO(alexeagle): it would be better to transfer this information from the place // where we created the filenames, via the modulesManifestArg parsedPackage['main'] = getBundleName(packageName, 'bundles'); parsedPackage['fesm2015'] = getBundleName(packageName, 'fesm2015'); parsedPackage['esm2015'] = srcDirRelative(packageJson, moduleData['esm2015_index']); parsedPackage['typings'] = srcDirRelative(packageJson, moduleData['typings']); // For now, we point the primary entry points at the fesm files, because of Webpack // performance issues with a large number of individual files. 
parsedPackage['module'] = parsedPackage['fesm2015']; parsedPackage['es2015'] = parsedPackage['fesm2015']; return JSON.stringify(parsedPackage, null, 2); } // e.g. @angular/common/http/testing -> ../../bundles/common-http-testing.umd.js // or @angular/common/http/testing -> ../../fesm5/http/testing.js function getBundleName(packageName: string, dir: string) { const parts = packageName.split('/'); // Remove the scoped package part, like @angular if present const nameParts = packageName.startsWith('@') ? parts.splice(1) : parts; const relativePath = newArray(nameParts.length - 1, '..').join('/') || '.'; let basename: string; if (dir === 'bundles') { basename = nameParts.join('-') + '.umd'; } else if (nameParts.length === 1) { basename = nameParts[0]; } else { basename = nameParts.slice(1).join('/'); } return [relativePath, dir, basename + '.js'].join('/'); } /** Whether the package explicitly sets any of the format properties (like `main`). */ function hasExplicitFormatProperties(parsedPackage: {[key: string]: string}): boolean { return Object.keys(parsedPackage) .some(propertyName => knownFormatPackageJsonFields.includes(propertyName)); } /** Creates metadata re-export file for a secondary entry-point. */ function createMetadataReexportFile( entryPointName: string, metadataFile: string, packageName: string) { const inputPath = path.join(srcDir, `${entryPointName}.metadata.json`); writeFileFromInputPath(inputPath, JSON.stringify({ '__symbolic': 'module', 'version': 3, 'metadata': {}, 'exports': [{'from': `${srcDirRelative(inputPath, metadataFile.replace(/.metadata.json$/, ''))}`}], 'flatModuleIndexRedirect': true, 'importAs': packageName }) + '\n'); } /** * Creates a typings (d.ts) re-export file for a secondary-entry point, * e.g., `export * from './common/common'` */ function createTypingsReexportFile(entryPointName: string, license: string, typingsFile: string) { const inputPath = path.join(srcDir, `${entryPointName}.d.ts`); const content = `${license} export * from '${srcDirRelative(inputPath, typingsFile.replace(/\.d\.tsx?$/, ''))}'; `; writeFileFromInputPath(inputPath, content); } /** * Creates a package.json for a secondary entry-point. * @param dir The directory under which the package.json should be written. * @param entryPointPackageName The full package name for the entry point, * e.g. '@angular/common/http'. */ function createEntryPointPackageJson(dir: string, entryPointPackageName: string) { const pkgJson = path.join(srcDir, dir, 'package.json'); const content = amendPackageJson(pkgJson, {name: entryPointPackageName}, true); writeFileFromInputPath(pkgJson, content); } /** * Normalizes the specified path by replacing backslash separators with Posix * forward slash separators. */ function
(path: string): string { return path.replace(/\\/g, '/'); } /** * Rewires metadata to point to the flattened dts file. * * @param metadataPath the metadata file path * @param typingsPath the typings bundle entrypoint */ function rewireMetadata(metadataPath: string, typingsPath: string): string { const metadata = JSON.parse(fs.readFileSync(metadataPath, 'utf-8')); let typingsRelativePath = normalizeSeparators(path.relative(path.dirname(metadataPath), typingsPath)); if (!typingsRelativePath.startsWith('..')) { typingsRelativePath = `./${typingsRelativePath}`; } typingsRelativePath = typingsRelativePath.replace('.d.ts', ''); // the regexp here catches all relative paths such as: // ./src/core/foo.d.ts and ../src/core/foo.d.ts const relativePathRegex = /\.?\.\/[\w\.\-_\/]+/g; if (metadata.exports) { // Strip re-exports which are now self-references metadata.exports = metadata.exports.filter((e: {from: string}) => !e.from.match(relativePathRegex)); } return JSON.stringify(metadata).replace(relativePathRegex, typingsRelativePath); } /** * Strip the named AMD module for compatibility with non-bazel users from typings content * @param filePath dts file path */ function readTypingsAndStripAmdModule(filePath: string): string { return fs .readFileSync(filePath, 'utf-8') // Strip the named AMD module for compatibility with non-bazel users .replace(/^\/\/\/ <amd-module name=.*\/>[\r\n]+/gm, ''); } } if (require.main === module) { process.exitCode = main(process.argv.slice(2)); } export function newArray<T = any>(size: number): T[]; export function newArray<T>(size: number, value: T): T[]; export function newArray<T>(size: number, value?: T): T[] { const list: T[] = []; for (let i = 0; i < size; i++) { list.push(value!); } return list; }
normalizeSeparators
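A minimal sketch of the double-underscore convention the copy step above reverses: ng_package.bzl escapes "/" as "__" in FESM filenames, and the packager splits the basename on "__" to rebuild the nested path (also patching the sourceMappingURL). The Go functions below are hypothetical and exist only to illustrate the round-trip; they are not part of the packager.

package main

import (
	"fmt"
	"path"
	"strings"
)

// escapeEntryPoint mirrors fesm_output_filename = entry_point.replace("/", "__").
func escapeEntryPoint(entryPoint string) string {
	return strings.ReplaceAll(entryPoint, "/", "__") + ".js"
}

// unescapeOutputFile reverses the escaping by splitting the basename on "__"
// and joining the pieces back into nested directories under dir.
func unescapeOutputFile(dir, file string) string {
	parts := strings.Split(strings.TrimSuffix(file, ".js"), "__")
	return path.Join(append([]string{dir}, parts...)...) + ".js"
}

func main() {
	escaped := escapeEntryPoint("common/http/testing")
	fmt.Println(escaped)                                 // common__http__testing.js
	fmt.Println(unescapeOutputFile("fesm2015", escaped)) // fesm2015/common/http/testing.js
}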
jsonpath.go
// Copyright 2020 The Lokomotive Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package testutil import ( "reflect" "testing" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" yamlserializer "k8s.io/apimachinery/pkg/runtime/serializer/yaml" "k8s.io/client-go/util/jsonpath" ) // unstructredObj accepts a Kubernetes manifest in YAML format and returns an object of type // `unstructured.Unstructured`. This object has many methods that can be used by the consumer to // extract metadata from the Kubernetes manifest. func unstructredObj(t *testing.T, yamlObj string) *unstructured.Unstructured { u := &unstructured.Unstructured{} // Decode YAML into `unstructured.Unstructured`. dec := yamlserializer.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) if _, _, err := dec.Decode([]byte(yamlObj), nil, u); err != nil { t.Fatalf("Converting config to unstructured.Unstructured: %v", err) } return u } // valFromObject takes a JSON path as a string and an object of type `unstructured.Unstructured`. // This function returns an object of type `reflect.Value` at that JSON path. func valFromObject(t *testing.T, jp string, obj *unstructured.Unstructured) reflect.Value { jPath := jsonpath.New("parse") if err := jPath.Parse(jp); err != nil { t.Fatalf("Parsing JSONPath: %v", err) } v, err := jPath.FindResults(obj.Object) if err != nil { t.Fatalf("Finding results using JSONPath in the YAML file: %v", err) } if len(v) == 0 || len(v[0]) == 0 { t.Fatalf("No result found") } return v[0][0] } // jsonPathValue extracts an object at a JSON path from a YAML config, and returns an interface // object. func jsonPathValue(t *testing.T, yamlConfig string, jsonPath string) interface{} { u := unstructredObj(t, yamlConfig) got := valFromObject(t, jsonPath, u) switch got.Kind() { //nolint:exhaustive case reflect.Interface: // TODO: Add type switch here for concrete types. return got.Interface() default: t.Fatalf("Extracted object has an unknown type: %v", got.Kind()) } return nil } // MatchJSONPathStringValue is a helper function for component unit tests. It compares the string at // a JSON path in a YAML config to the expected string. func
(t *testing.T, yamlConfig string, jsonPath string, expected string) { obj := jsonPathValue(t, yamlConfig, jsonPath) got, ok := obj.(string) if !ok { t.Fatalf("Value is not string: %#v", obj) } if got != expected { t.Fatalf("Expected: %s, Got: %s", expected, got) } }
MatchJSONPathStringValue
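A short usage sketch for the helpers above, assuming it sits alongside them in the same testutil package; the Deployment manifest and JSONPath below are illustrative only.

package testutil

import "testing"

const exampleDeployment = `
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example
  namespace: demo
`

func TestExampleDeploymentNamespace(t *testing.T) {
	// Fails the test via t.Fatalf if the string at the JSONPath does not
	// equal the expected value, as implemented by MatchJSONPathStringValue.
	MatchJSONPathStringValue(t, exampleDeployment, "{.metadata.namespace}", "demo")
}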
tsocio.js
/**@preserve GeneXus Java 10_3_12-110051 on February 11, 2021 4:24:58.27 */ gx.evt.autoSkip = false; gx.define('tsocio', false, function () { this.ServerClass = "tsocio" ; this.PackageName = "" ; this.setObjectType("trn"); this.setOnAjaxSessionTimeout("Warn"); this.hasEnterEvent = true; this.skipOnEnter = false; this.addKeyListener("20", "LAST"); this.addKeyListener("19", "FIRST"); this.addKeyListener("16", "SELECT"); this.addKeyListener("13", "DELETE"); this.addKeyListener("9", "GET"); this.addKeyListener("2", "PROMPT"); this.addKeyListener("8", "NEXT"); this.addKeyListener("7", "PREVIOUS"); this.addKeyListener("4", "CHECK"); this.addKeyListener("5", "REFRESH"); this.addKeyListener("12", "CANCEL"); this.addKeyListener("1", "HELP"); this.SetStandaloneVars=function() { this.Gx_mode=gx.fn.getControlValue("vMODE") ; }; this.Valid_Socioassociadoempresaid=function() { try { var gxballoon = gx.util.balloon.getNew("SOCIOASSOCIADOEMPRESAID"); this.AnyError = 0; } catch(e){} try { if (gxballoon == null) return true; return gxballoon.show(); } catch(e){} return true ; } this.Valid_Socioassociadoid=function() { gx.ajax.validSrvEvt("dyncall","valid_Socioassociadoid",["gx.O.A10023SocioAssociadoEmpresaId", "gx.O.A10024SocioAssociadoId"],[]); return true; } this.Valid_Socioid=function() { gx.ajax.validSrvEvt("dyncall","valid_Socioid",["gx.O.A10023SocioAssociadoEmpresaId", "gx.O.A10024SocioAssociadoId", "gx.O.A10025SocioId", "gx.O.A10109SocioSnCredenciado", "gx.O.A10110SocioUsuarioAlt", 'gx.date.urlDateTime(gx.O.A10108SocioDataHoraAlt,"DMY4")'],["A10109SocioSnCredenciado", "A10110SocioUsuarioAlt", "A10108SocioDataHoraAlt", "Gx_mode", "Z10023SocioAssociadoEmpresaId", "Z10024SocioAssociadoId", "Z10025SocioId", "Z10109SocioSnCredenciado", "Z10110SocioUsuarioAlt", "Z10108SocioDataHoraAlt", ["BTN_DELETE2","Enabled"], ["BTN_ENTER2","Enabled"]]); return true; } this.e11ev758_client=function() { this.executeServerEvent("ENTER", true, null, false, false); }; this.e12ev758_client=function() { this.executeServerEvent("CANCEL", true, null, false, false); }; this.GXValidFnc = []; var GXValidFnc = this.GXValidFnc ; this.GXCtrlIds=[2,5,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,27,28,34,37,39,42,44,47,49,52,54,57,59,62,64,70]; this.GXLastCtrlId =70; GXValidFnc[2]={fld:"TABLEMAIN",grid:0}; GXValidFnc[5]={fld:"TABLETOOLBAR",grid:0}; GXValidFnc[8]={fld:"SECTIONTOOLBAR",grid:0}; GXValidFnc[9]={fld:"BTN_FIRST",grid:0}; GXValidFnc[10]={fld:"BTN_FIRST_SEPARATOR",grid:0}; GXValidFnc[11]={fld:"BTN_PREVIOUS",grid:0}; GXValidFnc[12]={fld:"BTN_PREVIOUS_SEPARATOR",grid:0}; GXValidFnc[13]={fld:"BTN_NEXT",grid:0}; GXValidFnc[14]={fld:"BTN_NEXT_SEPARATOR",grid:0}; GXValidFnc[15]={fld:"BTN_LAST",grid:0}; GXValidFnc[16]={fld:"BTN_LAST_SEPARATOR",grid:0}; GXValidFnc[17]={fld:"BTN_SELECT",grid:0}; GXValidFnc[18]={fld:"BTN_SELECT_SEPARATOR",grid:0}; GXValidFnc[19]={fld:"BTN_ENTER2",grid:0}; GXValidFnc[20]={fld:"BTN_ENTER2_SEPARATOR",grid:0}; GXValidFnc[21]={fld:"BTN_CANCEL2",grid:0}; GXValidFnc[22]={fld:"BTN_CANCEL2_SEPARATOR",grid:0}; GXValidFnc[23]={fld:"BTN_DELETE2",grid:0}; GXValidFnc[24]={fld:"BTN_DELETE2_SEPARATOR",grid:0}; GXValidFnc[27]={fld:"GROUPDATA",grid:0}; GXValidFnc[28]={fld:"TABLE1",grid:0}; GXValidFnc[34]={fld:"TABLE2",grid:0}; GXValidFnc[37]={fld:"TEXTBLOCKSOCIOASSOCIADOEMPRESAID", format:0,grid:0}; 
GXValidFnc[39]={lvl:0,type:"char",len:10,dec:0,sign:false,pic:"@!",ro:0,grid:0,gxgrid:null,fnc:this.Valid_Socioassociadoempresaid,isvalid:null,rgrid:[],fld:"SOCIOASSOCIADOEMPRESAID",gxz:"Z10023SocioAssociadoEmpresaId",gxold:"O10023SocioAssociadoEmpresaId",gxvar:"A10023SocioAssociadoEmpresaId",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.A10023SocioAssociadoEmpresaId=Value},v2z:function(Value){gx.O.Z10023SocioAssociadoEmpresaId=Value},v2c:function(){gx.fn.setControlValue("SOCIOASSOCIADOEMPRESAID",gx.O.A10023SocioAssociadoEmpresaId,0)},c2v:function(){gx.O.A10023SocioAssociadoEmpresaId=this.val()},val:function(){return gx.fn.getControlValue("SOCIOASSOCIADOEMPRESAID")},nac:gx.falseFn}; GXValidFnc[42]={fld:"TEXTBLOCKSOCIOASSOCIADOID", format:0,grid:0}; GXValidFnc[44]={lvl:0,type:"int",len:7,dec:0,sign:false,pic:"ZZZZZZ9",ro:0,grid:0,gxgrid:null,fnc:this.Valid_Socioassociadoid,isvalid:null,rgrid:[],fld:"SOCIOASSOCIADOID",gxz:"Z10024SocioAssociadoId",gxold:"O10024SocioAssociadoId",gxvar:"A10024SocioAssociadoId",ucs:[],op:[],ip:[44,39],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.A10024SocioAssociadoId=gx.num.intval(Value)},v2z:function(Value){gx.O.Z10024SocioAssociadoId=gx.num.intval(Value)},v2c:function(){gx.fn.setControlValue("SOCIOASSOCIADOID",gx.O.A10024SocioAssociadoId,0)},c2v:function(){gx.O.A10024SocioAssociadoId=gx.num.intval(this.val())},val:function(){return gx.fn.getIntegerValue("SOCIOASSOCIADOID",'.')},nac:gx.falseFn}; GXValidFnc[47]={fld:"TEXTBLOCKSOCIOID", format:0,grid:0}; GXValidFnc[49]={lvl:0,type:"int",len:7,dec:0,sign:false,pic:"ZZZZZZ9",ro:0,grid:0,gxgrid:null,fnc:this.Valid_Socioid,isvalid:null,rgrid:[],fld:"SOCIOID",gxz:"Z10025SocioId",gxold:"O10025SocioId",gxvar:"A10025SocioId",ucs:[],op:[64,59,54],ip:[64,59,54,49,44,39],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.A10025SocioId=gx.num.intval(Value)},v2z:function(Value){gx.O.Z10025SocioId=gx.num.intval(Value)},v2c:function(){gx.fn.setControlValue("SOCIOID",gx.O.A10025SocioId,0)},c2v:function(){gx.O.A10025SocioId=gx.num.intval(this.val())},val:function(){return gx.fn.getIntegerValue("SOCIOID",'.')},nac:gx.falseFn}; GXValidFnc[52]={fld:"TEXTBLOCKSOCIOSNCREDENCIADO", format:0,grid:0}; GXValidFnc[54]={lvl:0,type:"char",len:1,dec:0,sign:false,pic:"@!",ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"SOCIOSNCREDENCIADO",gxz:"Z10109SocioSnCredenciado",gxold:"O10109SocioSnCredenciado",gxvar:"A10109SocioSnCredenciado",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.A10109SocioSnCredenciado=Value},v2z:function(Value){gx.O.Z10109SocioSnCredenciado=Value},v2c:function(){gx.fn.setControlValue("SOCIOSNCREDENCIADO",gx.O.A10109SocioSnCredenciado,0);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.A10109SocioSnCredenciado=this.val()},val:function(){return gx.fn.getControlValue("SOCIOSNCREDENCIADO")},nac:gx.falseFn}; this.declareDomainHdlr( 54 , function() { }); GXValidFnc[57]={fld:"TEXTBLOCKSOCIOUSUARIOALT", format:0,grid:0}; GXValidFnc[59]={lvl:0,type:"char",len:12,dec:0,sign:false,pic:"@!",ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"SOCIOUSUARIOALT",gxz:"Z10110SocioUsuarioAlt",gxold:"O10110SocioUsuarioAlt",gxvar:"A10110SocioUsuarioAlt",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.A10110SocioUsuarioAlt=Value},v2z:function(Value){gx.O.Z10110SocioUsuarioAlt=Value},v2c:function(){gx.fn.setControlValue("SOCIOUSUARIOALT",gx.O.A10110SocioUsuarioAlt,0);if (typeof(this.dom_hdl) == 'function') 
this.dom_hdl.call(gx.O);},c2v:function(){gx.O.A10110SocioUsuarioAlt=this.val()},val:function(){return gx.fn.getControlValue("SOCIOUSUARIOALT")},nac:gx.falseFn}; this.declareDomainHdlr( 59 , function() { }); GXValidFnc[62]={fld:"TEXTBLOCKSOCIODATAHORAALT", format:0,grid:0}; GXValidFnc[64]={lvl:0,type:"dtime",len:10,dec:5,sign:false,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"SOCIODATAHORAALT",gxz:"Z10108SocioDataHoraAlt",gxold:"O10108SocioDataHoraAlt",gxvar:"A10108SocioDataHoraAlt",dp:{f:0,st:true,wn:false,mf:false,pic:"99/99/9999 99:99",dec:5},ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(Value){gx.O.A10108SocioDataHoraAlt=gx.fn.toDatetimeValue(Value)},v2z:function(Value){gx.O.Z10108SocioDataHoraAlt=gx.fn.toDatetimeValue(Value)},v2c:function(){gx.fn.setControlValue("SOCIODATAHORAALT",gx.O.A10108SocioDataHoraAlt,0);if (typeof(this.dom_hdl) == 'function') this.dom_hdl.call(gx.O);},c2v:function(){gx.O.A10108SocioDataHoraAlt=gx.fn.toDatetimeValue(this.val())},val:function(){return gx.fn.getDateTimeValue("SOCIODATAHORAALT")},nac:gx.falseFn}; this.declareDomainHdlr( 64 , function() { }); GXValidFnc[70]={fld:"PROMPT_10023_10024",grid:758}; this.A10023SocioAssociadoEmpresaId = "" ; this.Z10023SocioAssociadoEmpresaId = "" ;
this.O10023SocioAssociadoEmpresaId = "" ; this.A10024SocioAssociadoId = 0 ; this.Z10024SocioAssociadoId = 0 ; this.O10024SocioAssociadoId = 0 ; this.A10025SocioId = 0 ; this.Z10025SocioId = 0 ; this.O10025SocioId = 0 ; this.A10109SocioSnCredenciado = "" ; this.Z10109SocioSnCredenciado = "" ; this.O10109SocioSnCredenciado = "" ; this.A10110SocioUsuarioAlt = "" ; this.Z10110SocioUsuarioAlt = "" ; this.O10110SocioUsuarioAlt = "" ; this.A10108SocioDataHoraAlt = gx.date.nullDate() ; this.Z10108SocioDataHoraAlt = gx.date.nullDate() ; this.O10108SocioDataHoraAlt = gx.date.nullDate() ; this.A10023SocioAssociadoEmpresaId = "" ; this.A10024SocioAssociadoId = 0 ; this.A10025SocioId = 0 ; this.A10109SocioSnCredenciado = "" ; this.A10110SocioUsuarioAlt = "" ; this.A10108SocioDataHoraAlt = gx.date.nullDate() ; this.Events = {"e11ev758_client": ["ENTER", true] ,"e12ev758_client": ["CANCEL", true]}; this.EvtParms["ENTER"] = [[{postForm:true}],[]]; this.EvtParms["REFRESH"] = [[],[]]; this.setPrompt("PROMPT_10023_10024", [39,44]); this.EnterCtrl = ["BTN_ENTER2" ,"BTN_ENTER2_SEPARATOR" ,"BTN_ENTER"]; this.setVCMap("Gx_mode", "vMODE", 0, "char"); this.InitStandaloneVars( ); }); gx.setParentObj(new tsocio());
machine_learning_model.py
import utility import static_sim_functions as smf import numpy as np from sklearn.model_selection import train_test_split from sklearn.metrics import * from time_series_grp import TimeSeriesGroupProcessing from RandomNeighbors import RandomNeighbors from sklearn.neighbors import NearestNeighbors from sklearn.model_selection import KFold import ml_modelling_ts as ml_ts ''' This is just a run of the approaches using the methodologies, save the neighborhood for UI. ''' def common_processing(df): # Getting percentage between 0 to 1 rather than score values df["tschq12"] = df["tschq12"].apply(lambda x: x / 100) df["tschq16"] = df["tschq16"].apply(lambda x: x / 100) df["tschq17"] = df["tschq17"].apply(lambda x: x / 100) # Feature engineering family history df["tschq04"] = df.apply(smf.create_cols_family_hist, axis=1) return df def get_common_cols(col1, col2): common_elements = set(col1).intersection(col2) return common_elements import properties import pandas as pd def initial_processing(): # Read the csv of the tschq data and make the necessary things tschq = pd.read_pickle(properties.data_location + "/input_pckl/" + "3_q.pckl") # Cleaning tschq05 question. There is an abstraction for a row we add common value def filter_age(x): if isinstance(x, int): # Append the most common value obtained return tschq["tschq05"].value_counts().head(1).index[0] else: return x tschq["tschq05"] = tschq["tschq05"].apply(filter_age) # Drop the questionnaire_id and created_at tschq.drop(["questionnaire_id", "created_at"], axis=1, inplace=True) # Lets read and join two questionnaires tschq and hq hq = pd.read_pickle("data/input_pckl/4_q.pckl") hq.isna().sum(axis=0) # By looking at the output we are sure that h5 and h6 do not contribute much and can be dropped hq.drop(["hq05", "hq06"], axis=1, inplace=True) hq_df = hq.set_index("user_id") df = tschq.join(hq_df.iloc[:, 2:], on="user_id") drop_cols = ["tschq01", "tschq25", "tschq07-2", "tschq13", "tschq04-1", "tschq04-2"] # Getting percentage between 0 to 1 rather than score values df["tschq12"] = df["tschq12"].apply(lambda x: x / 100) df["tschq16"] = df["tschq16"].apply(lambda x: x / 100) df["tschq17"] = df["tschq17"].apply(lambda x: x / 100) df["tschq04"] = df.apply(smf.create_cols_family_hist, axis=1) df.drop(drop_cols, axis=1, inplace=True) # Set the heom object, while using the required similarity # Alternative # Categorical boolean mask categorical_feature_mask = df.iloc[:, 1:].infer_objects().dtypes == object other_feature_mask = df.iloc[:, 1:].infer_objects().dtypes != object # filter categorical columns using mask and turn it into a list categorical_cols = df.iloc[:, 1:].columns[categorical_feature_mask].tolist() num_cols = df.iloc[:, 1:].columns[other_feature_mask].tolist() cat_idx = [df.iloc[:, 1:].columns.get_loc(val) for val in categorical_cols] num_idx = [df.iloc[:, 1:].columns.get_loc(val) for val in num_cols] return cat_idx, num_idx, df import os import traceback def save_data_objs(df, quest_cmbs="all"): try: if not os.path.isdir(properties.model_location + quest_cmbs): os.makedirs(properties.model_location + quest_cmbs) utility.save_model("".join(quest_cmbs + "/" + quest_cmbs + "_stat_q_data"), df) encoded_combined_df = smf.preprocess(df, quest_cmbs, age_bin=False, process_model_name="".join(quest_cmbs + "/" + quest_cmbs + "_stat_q_data_oe_model"), prediction=False) # Save this encoded_data utility.save_model("".join(quest_cmbs + "/" + quest_cmbs + "_stat_q_data_encoded"), encoded_combined_df) return encoded_combined_df # Use this data to build the 
data over static data. except Exception: print(traceback.print_exc()) def weighted_average(distress_list):
# Function computes the weighted average as predictions for given prediction time point def compute_weighted_avg(n_idx, encoded_data, pred_at_list, method="mean", dist_nn=None, wt_flag=False): preds = list() # Prediction for four time points for pval in pred_at_list: distress_list = list() for vals in n_idx: u_id = encoded_data["user_id"].iloc[vals] user_ts = tsg_data.get_usr_mday_ts_predict(int(u_id)) # 3rd val of the series is s03 of the neighbor print("{}, {} Values ".format(int(pval), int(u_id))) if len(user_ts) > int(pval): value = user_ts[int(pval), :][3] elif len(user_ts) <= int(pval): value = user_ts[len(user_ts)-1, :][3] distress_list.append(value) if wt_flag: print("Calling by weighted distance prediction for distress") preds.append(weighted_distance_prediction(distress_list, dist_nn)) else: print("Calling weighted average to predict distress") preds.append(weighted_average(distress_list)) return preds def weighted_distance_prediction(p_preds, distance): # Inverse distance so that highest weight is given to the nearest one and least to the farther inv_dist = np.divide(1, distance) #s03 - tinnitus distress weighted by distance is given as s03_pred = (np.sum(np.multiply(p_preds, inv_dist)) / (np.sum(inv_dist))) return s03_pred def compute(test_nn, encoded_data, pred_list, method="mean", dist_nn=None, wt_dist=False): from sklearn.linear_model import LinearRegression preds = list() for point in pred_list: nn_preds = list() intercepts_list = list() coeff_list = list() for nn in test_nn: u_id = encoded_data["user_id"].iloc[nn] user_ts = tsg_data.get_usr_mday_ts_predict(int(u_id)) # Obtain the time series until time point and fit the data for linear regression diff_arr = np.abs(np.subtract(point, user_ts[:, 1])) diff_near_idx = np.where(diff_arr == diff_arr.min()) print("minimum to the time point is at -- ", diff_near_idx) # difference near index. Handling for the length of users usr_idx = diff_near_idx[0][0] user_ts_p = user_ts[:usr_idx] user_ts_df = pd.DataFrame(user_ts_p, columns=["day", "day_sess_index", "s02", "s03", "s04", "s05", "s06", "s07"]) X = user_ts_df[["day_sess_index"]] # We show for tinnitus distress. This can be extended to other physiological variables as well. y = user_ts_df[["s03"]] # Fit on X axis as time and Y as the s03 predictive value. reg_fit = LinearRegression(normalize=True) reg_fit.fit(X, y) # If weighted_distance is true, then predict by each of the nn_user and add to list. This will be used for # calculating weighted_distance_predictions. if wt_dist: nn_pred = reg_fit.predict(np.asarray(point).reshape(1, -1)) nn_preds.append(nn_pred[0][0]) else: intercepts_list.append(reg_fit.intercept_) coeff_list.append(reg_fit.coef_) if wt_dist: print("Predicting the value of s03 for the user by a weighted average weighted by distance") preds.append(weighted_distance_prediction(nn_preds, dist_nn)) else: print("Predicting the value of s3 over the averaged slope and intercepts of " "observations of the neighbors") # y = mx + c, where m is the average slope of the neighbors and c is the average intercept obtained. 
print("The equation to estimate s03 for the user is {}".format("".join(str(np.asarray(coeff_list).mean())) + "* time_index + " + str(np.asarray(intercepts_list).mean()))) y = np.multiply(np.asarray(coeff_list).mean(), point) + np.asarray(intercepts_list).mean() preds.append(y) return preds def compute_linear_regression(test_nn, encoded_data, pred_list, method="mean"): #test_nn = test_user_nn #pred_list = prediction_at_list from sklearn.linear_model import LinearRegression preds = list() for point in pred_list: attr_list = list() intercepts_list = list() coeff_list = list() for nn in test_nn: u_id = encoded_data["user_id"].iloc[nn] user_ts = tsg_data.get_m_day_ts_enumerate(int(11)) diff_arr = np.abs(np.subtract(point, user_ts[:, 1])) diff_near_idx = np.where(diff_arr == diff_arr.min()) print(diff_near_idx) # difference near index usr_vals = np.array([user_ts[n_id] for n_id in diff_near_idx[0]]) if len(usr_vals) > 1: value = usr_vals.mean(axis=0) print("vavg" + str(value)) else: value = usr_vals[0] print("v" + str(value)) attr_list.append(value) df = pd.DataFrame(user_ts) df.columns = ["day", "day_session_id", "s02", "s03", "s04", "s05", "s06", "s07"] reg_model = LinearRegression(normalize=True) user_x = df[["day_session_id", "s04", "s05", "s06"]].to_numpy() user_s03 = df[["s03"]].to_numpy().ravel() reg_model.fit(user_x, user_s03) intercepts_list.append(reg_model.intercept_) coeff_list.append(reg_model.coef_) # y = mx + c, where m is the average slope of the neighbors and c is the average intercept obtained. # convert coeff's to numpy for manipulations numpy_attr_list = np.array(attr_list) print(numpy_attr_list) avg_np_attr_list = numpy_attr_list[:, 4:].mean(axis=0) print(avg_np_attr_list) numpy_coeff_list = np.array(coeff_list) print(numpy_coeff_list) print(numpy_coeff_list.mean(axis=0)) # Day_index, s02, s04, s05, s06 ,s07 - Use only the fit independent features to estimate the dependent y = np.multiply(numpy_coeff_list[:, 0].mean(), point) + \ np.multiply(numpy_coeff_list[:, 1].mean(), avg_np_attr_list[0]) + \ np.multiply(numpy_coeff_list[:, 2].mean(), avg_np_attr_list[1]) + \ np.multiply(numpy_coeff_list[:, 3].mean(), avg_np_attr_list[2]) + \ np.asarray(intercepts_list).mean() preds.append(y) print(preds) return preds # Create test label as ground truth at prediction point. def create_y_labels(test_data, prediction_at, method="mean"): y_test = list() for i in range(0, len(test_data)): test_ts_test1 = tsg_data.get_usr_mday_ts_predict(int(test_data.iloc[i]["user_id"])) # print(len(test_ts_test1)) if len(test_ts_test1) >= prediction_at: y_test.append(test_ts_test1[prediction_at - 1][2]) elif len(test_ts_test1) < prediction_at: y_test.append(test_ts_test1[len(test_ts_test1) - 1][2]) return y_test # Create reference points for multiple reference predictions def get_pred_ref_points(user_id, ndays, method="mean"): # Using the default tsg which is mean observations of the user test_user_ts = tsg_data.get_usr_mday_ts_predict(user_id) user_ts_idx = test_user_ts[:, 1] # ["date", "time_idx", "s02", "s03", "s04", "s05", "s06", "s07] user_distress = test_user_ts[:, 3] # Near evaluation. Change this for farther evaluations # Near -> 0.20, 0.10 # Far -> 1 - (Near) # Near points are of the sequence of observation because we are sure all stay until here. 
#prediction_at = 10 # Far prediction point is the last N% of the test user time series # It is tested for 0.75, 0.8, 0.9 prediction_at = round(len(user_ts_idx) * 0.80) y_labels = user_distress[prediction_at:prediction_at + ndays].tolist() prediction_at_list = user_ts_idx[prediction_at:prediction_at + ndays].tolist() return y_labels, prediction_at_list def do_test(test_data, out_writer, csv_out_writer, ndays, near_idxs, encoded_data, fold_count="final", method="mean", dist_nn=None, wt_dist_flag=False): for i in range(0, len(test_data)): user_id = int(test_data.iloc[i]["user_id"]) print("User- Id ", user_id) y_labels, prediction_at_list = get_pred_ref_points(user_id, ndays, method=method) # y_labels = create_y_labels(X_test, preds, method="mean") # Weighting by inverse of neighbor if wt_dist_flag: test_user_nn = near_idxs[i] test_user_dist = dist_nn[i] pred_weighted_average = compute_weighted_avg(test_user_nn, encoded_data, prediction_at_list, method=method, dist_nn=test_user_dist, wt_flag=wt_dist_flag) pred_lr = compute(test_user_nn, encoded_data, prediction_at_list, method=method, dist_nn=test_user_dist, wt_dist=wt_dist_flag) else: test_user_nn = near_idxs[i] pred_weighted_average = compute_weighted_avg(test_user_nn, encoded_data, prediction_at_list, method=method, dist_nn=None, wt_flag=False) pred_lr = compute(test_user_nn, encoded_data, prediction_at_list, method=method, dist_nn=None, wt_dist=False) # calculate if not fold_count == "final": print("Evaluating for the fold-" + str(fold_count) + " for the forecast reference points - " + str(prediction_at_list)) out_writer.write("Evaluating for the forecast reference points -- " + str(prediction_at_list) + "for the method evaluation -- " + str(method) + "\n") else: print("Evaluating for forecast reference points - " + str(prediction_at_list)) out_writer.write("Evaluating over the forecast reference points -- " + str(prediction_at_list) + "for the method evaluation -- " + str(method) + "\n") print("Computing RMSE for weighted average based predictions on the User -- " + str(user_id)) print("---------------------------------------------------------------") out_writer.write("---------------------------------------------------------------\n") print("RMSE -- ", np.sqrt(mean_squared_error(y_labels, pred_weighted_average))) out_writer.write("RMSE -- " + str(np.sqrt(mean_squared_error(y_labels, pred_weighted_average))) + "\n") # Writing to csv file if not fold_count == "final": csv_out_writer.write("".join(str(user_id) + "," + str(np.sqrt(mean_squared_error(y_labels, pred_weighted_average))) + "," + "weighted_average" + "," + str(y_labels[0]) + "," + str(y_labels[1]) + "," + str(y_labels[2]) + "," + str(pred_weighted_average[0]) + "," + str(pred_weighted_average[1]) + "," + str(pred_weighted_average[2]) + "\n")) else: csv_out_writer.write("".join(str(user_id) + "," + str(np.sqrt(mean_squared_error(y_labels, pred_weighted_average))) + "," + "weighted_average" + "," + str(y_labels[0]) + "," + str(y_labels[1]) + "," + str(y_labels[2]) + "," + str(pred_weighted_average[0]) + "," + str(pred_weighted_average[1]) + "," + str(pred_weighted_average[2]) + "\n")) print("-----------------------------------------------------------------------------") out_writer.write("---------------------------------------------------------------\n") print("Computing RMSE for {} {} based predictions for the user -- {}" .format(str("weighted_distance" + str(wt_dist_flag)), str("linear_regression"), str(user_id))) out_writer.write("Computing RMSE for {} {} based predictions for 
the user -- {} \n" .format(str("weighted_distance" + str(wt_dist_flag)), str("linear_regression"), str(user_id))) print("RMSE -- ", np.sqrt(mean_squared_error(y_labels, pred_lr))) out_writer.write("RMSE -- " + str(np.sqrt(mean_squared_error(y_labels, pred_lr))) + "\n") print("---------------------------------------------------------------") out_writer.write("---------------------------------------------------------------\n") # Write to csv file if not fold_count == "final": csv_out_writer.write("".join(str(user_id) + "," + str(np.sqrt(mean_squared_error(y_labels, pred_lr))) + "," + str("lr") + "," + str(y_labels[0]) + "," + str(y_labels[1]) + "," + str(y_labels[2]) + "," + str(pred_lr[0]) + "," + str(pred_lr[1]) + "," + str( pred_lr[2]) + "\n")) else: csv_out_writer.write("".join(str(user_id) + "," + str(np.sqrt(mean_squared_error(y_labels, pred_lr))) + "," + str("lr") + "," + str(y_labels[0]) + "," + str(y_labels[1]) + "," + str(y_labels[2]) + "," + str(pred_lr[0]) + "," + str(pred_lr[1]) + "," + str( pred_lr[2]) + "\n")) # Change method and execute to get the predictions appropriately, these are configurations eval_method = "mean" # Default day readings for all test users must be at mean and prediction are between min - mean - max tsg_data = TimeSeriesGroupProcessing(method=eval_method) # For all combinations evaluation it must be set to True quest_cmb_all = False # Same random state needs to be maintained to get consistent test data over all combinations and repeatable results random_state = 1220 # It is the setting to get the ahead prediction for tinnitus distress, 3 here means for 3 days # min it is a day and max of about 60days between points which is not an usual scenario ndays = 3 # Build the default NN with all the combination. if not quest_cmb_all: for key, val in properties.quest_comb.items(): # Build NN for each category print("Building NN for the question combination -- " + str(key)) cat_idx, num_idx, combined_df = smf.initial_processing(key, val, append_synthethic=False) # Build and get the knn NN for prediction over test instances. # Save the data objs encoded_data = save_data_objs(combined_df, key) out_writer = open("".join("output/output_" + str(key) + "_" + str(eval_method) + "_heom_norm.txt"), "w+") csv_out_writer = open("".join("output/output_" + str(key) + "_" + str(eval_method) + "_heom_norm.csv"), "w+") csv_out_writer.write("".join("user_id,rmse,algorithm," "ref_p1,ref_p2,ref_p3,pred_p1,pred_p2,pred_p3\n")) #Create a test set X, test = train_test_split(encoded_data, test_size=0.20, random_state=random_state) def filter_train_ids(x): # print(x) if x["user_id"] in train_user_ids: return x def filter_test_ids(x): # print(x) if x["user_id"] in test_user_ids: return x train_user_ids = X["user_id"].to_list() X_train_data_ui = combined_df.apply(filter_train_ids, axis=1, result_type="broadcast").dropna() X_train_data_ui["user_id"] = X_train_data_ui["user_id"].apply(int) # Save the non encoded train data for visualization purposes utility.save_model("".join(key + "/" + key + "_train_stat_q_data"), X_train_data_ui) # filter and get the data to show to the UI for the test data. 
test_user_ids = test["user_id"].to_list() X_test_data_ui = combined_df.apply(filter_test_ids, axis=1, result_type="broadcast").dropna() X_test_data_ui["user_id"] = X_test_data_ui["user_id"].apply(int) # Save the data_ui object as json #test_data = {} #test_data["users"] = X_test_data_ui.to_dict("r") #utility.save_data("".join("test_data_ui_" + key), test_data) from HEOM import HEOM # Can be done at prediction too. from sklearn.metrics.pairwise import cosine_distances from sklearn.linear_model import LinearRegression from scipy.spatial.distance import pdist, squareform from scipy.stats import zscore heom = HEOM(X.to_numpy(), cat_idx, num_idx) sim_matrix = pdist(X.to_numpy()[:, 1:], heom.heom_distance) mean_heom_distance = sim_matrix.mean() knn = NearestNeighbors(n_neighbors=5, metric=heom.heom_distance, radius=mean_heom_distance) knn.fit(X.iloc[:, 1:]) dist, test_idx = knn.kneighbors(test.to_numpy()[:, 1:], n_neighbors=5) # Execute without any varying for saving the KNN as pickle to be used by UI do_test(test, out_writer, csv_out_writer, ndays, test_idx, X, fold_count="final", method=eval_method, dist_nn=None, wt_dist_flag=False) utility.save_model("".join(key + "/" + "knn_static"), knn) utility.save_model("".join(key + "/" + "train_sim_data.pckl"), X) out_writer.close() csv_out_writer.close() # All feature combinations cat_idx, num_idx, combined_df = initial_processing() # Build KNN for each category print("Building KNN for the question combination -- " + str("overall")) # Save the data objs encoded_data = save_data_objs(combined_df, "overall") X, test = train_test_split(encoded_data, test_size=0.20, random_state=random_state) def filter_train_ids(x): # print(x) if x["user_id"] in train_user_ids: return x def filter_test_ids(x): # print(x) if x["user_id"] in test_user_ids: return x train_user_ids = X["user_id"].to_list() X_train_data_ui = combined_df.apply(filter_train_ids, axis=1, result_type="broadcast").dropna() X_train_data_ui["user_id"] = X_train_data_ui["user_id"].apply(int) # Save in overall. utility.save_model("".join("overall" + "/" + "overall" + "_train_stat_q_data"), X_train_data_ui) # filter and get the data to show to the UI for the test data. test_user_ids = test["user_id"].to_list() X_test_data_ui = combined_df.apply(filter_test_ids, axis=1, result_type="broadcast").dropna() X_test_data_ui["user_id"] = X_test_data_ui["user_id"].apply(int) # Save the data_ui object as json test_data = {} test_data["users"] = X_test_data_ui.to_dict("r") utility.save_data("test_data_ui_x_test", test_data) # Save the results to out_writer out_writer = open("output/overall_output_folds_" + str(eval_method) + ".txt", "w+") csv_out_writer = open("output/overall_output_folds_" + str(eval_method) + ".csv", "w+") # First get the time series for a given test patient and the reference point and iterate to evaluate csv_out_writer.write("user_id,rmse,algorithm," "ref_p1,ref_p2,ref_p3,pred_p1,pred_p2,pred_p3\n") # Split the data into train and test from sklearn.model_selection import train_test_split import utility from HEOM import HEOM #Can be done at prediction too. 
from sklearn.metrics.pairwise import cosine_distances from sklearn.linear_model import LinearRegression from scipy.spatial.distance import pdist, squareform from scipy.stats import zscore heom = HEOM(X.to_numpy()[:, 1:], cat_idx, num_idx) sim_matrix = pdist(X.to_numpy()[:, 1:], heom.heom_distance) mean_heom_distance = sim_matrix.mean() knn = NearestNeighbors(n_neighbors=5, metric=heom.heom_distance, radius=mean_heom_distance) knn.fit(X.to_numpy()[:, 1:]) dist, idx_test = knn.kneighbors(test.to_numpy()[:, 1:], n_neighbors=5) # First get the time series for a given test patient and the reference point and iterate to evaluate do_test(test, out_writer, csv_out_writer, ndays, idx_test, X, fold_count="final", method=eval_method, dist_nn=None, wt_dist_flag=False) out_writer.close() csv_out_writer.close() # End save the nearest neighbor as data objects, so that can be used from the UI utility.save_model("".join("overall/" + "knn_static"), knn) utility.save_model("".join("overall" + "/" + "train_sim_data.pckl"), X) ''' ML Modelling based on s02 - loudness. Note: This has to be run once the all feature execution is completed since we build upon a custom similarity matrix, it is essential that the same split of train test happen so that it can be verified from the application. ''' # Create train and test containing same users in train and test as per static data. (Note: Run above code and then this # because same set of train test users are used) def splitData(dataset, test_user_ids): train_data = dataset[~dataset["user_id"].isin(test_user_ids)] test_data = dataset[dataset["user_id"].isin(test_user_ids)] return train_data, test_data # Save both train and test matrix def save_ts_objs(train, test, location_name): try: if not os.path.isdir(properties.model_location + location_name): os.makedirs(properties.model_location + location_name) utility.save_model("".join(location_name + "/" + location_name + "_train_data"), train) utility.save_model("".join(location_name + "/" + location_name + "_test_data"), test) except Exception: print(traceback.print_exc()) X = ml_ts.process_data(grouping="day") # Calculate pairwise distance and create a dataframe for the same from scipy.spatial.distance import pdist, squareform # Cross validate here based on the same split of static data here. # Note: Only one combination will be present C = np.zeros((X.shape[0], X.shape[0])) for i in range(0, len(X)): for j in range(0, len(X)): dist = ml_ts.compute_dist(X[:, 1][i], X[:, 1][j]) C[i][j] = dist C_df = pd.DataFrame(C) #C_df.to_csv("sim_ema.csv") # Threshold overall distance for making within radius threshold_distance = sum(C_df.mean())/len(C_df) user_ids = [] for val in X: user_ids.append(val[0]) C_df["user_id"] = user_ids train_data, test_data = splitData(C_df, test_user_ids) # Save the time series data objects as dynamic_ts into model folder save_ts_objs(train_data, test_data, "dynamic_ts") out_writer = open("".join("output/output_ema_" + str(eval_method) + "_.txt"), "w+") csv_out_writer = open("".join("output/output_ema_" + str(eval_method) + "_.csv"), "w+") csv_out_writer.write("user_id,rmse,algorithm," "ref_p1,ref_p2,ref_p3,pred_p1,pred_p2,pred_p3\n") # Test on the final test set. Note there is no varying K just to save the NN here. # It should be noted we use NearesetNeighbors and not KNearestNeighbors classifier. 
knn_ema = NearestNeighbors(n_neighbors=5, metric="precomputed", radius=threshold_distance) knn_ema.fit(train_data[train_data.index]) ema_dist, ema_idx = knn_ema.kneighbors(test_data[train_data.index], n_neighbors=5) # First get the time series for a given test patient and the reference point and iterate to evaluate do_test(test_data, out_writer, csv_out_writer, ndays, ema_idx, encoded_data, fold_count="final", method=eval_method, dist_nn=None, wt_dist_flag=False) # Close the writers out_writer.close() csv_out_writer.close() # Save the similarity search index KNN utility.save_model("".join("dynamic_ts" + "/" + "dynamic_ts" + "_knn"), knn_ema)
average = np.asarray(distress_list, dtype=float).mean() return average
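For clarity, a small self-contained Go sketch of the two rules used to combine the neighbours' observations above: inverse-distance weighting of the observed distress values, and extrapolation with the averaged slope and intercept of per-neighbour linear fits. All names and sample numbers are hypothetical; this is not part of the Python pipeline.

package main

import "fmt"

// inverseDistancePrediction weights each neighbour's value by the inverse of
// its distance, so nearer neighbours dominate: pred = sum(v_i/d_i) / sum(1/d_i).
func inverseDistancePrediction(values, distances []float64) float64 {
	var num, den float64
	for i, v := range values {
		w := 1.0 / distances[i]
		num += v * w
		den += w
	}
	return num / den
}

// averagedLinearFit evaluates y = mean(slope)*t + mean(intercept), i.e. the
// per-neighbour regressions are averaged before extrapolating to time t.
func averagedLinearFit(slopes, intercepts []float64, t float64) float64 {
	var ms, mi float64
	for i := range slopes {
		ms += slopes[i]
		mi += intercepts[i]
	}
	n := float64(len(slopes))
	return (ms/n)*t + mi/n
}

func main() {
	values := []float64{0.4, 0.6, 0.5}    // hypothetical s03 readings of three neighbours
	distances := []float64{1.0, 2.0, 4.0} // hypothetical HEOM distances to those neighbours
	fmt.Println(inverseDistancePrediction(values, distances))
	fmt.Println(averagedLinearFit([]float64{0.01, 0.02}, []float64{0.3, 0.5}, 10))
}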
scope.rs
use crate::kurbo::{Point, Rect}; use crate::{ BoxConstraints, Data, Env, Event, EventCtx, LayoutCtx, Lens, LifeCycle, LifeCycleCtx, PaintCtx, Size, UpdateCtx, Widget, WidgetPod, }; use std::marker::PhantomData; /// A policy that controls how a [`Scope`] will interact with its surrounding /// application data. Specifically, how to create an initial State from the /// input, and how to synchronise the two using a [`ScopeTransfer`]. /// /// [`Scope`]: struct.Scope.html /// [`ScopeTransfer`]: trait.ScopeTransfer.html pub trait ScopePolicy { /// The type of data that comes in from the surrounding application or scope. type In: Data; /// The type of data that the `Scope` will maintain internally. /// This will usually be larger than the input data, and will embed the input data. type State: Data; /// The type of transfer that will be used to synchronise internal and application state type Transfer: ScopeTransfer<In = Self::In, State = Self::State>; /// Make a new state and transfer from the input. /// /// This consumes the policy, so non-cloneable items can make their way /// into the state this way. fn create(self, inner: &Self::In) -> (Self::State, Self::Transfer); } /// A `ScopeTransfer` knows how to synchronise input data with its counterpart /// within a [`Scope`]. /// /// It is separate from the policy mainly to allow easy use of lenses to do /// synchronisation, with a custom [`ScopePolicy`]. /// /// [`Scope`]: struct.Scope.html /// [`ScopePolicy`]: trait.ScopePolicy.html pub trait ScopeTransfer { /// The type of data that comes in from the surrounding application or scope. type In: Data; /// The type of data that the Scope will maintain internally. type State: Data; /// Replace the input we have within our State with a new one from outside fn read_input(&self, state: &mut Self::State, inner: &Self::In); /// Take the modifications we have made and write them back /// to our input. fn write_back_input(&self, state: &Self::State, inner: &mut Self::In); } /// A default implementation of [`ScopePolicy`] that takes a function and a transfer. /// /// [`ScopePolicy`]: trait.ScopePolicy.html pub struct DefaultScopePolicy<F: FnOnce(Transfer::In) -> Transfer::State, Transfer: ScopeTransfer> { make_state: F, transfer: Transfer, } impl<F: FnOnce(Transfer::In) -> Transfer::State, Transfer: ScopeTransfer> DefaultScopePolicy<F, Transfer> { /// Create a `ScopePolicy` from a factory function and a `ScopeTransfer`. pub fn new(make_state: F, transfer: Transfer) -> Self { DefaultScopePolicy { make_state, transfer, } } } impl<F: FnOnce(In) -> State, L: Lens<State, In>, In: Data, State: Data> DefaultScopePolicy<F, LensScopeTransfer<L, In, State>> { /// Create a `ScopePolicy` from a factory function and a lens onto that /// `Scope`'s state. pub fn from_lens(make_state: F, lens: L) -> Self { Self::new(make_state, LensScopeTransfer::new(lens)) } } impl<F: Fn(Transfer::In) -> Transfer::State, Transfer: ScopeTransfer> ScopePolicy for DefaultScopePolicy<F, Transfer> { type In = Transfer::In; type State = Transfer::State; type Transfer = Transfer; fn create(self, inner: &Self::In) -> (Self::State, Self::Transfer) { let state = (self.make_state)(inner.clone()); (state, self.transfer) } } /// A `ScopeTransfer` that uses a Lens to synchronise between a large internal /// state and a small input. 
pub struct LensScopeTransfer<L: Lens<State, In>, In, State> { lens: L, phantom_in: PhantomData<In>, phantom_state: PhantomData<State>, } impl<L: Lens<State, In>, In, State> LensScopeTransfer<L, In, State> { /// Create a `ScopeTransfer` from a Lens onto a portion of the `Scope`'s state. pub fn new(lens: L) -> Self { LensScopeTransfer { lens, phantom_in: PhantomData::default(), phantom_state: PhantomData::default(), } } } impl<L: Lens<State, In>, In: Data, State: Data> ScopeTransfer for LensScopeTransfer<L, In, State> { type In = In; type State = State; fn read_input(&self, state: &mut State, data: &In) { self.lens.with_mut(state, |inner| { if !inner.same(&data)
}); } fn write_back_input(&self, state: &State, data: &mut In) { self.lens.with(state, |inner| { if !inner.same(&data) { *data = inner.clone(); } }); } } enum ScopeContent<SP: ScopePolicy> { Policy { policy: Option<SP>, }, Transfer { state: SP::State, transfer: SP::Transfer, }, } /// A widget that allows encapsulation of application state. /// /// This is useful in circumstances where /// * A (potentially reusable) widget is composed of a tree of multiple cooperating child widgets /// * Those widgets communicate amongst themselves using Druid's reactive data mechanisms /// * It is undesirable to complicate the surrounding application state with the internal details /// of the widget. /// /// /// Examples include: /// * In a tabs widget composed of a tab bar, and a widget switching body, those widgets need to /// cooperate on which tab is selected. However not every user of a tabs widget wishes to /// encumber their application state with this internal detail - especially as many tabs widgets may /// reasonably exist in an involved application. /// * In a table/grid widget composed of various internal widgets, many things need to be synchronised. /// Scroll position, heading moves, drag operations, sort/filter operations. For many applications /// access to this internal data outside of the table widget isn't needed. /// For this reason it may be useful to use a Scope to establish private state. /// /// A scope embeds some input state (from its surrounding application or parent scope) /// into a larger piece of internal state. This is controlled by a user provided policy. /// /// The ScopePolicy needs to do two things /// a) Create a new scope from the initial value of its input, /// b) Provide two way synchronisation between the input and the state via a ScopeTransfer /// /// Convenience methods are provided to make a policy from a function and a lens. /// It may sometimes be advisable to implement ScopePolicy directly if you need to /// mention the type of a Scope. /// /// # Examples /// ``` /// use druid::{Data, Lens, WidgetExt}; /// use druid::widget::{TextBox, Scope}; /// #[derive(Clone, Data, Lens)] /// struct AppState { /// name: String, /// } /// /// #[derive(Clone, Data, Lens)] /// struct PrivateState { /// text: String, /// other: u32, /// } /// /// impl PrivateState { /// pub fn new(text: String) -> Self { /// PrivateState { text, other: 0 } /// } /// } /// /// fn main() { /// let scope = Scope::from_lens( /// PrivateState::new, /// PrivateState::text, /// TextBox::new().lens(PrivateState::text), /// ); /// } /// ``` pub struct Scope<SP: ScopePolicy, W: Widget<SP::State>> { content: ScopeContent<SP>, inner: WidgetPod<SP::State, W>, } impl<SP: ScopePolicy, W: Widget<SP::State>> Scope<SP, W> { /// Create a new scope from a policy and an inner widget pub fn new(policy: SP, inner: W) -> Self { Scope { content: ScopeContent::Policy { policy: Some(policy), }, inner: WidgetPod::new(inner), } } fn with_state<V>( &mut self, data: &SP::In, mut f: impl FnMut(&mut SP::State, &mut WidgetPod<SP::State, W>) -> V, ) -> V { match &mut self.content { ScopeContent::Policy { policy } => { // We know that the policy is a Some - it is an option to allow // us to take ownership before replacing the content. 
let (mut state, policy) = policy.take().unwrap().create(data); let v = f(&mut state, &mut self.inner); self.content = ScopeContent::Transfer { state, transfer: policy, }; v } ScopeContent::Transfer { ref mut state, transfer, } => { transfer.read_input(state, data); f(state, &mut self.inner) } } } fn write_back_input(&mut self, data: &mut SP::In) { if let ScopeContent::Transfer { state, transfer } = &mut self.content { transfer.write_back_input(state, data) } } } impl< F: Fn(Transfer::In) -> Transfer::State, Transfer: ScopeTransfer, W: Widget<Transfer::State>, > Scope<DefaultScopePolicy<F, Transfer>, W> { /// Create a new policy from a function creating the state, and a ScopeTransfer synchronising it pub fn from_function(make_state: F, transfer: Transfer, inner: W) -> Self { Self::new(DefaultScopePolicy::new(make_state, transfer), inner) } } impl<In: Data, State: Data, F: Fn(In) -> State, L: Lens<State, In>, W: Widget<State>> Scope<DefaultScopePolicy<F, LensScopeTransfer<L, In, State>>, W> { /// Create a new policy from a function creating the state, and a Lens synchronising it pub fn from_lens(make_state: F, lens: L, inner: W) -> Self { Self::new(DefaultScopePolicy::from_lens(make_state, lens), inner) } } impl<SP: ScopePolicy, W: Widget<SP::State>> Widget<SP::In> for Scope<SP, W> { fn event(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut SP::In, env: &Env) { self.with_state(data, |state, inner| inner.event(ctx, event, state, env)); self.write_back_input(data); ctx.request_update() } fn lifecycle(&mut self, ctx: &mut LifeCycleCtx, event: &LifeCycle, data: &SP::In, env: &Env) { self.with_state(data, |state, inner| inner.lifecycle(ctx, event, state, env)); } fn update(&mut self, ctx: &mut UpdateCtx, _old_data: &SP::In, data: &SP::In, env: &Env) { self.with_state(data, |state, inner| inner.update(ctx, state, env)); } fn layout( &mut self, ctx: &mut LayoutCtx, bc: &BoxConstraints, data: &SP::In, env: &Env, ) -> Size { self.with_state(data, |state, inner| { let size = inner.layout(ctx, bc, state, env); inner.set_layout_rect(ctx, state, env, Rect::from_origin_size(Point::ORIGIN, size)); size }) } fn paint(&mut self, ctx: &mut PaintCtx, data: &SP::In, env: &Env) { self.with_state(data, |state, inner| inner.paint_raw(ctx, state, env)); } }
{ *inner = data.clone() }
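A cross-language sketch (hypothetical Go types, not Druid's API) of the transfer contract described in the doc comments above: the private state embeds the input, read_input copies the outer value in before the inner widget runs, and write_back_input copies edits back out afterwards.

package main

import "fmt"

// In stands for the small application data handed to the scope.
type In struct{ Name string }

// State stands for the larger private state that embeds the input.
type State struct {
	Name  string // the embedded input field (what the lens focuses on)
	Other uint32 // private widget state the application never sees
}

// readInput plays the role of ScopeTransfer::read_input.
func readInput(state *State, in In) {
	if state.Name != in.Name {
		state.Name = in.Name
	}
}

// writeBackInput plays the role of ScopeTransfer::write_back_input.
func writeBackInput(state State, in *In) {
	if in.Name != state.Name {
		in.Name = state.Name
	}
}

func main() {
	app := In{Name: "initial"}
	private := State{Name: app.Name} // what ScopePolicy::create would build
	private.Name = "edited in scope" // the inner widget mutates private state
	writeBackInput(private, &app)    // and the edit flows back to the app
	readInput(&private, app)         // no-op here: both sides already agree
	fmt.Println(app.Name)
}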
captive_core_backend_test.go
package ledgerbackend import ( "context" "encoding/hex" "fmt" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stellar/go/historyarchive" "github.com/stellar/go/network" "github.com/stellar/go/support/errors" "github.com/stellar/go/xdr" ) // TODO: test frame decoding // TODO: test from static base64-encoded data type stellarCoreRunnerMock struct { mock.Mock } func (m *stellarCoreRunnerMock) context() context.Context { a := m.Called() return a.Get(0).(context.Context) } func (m *stellarCoreRunnerMock) catchup(from, to uint32) error { a := m.Called(from, to) return a.Error(0) } func (m *stellarCoreRunnerMock) runFrom(from uint32, hash string) error { a := m.Called(from, hash) return a.Error(0) } func (m *stellarCoreRunnerMock) getMetaPipe() <-chan metaResult { a := m.Called() return a.Get(0).(<-chan metaResult) } func (m *stellarCoreRunnerMock) getProcessExitError() (bool, error) { a := m.Called() return a.Bool(0), a.Error(1) } func (m *stellarCoreRunnerMock) close() error { a := m.Called() return a.Error(0) } func buildLedgerCloseMeta(header testLedgerHeader) xdr.LedgerCloseMeta { opResults := []xdr.OperationResult{} opMeta := []xdr.OperationMeta{} tmpHash, _ := hex.DecodeString("cde54da3901f5b9c0331d24fbb06ac9c5c5de76de9fb2d4a7b86c09e46f11d8c") var hash [32]byte copy(hash[:], tmpHash) var ledgerHash [32]byte if header.hash != "" { tmpHash, err := hex.DecodeString(header.hash) if err != nil { panic(err) } copy(ledgerHash[:], tmpHash) } var previousLedgerHash [32]byte if header.hash != "" { tmpHash, err := hex.DecodeString(header.previousLedgerHash) if err != nil { panic(err) } copy(previousLedgerHash[:], tmpHash) } source := xdr.MustAddress("GAEJJMDDCRYF752PKIJICUVL7MROJBNXDV2ZB455T7BAFHU2LCLSE2LW") return xdr.LedgerCloseMeta{ V: 0, V0: &xdr.LedgerCloseMetaV0{ LedgerHeader: xdr.LedgerHeaderHistoryEntry{ Hash: ledgerHash, Header: xdr.LedgerHeader{ LedgerSeq: xdr.Uint32(header.sequence), PreviousLedgerHash: previousLedgerHash, }, }, TxSet: xdr.TransactionSet{ Txs: []xdr.TransactionEnvelope{ { Type: xdr.EnvelopeTypeEnvelopeTypeTx, V1: &xdr.TransactionV1Envelope{ Tx: xdr.Transaction{ SourceAccount: source.ToMuxedAccount(), Fee: xdr.Uint32(header.sequence), }, }, }, }, }, TxProcessing: []xdr.TransactionResultMeta{ { Result: xdr.TransactionResultPair{ TransactionHash: xdr.Hash(hash), Result: xdr.TransactionResult{ FeeCharged: xdr.Int64(header.sequence), Result: xdr.TransactionResultResult{ Code: xdr.TransactionResultCodeTxSuccess, Results: &opResults, }, }, }, TxApplyProcessing: xdr.TransactionMeta{ Operations: &opMeta, }, }, }, }, } } type testLedgerHeader struct { sequence uint32 hash string previousLedgerHash string } func TestCaptiveNew(t *testing.T) { executablePath := "/etc/stellar-core" networkPassphrase := network.PublicNetworkPassphrase historyURLs := []string{"http://history.stellar.org/prd/core-live/core_live_001"} captiveStellarCore, err := NewCaptive( CaptiveCoreConfig{ BinaryPath: executablePath, NetworkPassphrase: networkPassphrase, HistoryArchiveURLs: historyURLs, }, ) assert.NoError(t, err) assert.Equal(t, uint32(0), captiveStellarCore.nextLedger) assert.NotNil(t, captiveStellarCore.archive) } func TestCaptivePrepareRange(t *testing.T) { metaChan := make(chan metaResult, 100) // Core will actually start with the last checkpoint before the from ledger // and then rewind to the `from` ledger. 
for i := 64; i <= 100; i++ { meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) metaChan <- metaResult{ LedgerCloseMeta: &meta, } } ctx := context.Background() mockRunner := &stellarCoreRunnerMock{} mockRunner.On("catchup", uint32(100), uint32(200)).Return(nil).Once() mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) mockRunner.On("context").Return(ctx) mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(200), }, nil) cancelCalled := false captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, checkpointManager: historyarchive.NewCheckpointManager(64), cancel: context.CancelFunc(func() { cancelCalled = true }), } err := captiveBackend.PrepareRange(ctx, BoundedRange(100, 200)) assert.NoError(t, err) mockRunner.On("close").Return(nil).Once() err = captiveBackend.Close() assert.NoError(t, err) assert.True(t, cancelCalled) mockRunner.AssertExpectations(t) mockArchive.AssertExpectations(t) } func TestCaptivePrepareRangeCrash(t *testing.T) { metaChan := make(chan metaResult) close(metaChan) ctx := context.Background() mockRunner := &stellarCoreRunnerMock{} mockRunner.On("catchup", uint32(100), uint32(200)).Return(nil).Once() mockRunner.On("getProcessExitError").Return(true, errors.New("exit code -1")) mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) mockRunner.On("close").Return(nil).Once() mockRunner.On("context").Return(ctx) mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(200), }, nil) captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, checkpointManager: historyarchive.NewCheckpointManager(64), } err := captiveBackend.PrepareRange(ctx, BoundedRange(100, 200)) assert.EqualError(t, err, "Error fast-forwarding to 100: stellar core exited unexpectedly: exit code -1") mockRunner.AssertExpectations(t) mockArchive.AssertExpectations(t) } func TestCaptivePrepareRangeTerminated(t *testing.T) { metaChan := make(chan metaResult, 100) // Core will actually start with the last checkpoint before the from ledger // and then rewind to the `from` ledger. for i := 64; i <= 100; i++ { meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) metaChan <- metaResult{ LedgerCloseMeta: &meta, } } close(metaChan) ctx := context.Background() mockRunner := &stellarCoreRunnerMock{} mockRunner.On("catchup", uint32(100), uint32(200)).Return(nil).Once() mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) mockRunner.On("context").Return(ctx) mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). 
Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(200), }, nil) captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, checkpointManager: historyarchive.NewCheckpointManager(64), } err := captiveBackend.PrepareRange(ctx, BoundedRange(100, 200)) assert.NoError(t, err) mockRunner.AssertExpectations(t) mockArchive.AssertExpectations(t) } func TestCaptivePrepareRangeCloseNotFullyTerminated(t *testing.T) { metaChan := make(chan metaResult, 100) for i := 64; i <= 100; i++ { meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) metaChan <- metaResult{ LedgerCloseMeta: &meta, } } ctx, cancel := context.WithCancel(context.Background()) mockRunner := &stellarCoreRunnerMock{} mockRunner.On("catchup", uint32(100), uint32(200)).Return(nil).Twice() mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) mockRunner.On("context").Return(ctx) mockRunner.On("close").Return(nil) mockRunner.On("getProcessExitError").Return(true, nil) mockRunner.On("getProcessExitError").Return(false, nil) mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(200), }, nil) captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, checkpointManager: historyarchive.NewCheckpointManager(64), } err := captiveBackend.PrepareRange(ctx, BoundedRange(100, 200)) assert.NoError(t, err) // Simulates a long (but graceful) shutdown... cancel() err = captiveBackend.PrepareRange(ctx, BoundedRange(100, 200)) assert.NoError(t, err) mockRunner.AssertExpectations(t) mockArchive.AssertExpectations(t) } func TestCaptivePrepareRange_ErrClosingSession(t *testing.T) { ctx := context.Background() mockRunner := &stellarCoreRunnerMock{} mockRunner.On("close").Return(fmt.Errorf("transient error")) mockRunner.On("getProcessExitError").Return(false, nil) mockRunner.On("context").Return(ctx) captiveBackend := CaptiveStellarCore{ nextLedger: 300, stellarCoreRunner: mockRunner, } err := captiveBackend.PrepareRange(ctx, BoundedRange(100, 200)) assert.EqualError(t, err, "error starting prepare range: error closing existing session: transient error") err = captiveBackend.PrepareRange(ctx, UnboundedRange(64)) assert.EqualError(t, err, "error starting prepare range: error closing existing session: transient error") mockRunner.AssertExpectations(t) } func TestCaptivePrepareRange_ErrGettingRootHAS(t *testing.T) { ctx := context.Background() mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). Return(historyarchive.HistoryArchiveState{}, errors.New("transient error")) captiveBackend := CaptiveStellarCore{ archive: mockArchive, } err := captiveBackend.PrepareRange(ctx, BoundedRange(100, 200)) assert.EqualError(t, err, "error starting prepare range: opening subprocess: error getting latest checkpoint sequence: error getting root HAS: transient error") err = captiveBackend.PrepareRange(ctx, UnboundedRange(100)) assert.EqualError(t, err, "error starting prepare range: opening subprocess: error getting latest checkpoint sequence: error getting root HAS: transient error") mockArchive.AssertExpectations(t) } func TestCaptivePrepareRange_FromIsAheadOfRootHAS(t *testing.T)
func TestCaptivePrepareRange_ToIsAheadOfRootHAS(t *testing.T) { mockRunner := &stellarCoreRunnerMock{} mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(192), }, nil) captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, checkpointManager: historyarchive.NewCheckpointManager(64), } err := captiveBackend.PrepareRange(context.Background(), BoundedRange(100, 200)) assert.EqualError(t, err, "error starting prepare range: opening subprocess: to sequence: 200 is greater than max available in history archives: 192") mockArchive.AssertExpectations(t) mockRunner.AssertExpectations(t) } func TestCaptivePrepareRange_ErrCatchup(t *testing.T) { mockRunner := &stellarCoreRunnerMock{} mockRunner.On("catchup", uint32(100), uint32(192)).Return(errors.New("transient error")).Once() mockRunner.On("close").Return(nil).Once() mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(192), }, nil) ctx := context.Background() cancelCalled := false captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, cancel: context.CancelFunc(func() { cancelCalled = true }), } err := captiveBackend.PrepareRange(ctx, BoundedRange(100, 192)) assert.EqualError(t, err, "error starting prepare range: opening subprocess: error running stellar-core: transient error") // make sure we can Close without errors assert.NoError(t, captiveBackend.Close()) assert.True(t, cancelCalled) mockArchive.AssertExpectations(t) mockRunner.AssertExpectations(t) } func TestCaptivePrepareRangeUnboundedRange_ErrRunFrom(t *testing.T) { mockRunner := &stellarCoreRunnerMock{} mockRunner.On("runFrom", uint32(127), "0000000000000000000000000000000000000000000000000000000000000000").Return(errors.New("transient error")).Once() mockRunner.On("close").Return(nil).Once() mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(127), }, nil) mockArchive. On("GetLedgerHeader", uint32(128)). Return(xdr.LedgerHeaderHistoryEntry{}, nil) ctx := context.Background() cancelCalled := false captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, checkpointManager: historyarchive.NewCheckpointManager(64), cancel: context.CancelFunc(func() { cancelCalled = true }), } err := captiveBackend.PrepareRange(ctx, UnboundedRange(128)) assert.EqualError(t, err, "error starting prepare range: opening subprocess: error running stellar-core: transient error") // make sure we can Close without errors assert.NoError(t, captiveBackend.Close()) assert.True(t, cancelCalled) mockArchive.AssertExpectations(t) mockRunner.AssertExpectations(t) } func TestCaptivePrepareRangeUnboundedRange_ReuseSession(t *testing.T) { metaChan := make(chan metaResult, 100) // Core will actually start with the last checkpoint before the from ledger // and then rewind to the `from` ledger. 
for i := 2; i <= 65; i++ { meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) metaChan <- metaResult{ LedgerCloseMeta: &meta, } } ctx := context.Background() mockRunner := &stellarCoreRunnerMock{} mockRunner.On("runFrom", uint32(64), "0000000000000000000000000000000000000000000000000000000000000000").Return(nil).Once() mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) mockRunner.On("context").Return(ctx) mockRunner.On("getProcessExitError").Return(false, nil) mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(129), }, nil) mockArchive. On("GetLedgerHeader", uint32(65)). Return(xdr.LedgerHeaderHistoryEntry{}, nil) captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, checkpointManager: historyarchive.NewCheckpointManager(64), } err := captiveBackend.PrepareRange(ctx, UnboundedRange(65)) assert.NoError(t, err) captiveBackend.nextLedger = 64 err = captiveBackend.PrepareRange(ctx, UnboundedRange(65)) assert.NoError(t, err) mockArchive.AssertExpectations(t) mockRunner.AssertExpectations(t) } func TestGetLatestLedgerSequence(t *testing.T) { metaChan := make(chan metaResult, 300) // Core will actually start with the last checkpoint before the `from` ledger // and then rewind to the `from` ledger. for i := 2; i <= 200; i++ { meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) metaChan <- metaResult{ LedgerCloseMeta: &meta, } } ctx := context.Background() mockRunner := &stellarCoreRunnerMock{} mockRunner.On("runFrom", uint32(63), "0000000000000000000000000000000000000000000000000000000000000000").Return(nil).Once() mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) mockRunner.On("context").Return(ctx) mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(200), }, nil) mockArchive. On("GetLedgerHeader", uint32(64)). Return(xdr.LedgerHeaderHistoryEntry{}, nil) captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, checkpointManager: historyarchive.NewCheckpointManager(64), } err := captiveBackend.PrepareRange(ctx, UnboundedRange(64)) assert.NoError(t, err) latest, err := captiveBackend.GetLatestLedgerSequence(ctx) assert.NoError(t, err) assert.Equal(t, uint32(200), latest) mockArchive.AssertExpectations(t) mockRunner.AssertExpectations(t) } func TestCaptiveGetLedger(t *testing.T) { tt := assert.New(t) metaChan := make(chan metaResult, 300) for i := 64; i <= 66; i++ { meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) metaChan <- metaResult{ LedgerCloseMeta: &meta, } } ctx := context.Background() ctx, cancel := context.WithCancel(ctx) mockRunner := &stellarCoreRunnerMock{} mockRunner.On("catchup", uint32(65), uint32(66)).Return(nil) mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) mockRunner.On("context").Return(ctx) mockRunner.On("getProcessExitError").Return(false, nil) mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). 
Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(200), }, nil) captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, checkpointManager: historyarchive.NewCheckpointManager(64), } // requires PrepareRange _, err := captiveBackend.GetLedger(ctx, 64) tt.EqualError(err, "session is not prepared, call PrepareRange first") ledgerRange := BoundedRange(65, 66) tt.False(captiveBackend.isPrepared(ledgerRange), "core is not prepared until explicitly prepared") tt.False(captiveBackend.closed) err = captiveBackend.PrepareRange(ctx, ledgerRange) assert.NoError(t, err) tt.True(captiveBackend.isPrepared(ledgerRange)) tt.False(captiveBackend.closed) _, err = captiveBackend.GetLedger(ctx, 64) tt.Error(err, "requested ledger 64 is behind the captive core stream (expected=66)") // reads value from buffer meta, err := captiveBackend.GetLedger(ctx, 65) tt.NoError(err) tt.Equal(xdr.Uint32(65), meta.V0.LedgerHeader.Header.LedgerSeq) // reads value from cachedMeta cachedMeta, err := captiveBackend.GetLedger(ctx, 65) tt.NoError(err) tt.Equal(meta, cachedMeta) // next sequence number didn't get consumed tt.Equal(uint32(66), captiveBackend.nextLedger) mockRunner.On("close").Return(nil).Run(func(args mock.Arguments) { cancel() }).Once() _, err = captiveBackend.GetLedger(ctx, 66) tt.NoError(err) tt.False(captiveBackend.isPrepared(ledgerRange)) tt.False(captiveBackend.closed) _, err = captiveBackend.GetLedger(ctx, 66) tt.NoError(err) // core is not closed unless it's explicitly closed tt.False(captiveBackend.closed) mockArchive.AssertExpectations(t) mockRunner.AssertExpectations(t) } // TestCaptiveGetLedgerCacheLatestLedger test the following case: // 1. Prepare Unbounded range. // 2. GetLedger that is still not in the buffer. // 3. Get latest ledger in the buffer using GetLedger. // // Before 3d97762 this test failed because cachedMeta was only updated when // the ledger with a requested sequence was reached while streaming meta. // // TODO: Not sure this test is really valid or worth it anymore, now that GetLedger is always blocking. func TestCaptiveGetLedgerCacheLatestLedger(t *testing.T) { tt := assert.New(t) metaChan := make(chan metaResult, 300) for i := 2; i <= 67; i++ { meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) metaChan <- metaResult{ LedgerCloseMeta: &meta, } } ctx, cancel := context.WithCancel(context.Background()) defer cancel() mockRunner := &stellarCoreRunnerMock{} mockRunner.On("runFrom", uint32(65), "0101010100000000000000000000000000000000000000000000000000000000").Return(nil).Once() mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) mockRunner.On("context").Return(ctx) mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(200), }, nil) mockArchive. On("GetLedgerHeader", uint32(66)). 
Return(xdr.LedgerHeaderHistoryEntry{ Header: xdr.LedgerHeader{ PreviousLedgerHash: xdr.Hash{1, 1, 1, 1}, }, }, nil).Once() captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, checkpointManager: historyarchive.NewCheckpointManager(64), } err := captiveBackend.PrepareRange(ctx, UnboundedRange(66)) assert.NoError(t, err) // found, _, err := captiveBackend.GetLedger(ctx, 68) // tt.NoError(err) // tt.False(found) // tt.Equal(uint32(67), captiveBackend.cachedMeta.LedgerSequence()) // tt.Equal(uint32(68), captiveBackend.nextLedger) meta, err := captiveBackend.GetLedger(ctx, 67) tt.NoError(err) tt.Equal(uint32(67), meta.LedgerSequence()) mockArchive.AssertExpectations(t) mockRunner.AssertExpectations(t) } func TestCaptiveGetLedger_NextLedgerIsDifferentToLedgerFromBuffer(t *testing.T) { metaChan := make(chan metaResult, 100) for i := 64; i <= 65; i++ { meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) metaChan <- metaResult{ LedgerCloseMeta: &meta, } } { meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(68)}) metaChan <- metaResult{ LedgerCloseMeta: &meta, } } ctx := context.Background() mockRunner := &stellarCoreRunnerMock{} mockRunner.On("catchup", uint32(65), uint32(66)).Return(nil) mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) mockRunner.On("context").Return(ctx) mockRunner.On("close").Return(nil) mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(200), }, nil) captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, checkpointManager: historyarchive.NewCheckpointManager(64), } err := captiveBackend.PrepareRange(ctx, BoundedRange(65, 66)) assert.NoError(t, err) _, err = captiveBackend.GetLedger(ctx, 66) assert.EqualError(t, err, "unexpected ledger sequence (expected=66 actual=68)") // TODO assertions should work - to be fixed in a separate PR. // _, err = captiveBackend.GetLedger(ctx, 66) // assert.EqualError(t, err, "session is closed, call PrepareRange first") mockArchive.AssertExpectations(t) mockRunner.AssertExpectations(t) } func TestCaptiveGetLedger_NextLedger0RangeFromIsSmallerThanLedgerFromBuffer(t *testing.T) { metaChan := make(chan metaResult, 100) for i := 66; i <= 66; i++ { meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) metaChan <- metaResult{ LedgerCloseMeta: &meta, } } ctx := context.Background() mockRunner := &stellarCoreRunnerMock{} mockRunner.On("runFrom", uint32(64), mock.Anything).Return(nil) mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) mockRunner.On("context").Return(ctx) mockRunner.On("close").Return(nil) mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(200), }, nil) mockArchive. On("GetLedgerHeader", uint32(65)). 
Return(xdr.LedgerHeaderHistoryEntry{}, nil) captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, checkpointManager: historyarchive.NewCheckpointManager(64), } err := captiveBackend.PrepareRange(ctx, UnboundedRange(65)) assert.EqualError(t, err, "Error fast-forwarding to 65: unexpected ledger sequence (expected=<=65 actual=66)") // TODO assertions should work - to be fixed in a separate PR. // prepared, err := captiveBackend.IsPrepared(ctx, UnboundedRange(65)) // assert.NoError(t, err) // assert.False(t, prepared) mockArchive.AssertExpectations(t) mockRunner.AssertExpectations(t) } func TestCaptiveStellarCore_PrepareRangeAfterClose(t *testing.T) { ctx := context.Background() executablePath := "/etc/stellar-core" networkPassphrase := network.PublicNetworkPassphrase historyURLs := []string{"http://localhost"} captiveCoreToml, err := NewCaptiveCoreToml(CaptiveCoreTomlParams{}) assert.NoError(t, err) captiveStellarCore, err := NewCaptive( CaptiveCoreConfig{ BinaryPath: executablePath, NetworkPassphrase: networkPassphrase, HistoryArchiveURLs: historyURLs, Toml: captiveCoreToml, }, ) assert.NoError(t, err) assert.NoError(t, captiveStellarCore.Close()) assert.EqualError( t, captiveStellarCore.PrepareRange(ctx, BoundedRange(65, 66)), "error starting prepare range: opening subprocess: error getting latest checkpoint sequence: "+ "error getting root HAS: Get \"http://localhost/.well-known/stellar-history.json\": context canceled", ) // even if the request to fetch the latest checkpoint succeeds, we should fail at creating the subprocess mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(200), }, nil) captiveStellarCore.archive = mockArchive assert.EqualError( t, captiveStellarCore.PrepareRange(ctx, BoundedRange(65, 66)), "error starting prepare range: opening subprocess: error running stellar-core: context canceled", ) mockArchive.AssertExpectations(t) } func TestCaptiveGetLedger_ErrReadingMetaResult(t *testing.T) { tt := assert.New(t) metaChan := make(chan metaResult, 100) for i := 64; i <= 65; i++ { meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) metaChan <- metaResult{ LedgerCloseMeta: &meta, } } metaChan <- metaResult{ err: fmt.Errorf("unmarshalling error"), } ctx := context.Background() mockRunner := &stellarCoreRunnerMock{} mockRunner.On("catchup", uint32(65), uint32(66)).Return(nil) mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) ctx, cancel := context.WithCancel(ctx) mockRunner.On("context").Return(ctx) mockRunner.On("close").Return(nil).Run(func(args mock.Arguments) { cancel() }).Once() // even if the request to fetch the latest checkpoint succeeds, we should fail at creating the subprocess mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). 
Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(200), }, nil) captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, checkpointManager: historyarchive.NewCheckpointManager(64), } err := captiveBackend.PrepareRange(ctx, BoundedRange(65, 66)) assert.NoError(t, err) meta, err := captiveBackend.GetLedger(ctx, 65) tt.NoError(err) tt.Equal(xdr.Uint32(65), meta.V0.LedgerHeader.Header.LedgerSeq) tt.False(captiveBackend.closed) // try reading from an empty buffer _, err = captiveBackend.GetLedger(ctx, 66) tt.EqualError(err, "unmarshalling error") // not closed even if there is an error getting ledger tt.False(captiveBackend.closed) mockArchive.AssertExpectations(t) mockRunner.AssertExpectations(t) } func TestCaptiveGetLedger_ErrClosingAfterLastLedger(t *testing.T) { tt := assert.New(t) metaChan := make(chan metaResult, 100) for i := 64; i <= 66; i++ { meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) metaChan <- metaResult{ LedgerCloseMeta: &meta, } } ctx := context.Background() mockRunner := &stellarCoreRunnerMock{} mockRunner.On("catchup", uint32(65), uint32(66)).Return(nil) mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) mockRunner.On("context").Return(ctx) mockRunner.On("close").Return(fmt.Errorf("transient error")).Once() mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(200), }, nil) captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, checkpointManager: historyarchive.NewCheckpointManager(64), } err := captiveBackend.PrepareRange(ctx, BoundedRange(65, 66)) assert.NoError(t, err) _, err = captiveBackend.GetLedger(ctx, 66) tt.EqualError(err, "error closing session: transient error") mockArchive.AssertExpectations(t) mockRunner.AssertExpectations(t) } func TestCaptiveAfterClose(t *testing.T) { metaChan := make(chan metaResult, 100) for i := 64; i <= 66; i++ { meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) metaChan <- metaResult{ LedgerCloseMeta: &meta, } } mockRunner := &stellarCoreRunnerMock{} ctx, cancel := context.WithCancel(context.Background()) mockRunner.On("catchup", uint32(65), uint32(66)).Return(nil) mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) mockRunner.On("context").Return(ctx) mockRunner.On("close").Return(nil).Once() mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). 
Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(200), }, nil) captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, checkpointManager: historyarchive.NewCheckpointManager(64), cancel: cancel, } boundedRange := BoundedRange(65, 66) err := captiveBackend.PrepareRange(ctx, boundedRange) assert.NoError(t, err) assert.NoError(t, captiveBackend.Close()) assert.True(t, captiveBackend.closed) _, err = captiveBackend.GetLedger(ctx, boundedRange.to) assert.EqualError(t, err, "stellar-core is no longer usable") var prepared bool prepared, err = captiveBackend.IsPrepared(ctx, boundedRange) assert.False(t, prepared) assert.NoError(t, err) _, err = captiveBackend.GetLatestLedgerSequence(ctx) assert.EqualError(t, err, "stellar-core is no longer usable") mockArchive.AssertExpectations(t) mockRunner.AssertExpectations(t) } func TestGetLedgerBoundsCheck(t *testing.T) { metaChan := make(chan metaResult, 100) for i := 128; i <= 130; i++ { meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) metaChan <- metaResult{ LedgerCloseMeta: &meta, } } ctx := context.Background() mockRunner := &stellarCoreRunnerMock{} mockRunner.On("catchup", uint32(128), uint32(130)).Return(nil).Once() mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) mockRunner.On("context").Return(ctx) mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(200), }, nil) captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, checkpointManager: historyarchive.NewCheckpointManager(64), } err := captiveBackend.PrepareRange(ctx, BoundedRange(128, 130)) assert.NoError(t, err) meta, err := captiveBackend.GetLedger(ctx, 128) assert.NoError(t, err) assert.Equal(t, uint32(128), meta.LedgerSequence()) prev := meta meta, err = captiveBackend.GetLedger(ctx, 128) assert.NoError(t, err) assert.Equal(t, prev, meta) _, err = captiveBackend.GetLedger(ctx, 64) assert.EqualError(t, err, "requested ledger 64 is behind the captive core stream (expected=129)") mockArchive.AssertExpectations(t) mockRunner.AssertExpectations(t) } func TestCaptiveGetLedgerTerminatedUnexpectedly(t *testing.T) { ledger64 := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(64)}) for _, testCase := range []struct { name string ctx context.Context ledgers []metaResult processExited bool processExitedError error expectedError string }{ { "stellar core exited unexpectedly without error", context.Background(), []metaResult{{LedgerCloseMeta: &ledger64}}, true, nil, "stellar core exited unexpectedly", }, { "stellar core exited unexpectedly with an error", context.Background(), []metaResult{{LedgerCloseMeta: &ledger64}}, true, fmt.Errorf("signal kill"), "stellar core exited unexpectedly: signal kill", }, { "stellar core exited unexpectedly without error and closed channel", context.Background(), []metaResult{{LedgerCloseMeta: &ledger64}}, true, nil, "stellar core exited unexpectedly", }, { "stellar core exited unexpectedly with an error and closed channel", context.Background(), []metaResult{{LedgerCloseMeta: &ledger64}}, true, fmt.Errorf("signal kill"), "stellar core exited unexpectedly: signal kill", }, { "meta pipe closed unexpectedly", context.Background(), []metaResult{{LedgerCloseMeta: &ledger64}}, false, nil, 
"meta pipe closed unexpectedly", }, } { t.Run(testCase.name, func(t *testing.T) { metaChan := make(chan metaResult, 100) for _, result := range testCase.ledgers { metaChan <- result } close(metaChan) ctx := testCase.ctx mockRunner := &stellarCoreRunnerMock{} mockRunner.On("catchup", uint32(64), uint32(100)).Return(nil).Once() mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) mockRunner.On("context").Return(ctx) mockRunner.On("getProcessExitError").Return(testCase.processExited, testCase.processExitedError) mockRunner.On("close").Return(nil).Once() mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(200), }, nil) captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, checkpointManager: historyarchive.NewCheckpointManager(64), } err := captiveBackend.PrepareRange(ctx, BoundedRange(64, 100)) assert.NoError(t, err) meta, err := captiveBackend.GetLedger(ctx, 64) assert.NoError(t, err) assert.Equal(t, uint32(64), meta.LedgerSequence()) _, err = captiveBackend.GetLedger(ctx, 65) assert.EqualError(t, err, testCase.expectedError) mockArchive.AssertExpectations(t) mockRunner.AssertExpectations(t) }) } } func TestCaptiveUseOfLedgerHashStore(t *testing.T) { ctx := context.Background() mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetLedgerHeader", uint32(300)). Return(xdr.LedgerHeaderHistoryEntry{ Header: xdr.LedgerHeader{ PreviousLedgerHash: xdr.Hash{1, 1, 1, 1}, }, }, nil) mockLedgerHashStore := &MockLedgerHashStore{} mockLedgerHashStore.On("GetLedgerHash", ctx, uint32(1049)). Return("", false, fmt.Errorf("transient error")).Once() mockLedgerHashStore.On("GetLedgerHash", ctx, uint32(299)). Return("", false, nil).Once() mockLedgerHashStore.On("GetLedgerHash", ctx, uint32(85)). Return("cde", true, nil).Once() mockLedgerHashStore.On("GetLedgerHash", ctx, uint32(127)). Return("ghi", true, nil).Once() mockLedgerHashStore.On("GetLedgerHash", ctx, uint32(2)). 
Return("mnb", true, nil).Once() cancelCalled := false captiveBackend := CaptiveStellarCore{ archive: mockArchive, ledgerHashStore: mockLedgerHashStore, checkpointManager: historyarchive.NewCheckpointManager(64), cancel: context.CancelFunc(func() { cancelCalled = true }), } runFrom, ledgerHash, err := captiveBackend.runFromParams(ctx, 24) assert.NoError(t, err) assert.Equal(t, uint32(2), runFrom) assert.Equal(t, "mnb", ledgerHash) runFrom, ledgerHash, err = captiveBackend.runFromParams(ctx, 86) assert.NoError(t, err) assert.Equal(t, uint32(85), runFrom) assert.Equal(t, "cde", ledgerHash) runFrom, ledgerHash, err = captiveBackend.runFromParams(ctx, 128) assert.NoError(t, err) assert.Equal(t, uint32(127), runFrom) assert.Equal(t, "ghi", ledgerHash) _, _, err = captiveBackend.runFromParams(ctx, 1050) assert.EqualError(t, err, "error trying to read ledger hash 1049: transient error") runFrom, ledgerHash, err = captiveBackend.runFromParams(ctx, 300) assert.NoError(t, err) assert.Equal(t, uint32(299), runFrom, "runFrom") assert.Equal(t, "0101010100000000000000000000000000000000000000000000000000000000", ledgerHash) mockLedgerHashStore.On("Close").Return(nil).Once() err = captiveBackend.Close() assert.NoError(t, err) assert.True(t, cancelCalled) mockLedgerHashStore.AssertExpectations(t) mockArchive.AssertExpectations(t) } func TestCaptiveRunFromParams(t *testing.T) { var tests = []struct { from uint32 runFrom uint32 ledgerArchives uint32 }{ // Before and including 1st checkpoint: {2, 2, 3}, {3, 2, 3}, {3, 2, 3}, {4, 2, 3}, {62, 2, 3}, {63, 2, 3}, // Starting from 64 we go normal path: between 1st and 2nd checkpoint: {64, 63, 64}, {65, 64, 65}, {66, 65, 66}, {126, 125, 126}, // between 2nd and 3rd checkpoint... and so on. {127, 126, 127}, {128, 127, 128}, {129, 128, 129}, } for _, tc := range tests { t.Run(fmt.Sprintf("from_%d", tc.from), func(t *testing.T) { tt := assert.New(t) mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetLedgerHeader", uint32(tc.ledgerArchives)). 
Return(xdr.LedgerHeaderHistoryEntry{ Header: xdr.LedgerHeader{ PreviousLedgerHash: xdr.Hash{1, 1, 1, 1}, }, }, nil) captiveBackend := CaptiveStellarCore{ archive: mockArchive, checkpointManager: historyarchive.NewCheckpointManager(64), } ctx := context.Background() runFrom, ledgerHash, err := captiveBackend.runFromParams(ctx, tc.from) tt.NoError(err) tt.Equal(tc.runFrom, runFrom, "runFrom") tt.Equal("0101010100000000000000000000000000000000000000000000000000000000", ledgerHash) mockArchive.AssertExpectations(t) }) } } func TestCaptiveIsPrepared(t *testing.T) { mockRunner := &stellarCoreRunnerMock{} mockRunner.On("context").Return(context.Background()).Maybe() mockRunner.On("getProcessExitError").Return(false, nil) // c.prepared == nil captiveBackend := CaptiveStellarCore{ nextLedger: 0, } result := captiveBackend.isPrepared(UnboundedRange(100)) assert.False(t, result) // c.prepared != nil: var tests = []struct { nextLedger uint32 lastLedger uint32 cachedLedger uint32 preparedRange Range ledgerRange Range result bool }{ // If nextLedger == 0, prepared range is checked {0, 0, 0, UnboundedRange(100), UnboundedRange(100), true}, {0, 0, 0, UnboundedRange(100), UnboundedRange(99), false}, {0, 0, 0, UnboundedRange(100), BoundedRange(100, 200), true}, {100, 0, 0, UnboundedRange(99), UnboundedRange(101), true}, {101, 0, 100, UnboundedRange(99), UnboundedRange(100), true}, {100, 200, 0, BoundedRange(99, 200), UnboundedRange(100), false}, {100, 200, 0, BoundedRange(99, 200), BoundedRange(100, 200), true}, {100, 200, 0, BoundedRange(99, 200), BoundedRange(100, 201), false}, {100, 201, 0, BoundedRange(99, 201), BoundedRange(100, 200), true}, {101, 200, 100, BoundedRange(99, 200), BoundedRange(100, 200), true}, } for _, tc := range tests { t.Run(fmt.Sprintf("next_%d_last_%d_cached_%d_range_%v", tc.nextLedger, tc.lastLedger, tc.cachedLedger, tc.ledgerRange), func(t *testing.T) { captiveBackend := CaptiveStellarCore{ stellarCoreRunner: mockRunner, nextLedger: tc.nextLedger, prepared: &tc.preparedRange, } if tc.lastLedger > 0 { captiveBackend.lastLedger = &tc.lastLedger } if tc.cachedLedger > 0 { meta := buildLedgerCloseMeta(testLedgerHeader{ sequence: tc.cachedLedger, }) captiveBackend.cachedMeta = &meta } result := captiveBackend.isPrepared(tc.ledgerRange) assert.Equal(t, tc.result, result) }) } } // TestCaptiveIsPreparedCoreContextCancelled checks if IsPrepared returns false // if the stellarCoreRunner.context() is cancelled. This can happen when // stellarCoreRunner was closed, ex. when binary file was updated. func TestCaptiveIsPreparedCoreContextCancelled(t *testing.T) { mockRunner := &stellarCoreRunnerMock{} ctx, cancel := context.WithCancel(context.Background()) mockRunner.On("context").Return(ctx).Maybe() mockRunner.On("getProcessExitError").Return(false, nil) rang := UnboundedRange(100) captiveBackend := CaptiveStellarCore{ nextLedger: 100, prepared: &rang, stellarCoreRunner: mockRunner, } result := captiveBackend.isPrepared(UnboundedRange(100)) assert.True(t, result) cancel() result = captiveBackend.isPrepared(UnboundedRange(100)) assert.False(t, result) } // TestCaptivePreviousLedgerCheck checks if previousLedgerHash is set in PrepareRange // and then checked and updated in GetLedger. 
func TestCaptivePreviousLedgerCheck(t *testing.T) { metaChan := make(chan metaResult, 200) h := 3 for i := 192; i <= 300; i++ { meta := buildLedgerCloseMeta(testLedgerHeader{ sequence: uint32(i), hash: fmt.Sprintf("%02x00000000000000000000000000000000000000000000000000000000000000", h), previousLedgerHash: fmt.Sprintf("%02x00000000000000000000000000000000000000000000000000000000000000", h-1), }) metaChan <- metaResult{ LedgerCloseMeta: &meta, } h++ } { // Write invalid hash meta := buildLedgerCloseMeta(testLedgerHeader{ sequence: 301, hash: "0000000000000000000000000000000000000000000000000000000000000000", previousLedgerHash: "0000000000000000000000000000000000000000000000000000000000000000", }) metaChan <- metaResult{ LedgerCloseMeta: &meta, } } ctx := context.Background() mockRunner := &stellarCoreRunnerMock{} mockRunner.On("runFrom", uint32(299), "0101010100000000000000000000000000000000000000000000000000000000").Return(nil).Once() mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) mockRunner.On("context").Return(ctx) mockRunner.On("close").Return(nil).Once() mockArchive := &historyarchive.MockArchive{} mockArchive. On("GetRootHAS"). Return(historyarchive.HistoryArchiveState{ CurrentLedger: uint32(255), }, nil) mockArchive. On("GetLedgerHeader", uint32(300)). Return(xdr.LedgerHeaderHistoryEntry{ Header: xdr.LedgerHeader{ PreviousLedgerHash: xdr.Hash{1, 1, 1, 1}, }, }, nil).Once() mockLedgerHashStore := &MockLedgerHashStore{} mockLedgerHashStore.On("GetLedgerHash", ctx, uint32(299)). Return("", false, nil).Once() captiveBackend := CaptiveStellarCore{ archive: mockArchive, stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { return mockRunner, nil }, ledgerHashStore: mockLedgerHashStore, checkpointManager: historyarchive.NewCheckpointManager(64), } err := captiveBackend.PrepareRange(ctx, UnboundedRange(300)) assert.NoError(t, err) meta, err := captiveBackend.GetLedger(ctx, 300) assert.NoError(t, err) assert.NotNil(t, captiveBackend.previousLedgerHash) assert.Equal(t, uint32(301), captiveBackend.nextLedger) assert.Equal(t, meta.LedgerHash().HexString(), *captiveBackend.previousLedgerHash) _, err = captiveBackend.GetLedger(ctx, 301) assert.EqualError(t, err, "unexpected previous ledger hash for ledger 301 (expected=6f00000000000000000000000000000000000000000000000000000000000000 actual=0000000000000000000000000000000000000000000000000000000000000000)") mockRunner.AssertExpectations(t) mockArchive.AssertExpectations(t) }
{
	ctx := context.Background()
	mockArchive := &historyarchive.MockArchive{}
	mockArchive.
		On("GetRootHAS").
		Return(historyarchive.HistoryArchiveState{
			CurrentLedger: uint32(64),
		}, nil)

	captiveBackend := CaptiveStellarCore{
		archive: mockArchive,
	}

	err := captiveBackend.PrepareRange(ctx, BoundedRange(100, 200))
	assert.EqualError(t, err, "error starting prepare range: opening subprocess: from sequence: 100 is greater than max available in history archives: 64")

	err = captiveBackend.PrepareRange(ctx, UnboundedRange(100))
	assert.EqualError(t, err, "error starting prepare range: opening subprocess: trying to start online mode too far (latest checkpoint=64), only two checkpoints in the future allowed")

	mockArchive.AssertExpectations(t)
}
helpers.rs
extern crate reqwest;
extern crate serde_json;

use serde_json::Value;

/// Convenience function for getting JSON from the returned body string via `serde`.
pub fn get_json(mut reqwest_res: reqwest::Response) -> Result<Value, Box<dyn std::error::Error>>
{
    let data: Value = serde_json::from_str(&reqwest_res.text()?)?;
    Ok(data)
}
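A minimal caller for the helper above, shown only as a sketch: it assumes the blocking (pre-async, 0.9-era) reqwest API that the `reqwest::Response` signature implies, and the endpoint URL and JSON fields are purely illustrative.

// Hypothetical usage sketch; not part of helpers.rs.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Blocking GET (pre-async reqwest); any JSON endpoint would do here.
    let res = reqwest::get("https://httpbin.org/json")?;
    // Parse the body into a dynamically typed serde_json::Value.
    let data = get_json(res)?;
    println!("{}", data["slideshow"]["title"]);
    Ok(())
}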
luxafor.py
#!/usr/local/homebrew/bin/python
import usb.core
import usb.util
import sys
import getopt
import time

# find our device
dev = usb.core.find(idVendor=0x04d8, idProduct=0xf372)

# was it found?
if dev is None:
    raise ValueError('Device not found')

# Linux kernel sets up a device driver for USB device, which you have
# to detach. Otherwise trying to interact with the device gives a
# 'Resource Busy' error.
try:
    dev.detach_kernel_driver(0)
except Exception:
    pass

dev.set_configuration()
dev.write(1, [0, 0])

# "red" == 82
# "green" == 71
# "blue" == 66
# "yellow" == 89
# "off" == 79


def flash(color, speed=0.5, loops=2):
    # loop = 0
    # for _ in range(loops):
    #     dev.write(1, [0, 79])
    #     time.sleep(speed)
    #     dev.write(1, [0, color])
    #     time.sleep(speed)
    # dev.write(1, [0, 79])
    return


def setclr(color):
    dev.write(1, [0, color])
    return


def main(argv):
if __name__ == "__main__":
    main(sys.argv[1:])
    doflash = False
    color = 79
    delay = 0.5
    loops = 1
    try:
        opts, args = getopt.getopt(argv, "hf:c:", ["delay=", "loops="])
    except getopt.GetoptError:
        print('luxafor.py -c <colors=82,71,66,89,79>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('luxafor.py -c <colors=82,71,66,89,79>')
            sys.exit()
        elif opt == '-c':
            setclr(int(arg))
        elif opt == '-f':
            doflash = True
            color = int(arg)
        elif opt == '--delay':
            delay = float(arg)
        elif opt == '--loops':
            loops = int(arg)
    if doflash:
        flash(color, delay, loops)
        doflash = False
server.rs
use actix::prelude::*; use rand::{distributions::Alphanumeric, prelude::*, rngs::ThreadRng}; use std::collections::HashMap; use std::sync::Arc; use crate::word_pack::{load_word_packs, WordPack}; use crate::Room; use log::{info, trace, warn}; #[derive(Message, Clone)] #[rtype(result = "()")] pub enum Event { /// Chat message containing username followed by content Message(usize, String), /// Draw event containing: (x1, y1, x2, y2, penSize) Draw(u32, u32, u32, u32, u32), /// Clears the canvas ClearCanvas, /// Start of a new round NewRound(usize, Option<u128>), /// Assign the session a word to draw NewLeader(bool, String, Option<u128>), /// Join a room. Contains the room code and user list EnterRoom(String, Vec<(usize, String)>), /// Error that indicates that a username already exists within a room UsernameExists(String), /// Error that indicates that a room key doesn't exist NonExistantRoom(String), /// Leave a room LeaveRoom, /// When a user has won. Contains the username, points, word guessed, and alternate Winner(Option<usize>, usize, String, Option<String>), /// When another user joins UserJoin(usize, String), /// When another user leaves UserGone(usize), /// Join a lobby. Contains the id of the host EnterLobby(usize), // Settings suplementary data for client. Wordpack id followed by name and description SettingsData(Vec<(usize, String, String)>), } pub struct GameServer { rooms: HashMap<String, Room>, recipients: HashMap<usize, Recipient<Event>>, rng: ThreadRng, word_packs: Arc<Vec<WordPack>>, } impl GameServer { pub fn new<P: std::fmt::Debug + AsRef<std::path::Path>>(word_pack_dir: P) -> Self { let word_packs = load_word_packs(word_pack_dir).expect("Error loading the word packs"); info!( "Game server instance created with {} word packs", word_packs.len() ); GameServer { rooms: HashMap::new(), recipients: HashMap::new(), rng: ThreadRng::default(), word_packs: Arc::new(word_packs), } } fn create_room(&mut self, session_id: usize, username: String) { loop { let key: String = std::iter::repeat(()) .map(|()| self.rng.sample(Alphanumeric)) .take(5) .collect(); if self.rooms.get(&key).is_none() { if let Some(recipient) = self.recipients.get(&session_id) { let room = Room::new( key.clone(), Arc::clone(&self.word_packs), session_id, recipient.clone(), username.clone(), ); self.rooms.insert(key.clone(), room); trace!( "Room {} was created by user {} ({}), there are now {} rooms", key, username, session_id, self.rooms.len(), ); } else { warn!("User creating a room didn't exist"); } return; } else { trace!("Tried to create room with key {} but it was taken", key); } } } fn join_room(&mut self, key: &str, username: String, session_id: usize) { let recipient = self .recipients .get(&session_id) .expect("session_id did not exist"); if let Some(room) = self.rooms.get_mut(key) { room.join(session_id, recipient.clone(), username); } else { // Perfectly normal user behaviour (e.g. 
enter wrong key by accident) let _ = recipient.do_send(Event::NonExistantRoom(key.to_string())); trace!( "User {} ({}) tried to join non-existant room {}", username, session_id, key ); } } fn leave_room(&mut self, key: &str, session_id: usize, ctx: &mut Context<GameServer>) { if let Some(room) = self.rooms.get_mut(key) { // If room after the session leaving is now empty, delete it if room.leave(session_id, ctx) { self.rooms.remove(key); trace!( "Room {} is empty so removing it, {} room(s) left", key, self.rooms.len(), ); } } else { warn!( "User {} tried to leave non-existant room {}", session_id, key ); } } fn start_room( &mut self, key: &str, session_id: usize, lines: Vec<String>, ctx: &mut Context<GameServer>, ) { if let Some(room) = self.rooms.get_mut(key) { room.start(session_id, lines, ctx); } else { warn!( "User {} tried to start non-existant room {}", session_id, key ); } } fn handle_clear(&mut self, key: &str, session_id: usize) { if let Some(room) = self.rooms.get_mut(key) { room.clear(session_id); } else { warn!( "User {} tried to clear non-existant room {}", session_id, key ); } } pub fn round_timeout(&mut self, key: &str, round_id: usize, ctx: &mut Context<GameServer>)
#[allow(clippy::map_entry)] fn connect(&mut self, recipient: Recipient<Event>) -> usize { loop { let id: usize = self.rng.gen(); if !self.recipients.contains_key(&id) && id != 0 { self.recipients.insert(id, recipient); info!( "Recipient given id {}, there are now {} user(s) connected", id, self.recipients.len() ); return id; } } } fn disconnect(&mut self, id: usize) { self.recipients.remove(&id); trace!( "Id {} disconnected, {} user(s) left", id, self.recipients.len() ); } pub fn new_round(&mut self, room_key: String, ctx: &mut Context<GameServer>) { if let Some(room) = self.rooms.get_mut(&room_key) { room.new_round(ctx); } } } impl Actor for GameServer { type Context = Context<Self>; } #[derive(Message)] #[rtype(result = "()")] pub struct ClientMessage { pub session_id: usize, pub content: String, pub room: Option<String>, } #[derive(Message)] #[rtype(result = "usize")] pub struct ConnectMessage { pub recipient: Recipient<Event>, } #[derive(Message)] #[rtype(result = "()")] pub struct DisconnectMessage { pub session_id: usize, pub room: Option<String>, } impl Handler<ClientMessage> for GameServer { type Result = (); fn handle(&mut self, msg: ClientMessage, ctx: &mut Context<Self>) { let type_char = if let Some(char) = msg.content.chars().next() { char } else { warn!("User {} sent empty message (no type_char)", msg.session_id,); return; }; match (msg.room, type_char) { (Some(room_key), 'm') => { let chat: String = msg.content.chars().skip(1).collect(); if chat.is_empty() { warn!( "User {} tried to send empty message in room {}", msg.session_id, room_key ); return; } if let Some(room) = self.rooms.get_mut(&room_key) { room.handle_guess(msg.session_id, chat, ctx); } else { warn!( "User {} was marked as being in non-existant room {} when sending message", msg.session_id, room_key ); } } (Some(room_key), 'd') => { let data: String = msg.content.chars().skip(1).collect(); if let Some(room) = self.rooms.get_mut(&room_key) { room.handle_draw(msg.session_id, data); } else { warn!( "User {} was marked as being in non-existant room {} when sending draw command", msg.session_id, room_key ); } } (Some(room_key), 'q') => { self.leave_room(&room_key, msg.session_id, ctx); } (Some(room_key), 's') => { let lines: Vec<String> = msg.content.lines().skip(1).map(|x| x.to_string()).collect(); self.start_room(&room_key, msg.session_id, lines, ctx); } (Some(room_key), 'c') => { self.handle_clear(&room_key, msg.session_id); } (None, 'j') => { let data = msg.content.chars().skip(1).collect::<String>(); let components = data.split(',').collect::<Vec<_>>(); if let [key, username] = *components { if validate_username(username) { self.join_room(&key, username.to_string(), msg.session_id); } else { warn!( "{} sent invalid username {} when joining room {}", msg.session_id, username, key ); } } else { warn!( "{} tried to join room without the correct number of components (expected 2 got {})", msg.session_id, components.len(), ); } } (None, 'n') => { let username: String = msg.content.chars().skip(1).collect(); if validate_username(&username) { self.create_room(msg.session_id, username); } else { warn!( "{} sent invalid username {} when creating room", msg.session_id, username ); } } (room, c) => { warn!( "Invalid message: got type_char {}, was in room {:?}", c, room ); } } } } impl Handler<ConnectMessage> for GameServer { type Result = usize; fn handle(&mut self, msg: ConnectMessage, _: &mut Context<Self>) -> usize { self.connect(msg.recipient) } } impl Handler<DisconnectMessage> for GameServer { type Result = (); fn 
handle(&mut self, msg: DisconnectMessage, ctx: &mut Context<Self>) { self.disconnect(msg.session_id); if let Some(room) = msg.room { self.leave_room(&room, msg.session_id, ctx); } } } fn validate_username(username: &str) -> bool { !username.contains(',') && username.len() < 15 }
{
    if let Some(room) = self.rooms.get_mut(key) {
        room.round_timeout(round_id, ctx);
    } else {
        trace!("Round timeout on non-existant room {}", key);
    }
}
score.py
# -*- coding: utf-8 -*-
#
# Copyright @ 0x6c78.
#
# 16-10-20 1:27 PM [email protected]
#
# Distributed under terms of the MIT License

from functools import reduce
from operator import mul
from itertools import combinations
    def __init__(self):
        """
        Per-position mismatch specificity weights determined experimentally
        by the Zhang lab; see http://crispr.mit.edu/about for details.
        """
        self.m = (0, 0, 0.014, 0, 0, 0.395, 0.317, 0, 0.389, 0.079, 0.445,
                  0.508, 0.613, 0.851, 0.732, 0.828, 0.615, 0.804, 0.685, 0.583)

    def _t1(self, locs):
        """
        :param locs: positions of the mismatches
        :return: value of the first term of the formula
        """
        return reduce(mul, [1 - self.m[loc] for loc in locs])

    @staticmethod
    def _t2(locs):
        """
        :param locs: positions of the mismatches; without a mismatch there is no
                     mean pairwise distance, so locs must hold at least one position
        :return: value of the second term of the formula
        """
        if len(locs) == 1:
            return 1.000
        else:
            locs = sorted(locs)
            length = len(locs)
            mpd = (locs[-1] - locs[0]) / (length - 1)  # mean pairwise distance
            return 1 / (((19 - mpd) / 19) * 4 + 1)

    @staticmethod
    def _t3(m):
        """
        :param m: number of mismatched bases
        :return: value of the third term of the formula
        """
        return 1 / (m ** 2)

    def get(self, locs):
        if len(locs) == 0:
            return 100.000
        elif len(locs) == 1:
            return round(100 * self._t1(locs), 3)
        else:
            return round(100 * self._t1(locs) * self._t2(locs) * self._t3(len(locs)), 3)

    @classmethod
    def to_dict(cls):
        """
        Precompute the score of every possible mismatch combination and keep it
        in a dict so that score lookups are fast.
        :return: a dict whose keys are the mismatch positions joined by
                 underscores and whose values are the scores
        """
        mm2score = {}
        pos_list = range(20)
        score = cls()
        for mm_cnt in range(5):
            for mm_pos_list in combinations(pos_list, mm_cnt):
                mm2score['_'.join(str(_) for _ in mm_pos_list)] = score.get(mm_pos_list)
        return mm2score
class Score(object):
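A short usage sketch for the class above; the mismatch positions are illustrative, and the printed values follow from the weight table `m` and the three-term formula.

# Hypothetical usage sketch; positions are 0-based indices into the 20 nt guide.
if __name__ == '__main__':
    score = Score()
    print(score.get([]))       # perfect match -> 100.0
    print(score.get([5]))      # one mismatch  -> 100 * (1 - m[5])
    print(score.get([5, 10]))  # several mismatches -> t1 * t2 * t3 applies

    # Precomputed lookup keyed by underscore-joined positions, e.g. '5_10'.
    mm2score = Score.to_dict()
    print(mm2score['5_10'])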
SettingsController.ts
import { Request, Response } from 'express'
import { SettingsService } from '../services/SettingsService'

class
{
  async create(request: Request, response: Response) {
    const { chat, username } = request.body

    const settingsService = new SettingsService()

    try {
      const settings = await settingsService.create({ chat, username })

      return response.json(settings)
    } catch (err) {
      return response.status(400).json({ message: err.message })
    }
  }

  async findByUsername(request: Request, response: Response) {
    const { username } = request.params

    const settingsService = new SettingsService()

    const settings = await settingsService.findByUsername(username)

    return response.json(settings)
  }

  async update(request: Request, response: Response) {
    const { username } = request.params
    const { chat } = request.body

    const settingsService = new SettingsService()

    const settings = await settingsService.update(username, chat)

    return response.json(settings)
  }
}

export { SettingController }
SettingController
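A sketch of how the controller above could be mounted on an Express router; the file layout, route paths, and import path are assumptions, not part of the original project.

// Hypothetical routes.ts; paths and import location are assumed.
import { Router } from 'express'
import { SettingController } from './controllers/SettingsController'

const routes = Router()
const settingController = new SettingController()

routes.post('/settings', settingController.create)
routes.get('/settings/:username', settingController.findByUsername)
routes.put('/settings/:username', settingController.update)

export { routes }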
describe_activations.go
package ecs //Licensed under the Apache License, Version 2.0 (the "License"); //you may not use this file except in compliance with the License. //You may obtain a copy of the License at // //http://www.apache.org/licenses/LICENSE-2.0 // //Unless required by applicable law or agreed to in writing, software //distributed under the License is distributed on an "AS IS" BASIS, //WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //See the License for the specific language governing permissions and //limitations under the License. // // Code generated by Alibaba Cloud SDK Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" ) // DescribeActivations invokes the ecs.DescribeActivations API synchronously func (client *Client) DescribeActivations(request *DescribeActivationsRequest) (response *DescribeActivationsResponse, err error) { response = CreateDescribeActivationsResponse() err = client.DoAction(request, response) return } // DescribeActivationsWithChan invokes the ecs.DescribeActivations API asynchronously func (client *Client) DescribeActivationsWithChan(request *DescribeActivationsRequest) (<-chan *DescribeActivationsResponse, <-chan error) { responseChan := make(chan *DescribeActivationsResponse, 1) errChan := make(chan error, 1) err := client.AddAsyncTask(func() { defer close(responseChan) defer close(errChan) response, err := client.DescribeActivations(request) if err != nil { errChan <- err } else { responseChan <- response } }) if err != nil { errChan <- err close(responseChan) close(errChan) } return responseChan, errChan } // DescribeActivationsWithCallback invokes the ecs.DescribeActivations API asynchronously func (client *Client) DescribeActivationsWithCallback(request *DescribeActivationsRequest, callback func(response *DescribeActivationsResponse, err error)) <-chan int { result := make(chan int, 1) err := client.AddAsyncTask(func() { var response *DescribeActivationsResponse var err error defer close(result) response, err = client.DescribeActivations(request) callback(response, err) result <- 1 }) if err != nil { defer close(result) callback(nil, err) result <- 0 } return result } // DescribeActivationsRequest is the request struct for api DescribeActivations type DescribeActivationsRequest struct { *requests.RpcRequest ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"` PageNumber requests.Integer `position:"Query" name:"PageNumber"` PageSize requests.Integer `position:"Query" name:"PageSize"` ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"` OwnerAccount string `position:"Query" name:"OwnerAccount"` OwnerId requests.Integer `position:"Query" name:"OwnerId"` InstanceName string `position:"Query" name:"InstanceName"` ActivationId string `position:"Query" name:"ActivationId"` } // DescribeActivationsResponse is the response struct for api DescribeActivations type DescribeActivationsResponse struct { *responses.BaseResponse RequestId string `json:"RequestId" xml:"RequestId"` TotalCount int64 `json:"TotalCount" xml:"TotalCount"` PageNumber int64 `json:"PageNumber" xml:"PageNumber"` PageSize int64 `json:"PageSize" xml:"PageSize"` ActivationList []Activation `json:"ActivationList" xml:"ActivationList"` } // CreateDescribeActivationsRequest creates a request to invoke DescribeActivations API func CreateDescribeActivationsRequest() (request 
*DescribeActivationsRequest)
// CreateDescribeActivationsResponse creates a response to parse from DescribeActivations response
func CreateDescribeActivationsResponse() (response *DescribeActivationsResponse) {
	response = &DescribeActivationsResponse{
		BaseResponse: &responses.BaseResponse{},
	}
	return
}
{
	request = &DescribeActivationsRequest{
		RpcRequest: &requests.RpcRequest{},
	}
	request.InitWithApiInfo("Ecs", "2014-05-26", "DescribeActivations", "ecs", "openAPI")
	request.Method = requests.POST
	return
}
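A hedged usage sketch for the wrapper above: the client constructor, region, and credential placeholders follow the SDK's usual pattern and are assumptions rather than part of this file.

// Hypothetical caller; region and credentials are placeholders.
package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
	"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
)

func main() {
	client, err := ecs.NewClientWithAccessKey("cn-hangzhou", "<accessKeyId>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	request := ecs.CreateDescribeActivationsRequest()
	request.PageSize = requests.NewInteger(10)

	response, err := client.DescribeActivations(request)
	if err != nil {
		panic(err)
	}
	fmt.Printf("total activations: %d\n", response.TotalCount)
}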
mod.rs
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0. mod not_chunked_vec; mod scalar; mod vector; // Concrete eval types without a nullable wrapper. pub type Int = i64; pub type Real = ordered_float::NotNan<f64>; pub type Bytes = Vec<u8>; pub type BytesRef<'a> = &'a [u8]; pub use crate::codec::mysql::{json::JsonRef, Decimal, Duration, Json, JsonType, Time as DateTime}; pub use not_chunked_vec::NotChunkedVec; // Dynamic eval types. pub use self::scalar::{ScalarValue, ScalarValueRef}; pub use self::vector::{VectorValue, VectorValueExt}; use crate::EvalType; use crate::codec::convert::ConvertTo; use crate::expr::EvalContext; use tidb_query_common::error::Result; /// A trait of evaluating current concrete eval type into a MySQL logic value, represented by /// Rust's `bool` type. pub trait AsMySQLBool { /// Evaluates into a MySQL logic value. fn as_mysql_bool(&self, context: &mut EvalContext) -> Result<bool>; } impl AsMySQLBool for Int { #[inline] fn
(&self, _context: &mut EvalContext) -> Result<bool> { Ok(*self != 0) } } impl AsMySQLBool for Real { #[inline] fn as_mysql_bool(&self, _context: &mut EvalContext) -> Result<bool> { Ok(self.into_inner() != 0f64) } } impl<'a, T: AsMySQLBool> AsMySQLBool for &'a T { #[inline] fn as_mysql_bool(&self, context: &mut EvalContext) -> Result<bool> { (&**self).as_mysql_bool(context) } } impl AsMySQLBool for Bytes { #[inline] fn as_mysql_bool(&self, context: &mut EvalContext) -> Result<bool> { self.as_slice().as_mysql_bool(context) } } impl<'a> AsMySQLBool for BytesRef<'a> { #[inline] fn as_mysql_bool(&self, context: &mut EvalContext) -> Result<bool> { Ok(!self.is_empty() && ConvertTo::<f64>::convert(self, context)? != 0f64) } } impl<'a, T> AsMySQLBool for Option<&'a T> where T: AsMySQLBool, { fn as_mysql_bool(&self, context: &mut EvalContext) -> Result<bool> { match self { None => Ok(false), Some(ref v) => v.as_mysql_bool(context), } } } impl<'a> AsMySQLBool for JsonRef<'a> { fn as_mysql_bool(&self, _context: &mut EvalContext) -> Result<bool> { // TODO: This logic is not correct. See pingcap/tidb#9593 Ok(false) } } impl<'a> AsMySQLBool for Option<BytesRef<'a>> { fn as_mysql_bool(&self, context: &mut EvalContext) -> Result<bool> { match self { None => Ok(false), Some(ref v) => v.as_mysql_bool(context), } } } impl<'a> AsMySQLBool for Option<JsonRef<'a>> { fn as_mysql_bool(&self, context: &mut EvalContext) -> Result<bool> { match self { None => Ok(false), Some(ref v) => v.as_mysql_bool(context), } } } pub macro match_template_evaluable($t:tt, $($tail:tt)*) { match_template::match_template! { $t = [Int, Real, Decimal, Bytes, DateTime, Duration, Json], $($tail)* } } pub trait ChunkRef<'a, T: EvaluableRef<'a>>: Copy + Clone + std::fmt::Debug + Send + Sync { fn get_option_ref(self, idx: usize) -> Option<T>; fn phantom_data(self) -> Option<T>; } pub trait UnsafeRefInto<T> { /// # Safety /// /// This function uses `std::mem::transmute`. /// The only place that copr uses this function is in /// `tidb_query_vec_aggr`, together with a set of `update` macros. unsafe fn unsafe_into(self) -> T; } /// A trait of all types that can be used during evaluation (eval type). pub trait Evaluable: Clone + std::fmt::Debug + Send + Sync + 'static { const EVAL_TYPE: EvalType; /// Borrows this concrete type from a `ScalarValue` in the same type; /// panics if the varient mismatches. fn borrow_scalar_value(v: &ScalarValue) -> Option<&Self>; /// Borrows this concrete type from a `ScalarValueRef` in the same type; /// panics if the varient mismatches. fn borrow_scalar_value_ref(v: ScalarValueRef<'_>) -> Option<&Self>; /// Borrows a slice of this concrete type from a `VectorValue` in the same type; /// panics if the varient mismatches. fn borrow_vector_value(v: &VectorValue) -> &NotChunkedVec<Self>; } pub trait EvaluableRet: Clone + std::fmt::Debug + Send + Sync + 'static { const EVAL_TYPE: EvalType; /// Converts a vector of this concrete type into a `VectorValue` in the same type; /// panics if the varient mismatches. fn into_vector_value(vec: NotChunkedVec<Self>) -> VectorValue; } macro_rules! 
impl_evaluable_type { ($ty:tt) => { impl Evaluable for $ty { const EVAL_TYPE: EvalType = EvalType::$ty; #[inline] fn borrow_scalar_value(v: &ScalarValue) -> Option<&Self> { match v { ScalarValue::$ty(x) => x.as_ref(), _ => unimplemented!(), } } #[inline] fn borrow_scalar_value_ref<'a>(v: ScalarValueRef<'a>) -> Option<&'a Self> { match v { ScalarValueRef::$ty(x) => x, _ => unimplemented!(), } } #[inline] fn borrow_vector_value(v: &VectorValue) -> &NotChunkedVec<$ty> { match v { VectorValue::$ty(x) => x, _ => unimplemented!(), } } } }; } impl_evaluable_type! { Int } impl_evaluable_type! { Real } impl_evaluable_type! { Decimal } impl_evaluable_type! { DateTime } impl_evaluable_type! { Duration } macro_rules! impl_evaluable_ret { ($ty:tt) => { impl EvaluableRet for $ty { const EVAL_TYPE: EvalType = EvalType::$ty; #[inline] fn into_vector_value(vec: NotChunkedVec<Self>) -> VectorValue { VectorValue::from(vec) } } }; } impl_evaluable_ret! { Int } impl_evaluable_ret! { Real } impl_evaluable_ret! { Decimal } impl_evaluable_ret! { Bytes } impl_evaluable_ret! { DateTime } impl_evaluable_ret! { Duration } impl_evaluable_ret! { Json } pub trait EvaluableRef<'a>: Clone + std::fmt::Debug + Send + Sync { const EVAL_TYPE: EvalType; type ChunkedType: ChunkRef<'a, Self> + 'a; type EvaluableType: EvaluableRet; /// Borrows this concrete type from a `ScalarValue` in the same type; /// panics if the varient mismatches. fn borrow_scalar_value(v: &'a ScalarValue) -> Option<Self>; /// Borrows this concrete type from a `ScalarValueRef` in the same type; /// panics if the varient mismatches. fn borrow_scalar_value_ref(v: ScalarValueRef<'a>) -> Option<Self>; /// Borrows a slice of this concrete type from a `VectorValue` in the same type; /// panics if the varient mismatches. fn borrow_vector_value(v: &'a VectorValue) -> Self::ChunkedType; /// Convert this reference to owned type fn to_owned_value(self) -> Self::EvaluableType; fn from_owned_value(value: &'a Self::EvaluableType) -> Self; } impl<'a, T: Evaluable + EvaluableRet> EvaluableRef<'a> for &'a T { const EVAL_TYPE: EvalType = <T as Evaluable>::EVAL_TYPE; type ChunkedType = &'a NotChunkedVec<T>; type EvaluableType = T; #[inline] fn borrow_scalar_value(v: &'a ScalarValue) -> Option<Self> { Evaluable::borrow_scalar_value(v) } #[inline] fn borrow_scalar_value_ref(v: ScalarValueRef<'a>) -> Option<Self> { Evaluable::borrow_scalar_value_ref(v) } #[inline] fn borrow_vector_value(v: &'a VectorValue) -> &'a NotChunkedVec<T> { Evaluable::borrow_vector_value(v) } #[inline] fn to_owned_value(self) -> Self::EvaluableType { self.clone() } #[inline] fn from_owned_value(value: &'a T) -> Self { &value } } impl<'a, A: UnsafeRefInto<B>, B> UnsafeRefInto<Option<B>> for Option<A> { unsafe fn unsafe_into(self) -> Option<B> { self.map(|x| x.unsafe_into()) } } impl<'a, T: Evaluable + EvaluableRet> UnsafeRefInto<&'static T> for &'a T { unsafe fn unsafe_into(self) -> &'static T { std::mem::transmute(self) } } impl<'a> EvaluableRef<'a> for BytesRef<'a> { const EVAL_TYPE: EvalType = EvalType::Bytes; type EvaluableType = Bytes; type ChunkedType = &'a NotChunkedVec<Bytes>; #[inline] fn borrow_scalar_value(v: &'a ScalarValue) -> Option<Self> { match v { ScalarValue::Bytes(x) => x.as_ref().map(|x| x.as_slice()), _ => unimplemented!(), } } #[inline] fn borrow_scalar_value_ref(v: ScalarValueRef<'a>) -> Option<Self> { match v { ScalarValueRef::Bytes(x) => x, _ => unimplemented!(), } } #[inline] fn borrow_vector_value(v: &'a VectorValue) -> &'a NotChunkedVec<Bytes> { match v { VectorValue::Bytes(x) 
=> x, _ => unimplemented!(), } } #[inline] fn to_owned_value(self) -> Self::EvaluableType { self.to_vec() } #[inline] fn from_owned_value(value: &'a Bytes) -> Self { value.as_slice() } } impl<'a> UnsafeRefInto<BytesRef<'static>> for BytesRef<'a> { unsafe fn unsafe_into(self) -> BytesRef<'static> { std::mem::transmute(self) } } impl<'a> UnsafeRefInto<JsonRef<'static>> for JsonRef<'a> { unsafe fn unsafe_into(self) -> JsonRef<'static> { std::mem::transmute(self) } } impl<'a> EvaluableRef<'a> for JsonRef<'a> { const EVAL_TYPE: EvalType = EvalType::Json; type EvaluableType = Json; type ChunkedType = &'a NotChunkedVec<Json>; #[inline] fn borrow_scalar_value(v: &'a ScalarValue) -> Option<Self> { match v { ScalarValue::Json(x) => x.as_ref().map(|x| x.as_ref()), _ => unimplemented!(), } } #[inline] fn borrow_scalar_value_ref(v: ScalarValueRef<'a>) -> Option<Self> { match v { ScalarValueRef::Json(x) => x, _ => unimplemented!(), } } #[inline] fn borrow_vector_value(v: &VectorValue) -> &NotChunkedVec<Json> { match v { VectorValue::Json(x) => x, _ => unimplemented!(), } } #[inline] fn to_owned_value(self) -> Self::EvaluableType { self.to_owned() } #[inline] fn from_owned_value(value: &'a Json) -> Self { value.as_ref() } } pub trait IntoEvaluableRef<T>: Sized { /// Performs the conversion. fn into_evaluable_ref(self) -> T; } macro_rules! impl_into_evaluable_ref { ($ty:tt) => { impl<'a> IntoEvaluableRef<Option<&'a $ty>> for Option<&'a $ty> { fn into_evaluable_ref(self) -> Option<&'a $ty> { self } } }; } impl_into_evaluable_ref! { Int } impl_into_evaluable_ref! { Real } impl_into_evaluable_ref! { Decimal } impl_into_evaluable_ref! { DateTime } impl_into_evaluable_ref! { Duration } impl<'a> IntoEvaluableRef<Option<BytesRef<'a>>> for Option<&'a Bytes> { fn into_evaluable_ref(self) -> Option<BytesRef<'a>> { self.map(|x| x.as_slice()) } } impl<'a> IntoEvaluableRef<Option<JsonRef<'a>>> for Option<&'a Json> { fn into_evaluable_ref(self) -> Option<JsonRef<'a>> { self.map(|x| x.as_ref()) } } #[cfg(test)] mod tests { use super::*; use std::f64; #[test] fn test_bytes_as_bool() { let tests: Vec<(&'static [u8], Option<bool>)> = vec![ (b"", Some(false)), (b" 23", Some(true)), (b"-1", Some(true)), (b"1.11", Some(true)), (b"1.11.00", None), (b"xx", None), (b"0x00", None), (b"11.xx", None), (b"xx.11", None), ( b".0000000000000000000000000000000000000000000000000000001", Some(true), ), ]; let mut ctx = EvalContext::default(); for (i, (v, expect)) in tests.into_iter().enumerate() { let rb: Result<bool> = v.to_vec().as_mysql_bool(&mut ctx); match expect { Some(val) => { assert_eq!(rb.unwrap(), val); } None => { assert!( rb.is_err(), "index: {}, {:?} should not be converted, but got: {:?}", i, v, rb ); } } } // test overflow let mut ctx = EvalContext::default(); let val: Result<bool> = f64::INFINITY .to_string() .as_bytes() .to_vec() .as_mysql_bool(&mut ctx); assert!(val.is_err()); let mut ctx = EvalContext::default(); let val: Result<bool> = f64::NEG_INFINITY .to_string() .as_bytes() .to_vec() .as_mysql_bool(&mut ctx); assert!(val.is_err()); } #[test] fn test_real_as_bool() { let tests: Vec<(f64, Option<bool>)> = vec![ (0.0, Some(false)), (1.3, Some(true)), (-1.234, Some(true)), (0.000000000000000000000000000000001, Some(true)), (-0.00000000000000000000000000000001, Some(true)), (f64::MAX, Some(true)), (f64::MIN, Some(true)), (f64::MIN_POSITIVE, Some(true)), (f64::INFINITY, Some(true)), (f64::NEG_INFINITY, Some(true)), (f64::NAN, None), ]; let mut ctx = EvalContext::default(); for (f, expected) in tests { match Real::new(f) 
{ Ok(b) => { let r = b.as_mysql_bool(&mut ctx).unwrap(); assert_eq!(r, expected.unwrap()); } Err(_) => assert!(expected.is_none(), "{} to bool should fail", f,), } } } }
as_mysql_bool
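The test_bytes_as_bool table above pins down how raw bytes are coerced to a MySQL boolean: surrounding whitespace is ignored, the rest must parse as a single numeric literal, any non-zero value is truthy, and unparsable or overflowing input is an error. Purely as an illustration (this is not the TiKV implementation, and bytes_as_mysql_bool is a name invented for the sketch), the same decision table in Python:

import math

def bytes_as_mysql_bool(raw: bytes) -> bool:
    """Illustrative mirror of the behaviour exercised by test_bytes_as_bool."""
    text = raw.decode("utf-8").strip()
    if text == "":
        return False                       # b"" converts to false rather than erroring
    try:
        value = float(text)                # b"1.11.00", b"xx", b"0x00" all fail here
    except ValueError as exc:
        raise ValueError(f"cannot convert {raw!r} to bool") from exc
    if math.isinf(value):
        raise ValueError(f"{raw!r} overflows f64")   # the INFINITY / NEG_INFINITY cases
    return value != 0.0

assert bytes_as_mysql_bool(b" 23") and bytes_as_mysql_bool(b"-1")
assert not bytes_as_mysql_bool(b"")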
manage.py
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main():
if __name__ == '__main__': main()
"""Run administrative tasks.""" os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core_app.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv)
cli.go
// Copyright Fuzamei Corp. 2018 All Rights Reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cli import ( "fmt" "net/http" "os" "strings" "github.com/33cn/chain33/common/log" "github.com/33cn/chain33/common/version" "github.com/33cn/chain33/pluginmgr" "github.com/33cn/chain33/rpc/jsonclient" rpctypes "github.com/33cn/chain33/rpc/types" "github.com/33cn/chain33/system/dapp/commands" "github.com/33cn/chain33/types" "github.com/spf13/cobra" ) // Run runs the cli with the given RPC address, parachain name and config name. func Run(RPCAddr, ParaName, name string) { // the cli command only prints error-level logs to the console log.SetLogLevel("error") configPath := "" for i, arg := range os.Args[:] { if arg == "--conf" && i+1 <= len(os.Args)-1 { // --conf chain33.toml sets the path of the cli config file to read configPath = os.Args[i+1] break } if strings.HasPrefix(arg, "--conf=") { // --conf="chain33.toml" configPath = strings.TrimPrefix(arg, "--conf=") break } } if configPath == "" { if name == "" { configPath = "chain33.toml" } else { configPath = name + ".toml" } } exist, _ := pathExists(configPath) var chain33Cfg *types.Chain33Config if exist { chain33Cfg = types.NewChain33Config(types.ReadFile(configPath)) } else { cfgstring := types.GetDefaultCfgstring() if ParaName != "" { cfgstring = strings.Replace(cfgstring, "Title=\"local\"", fmt.Sprintf("Title=\"%s\"", ParaName), 1) cfgstring = strings.Replace(cfgstring, "FixTime=false", "CoinSymbol=\"para\"", 1) } chain33Cfg = types.NewChain33Config(cfgstring) } types.SetCliSysParam(chain33Cfg.GetTitle(), chain33Cfg) rootCmd := &cobra.Command{ Use: chain33Cfg.GetTitle() + "-cli", Short: chain33Cfg.GetTitle() + " client tools", Version: fmt.Sprintf("%s %s", version.GetVersion(), version.BuildTime), } closeCmd := &cobra.Command{ Use: "close", Short: "Close " + chain33Cfg.GetTitle(), Run: func(cmd *cobra.Command, args []string) { rpcLaddr, err := cmd.Flags().GetString("rpc_laddr") if err != nil { panic(err) } // rpc, _ := jsonrpc.NewJSONClient(rpcLaddr) // rpc.Call("Chain33.CloseQueue", nil, nil) var res rpctypes.Reply ctx := jsonclient.NewRPCCtx(rpcLaddr, "Chain33.CloseQueue", nil, &res) ctx.Run() }, } rootCmd.AddCommand( commands.CertCmd(), commands.AccountCmd(), commands.BlockCmd(), commands.CoinsCmd(), commands.ExecCmd(), commands.MempoolCmd(), commands.NetCmd(), commands.SeedCmd(), commands.StatCmd(), commands.TxCmd(), commands.WalletCmd(), commands.VersionCmd(), commands.SystemCmd(), commands.OneStepSendCmd(), commands.OneStepSendCertTxCmd(), commands.BlacklistCmd(), closeCmd, commands.AssetCmd(), commands.NoneCmd(), commands.BtcScriptCmd(), ) // test whether tls is enabled RPCAddr = testTLS(RPCAddr) pluginmgr.AddCmd(rootCmd) log.SetLogLevel("error") chain33Cfg.S("RPCAddr", RPCAddr) chain33Cfg.S("ParaName", ParaName) rootCmd.PersistentFlags().String("rpc_laddr", chain33Cfg.GStr("RPCAddr"), "http url") rootCmd.PersistentFlags().String("paraName", chain33Cfg.GStr("ParaName"), "parachain") rootCmd.PersistentFlags().String("title", chain33Cfg.GetTitle(), "get title name") rootCmd.PersistentFlags().MarkHidden("title") rootCmd.PersistentFlags().String("conf", "", "cli config") if err := rootCmd.Execute(); err != nil { fmt.Println(err) os.Exit(1) } } func testTLS(RPCAddr string) string { rpcaddr := RPCAddr if !strings.HasPrefix(rpcaddr, "http://") { return RPCAddr } // if http:// if rpcaddr[len(rpcaddr)-1] != '/' { rpcaddr += "/" } rpcaddr += "test" /* #nosec */ resp, err := http.Get(rpcaddr) if err != nil { return "https://" + RPCAddr[7:] } defer resp.Body.Close() if resp.StatusCode == 200 { return RPCAddr } return "https://" +
RPCAddr[7:] } func pathExists(path string) (bool, error) { _, err :=
ath) if err == nil { return true, nil } if os.IsNotExist(err) { return false, nil } return false, err }
os.Stat(p
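testTLS above picks between plain HTTP and TLS by probing the /test path of the RPC address and falling back to https when the probe fails or does not answer 200. A hypothetical Python sketch of that probing idea, using only the standard library (probe_rpc_scheme is an invented name, not part of chain33):

import urllib.error
import urllib.request

def probe_rpc_scheme(rpc_addr: str, timeout: float = 2.0) -> str:
    if not rpc_addr.startswith("http://"):
        return rpc_addr                        # already https (or something else): leave untouched
    probe_url = rpc_addr.rstrip("/") + "/test"
    try:
        with urllib.request.urlopen(probe_url, timeout=timeout) as resp:
            if resp.status == 200:
                return rpc_addr                # plain HTTP endpoint answered: keep http://
    except (urllib.error.URLError, OSError):
        pass                                   # unreachable over http: assume TLS below
    return "https://" + rpc_addr[len("http://"):]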
sampling.py
""" sampling.py We sample Metropolis-Hastings: * Random walk proposals * Langevin proposals * Langevin proposals with preconditioning * Hamiltonian MC * Hamiltonian MC with preconditioning NOTE: The functionality of this module is restricted to log-densities, i.e. densities of the form p(s) = exp(-E(s)). We work with E(s) only. The reason is that in Bayesian inference, evaluations of exp(-E(s)) are too instable in a numerical sense. """ import collections from abc import ABC, abstractmethod import numpy as np from difflikelihoods import logdensity def metropolishastings_rw(logpdf, nsamps, initstate, pwidth, ninits): """ Convenience function for Metropolis-Hastings sampling with random walk proposal kernel. """ logdens = logdensity.LogDensity(logpdf) rwmh = RandomWalkMH(logdens) return rwmh.sample_nd(nsamps, initstate, pwidth, ninits) def metropolishastings_lang(logpdf, loggrad, nsamps, initstate, pwidth, ninits): """ Convenience function for Metropolis-Hastings sampling with Langevin dynamics proposal kernel. """ logdens = logdensity.LogDensity(logpdf, loggrad) langmh = LangevinMH(logdens) return langmh.sample_nd(nsamps, initstate, pwidth, ninits) def metropolishastings_plang( logpdf, loggrad, loghess, nsamps, initstate, pwidth, ninits ): """ Convenience function for Metropolis-Hastings sampling with Riemannian (preconditioned) Langevin dynamics proposal kernel. """ logdens = logdensity.LogDensity(logpdf, loggrad, loghess) plangmh = PrecondLangevinMH(logdens) return plangmh.sample_nd(nsamps, initstate, pwidth, ninits) def metropolishastings_ham( logpdf, loggrad, nsamps, initstate, stepsize, nsteps, ninits ): """ Convenience function for Hamiltonian MCMC. """ logdens = logdensity.LogDensity(logpdf, loggrad) hmc = HamiltonianMC(logdens, nsteps) return hmc.sample_nd(nsamps, initstate, stepsize, ninits) def metropolishastings_pham( logpdf, loggrad, loghess, nsamps, initstate, stepsize, nsteps, ninits ): """ Convenience function for preconditioned Hamiltonian MCMC. """ logdens = logdensity.LogDensity(logpdf, loggrad, loghess) phmc = PrecondHamiltonianMC(logdens, nsteps) return phmc.sample_nd(nsamps, initstate, stepsize, ninits) # Convenience data structure. MCMCState = collections.namedtuple("MCMCState", "state logdens loggrad loghess") class MetropolisHastings(ABC): """ Abstract Metropolis-Hastings class. Contains everything but the proposal kernels. """ def __init__(self, logdens): """ Initialise MH sampler with a log-density function. 
Args: logdens: LogDensity object, evaluations of a negative log- density and derivatives """ self.logdens = logdens def sample_nd(self, nsamps, init_state, pwidth, ninits=None, *optional): """ """ assert init_state_is_array( init_state ), "Please enter a (d,) dimensional initial state" states, logprobs = np.zeros((nsamps, len(init_state))), np.zeros(nsamps) accepted = 0 if ninits is None: ninits = 0 currstate = self.evaluate_logdens(init_state) states[0], logprobs[0] = currstate.state, currstate.logdens for idx in range(1, nsamps): if idx < ninits: proposal, corrfact = self.generate_proposal(currstate, pwidth) else: proposal, corrfact = self.generate_proposal(currstate, 0.2 * pwidth) currstate, is_accept = self.accept_or_reject( currstate, proposal, corrfact, idx, ninits ) states[idx], logprobs[idx] = ( currstate.state.copy(), currstate.logdens.copy(), ) if idx >= ninits: accepted = accepted + int(is_accept) ratio = accepted / nsamps return states, logprobs, ratio def evaluate_logdens(self, loc): """ """ logdenseval = self.logdens.eval(loc) if self.logdens.has_gradient: gradeval = self.logdens.gradeval(loc) else: gradeval = 0 if self.logdens.has_hessian: hesseval = self.logdens.hesseval(loc) else: hesseval = 0 return MCMCState( state=loc, logdens=logdenseval, loggrad=gradeval, loghess=hesseval ) def accept_or_reject(self, currstate, proposal, corrfact, idx, ninits): """ """ logaccprob = self.get_logaccprob(currstate, proposal, corrfact, idx, ninits) if logaccprob < 0 or logaccprob < -np.log(np.random.rand()): state = proposal is_accept = True else: state = currstate is_accept = False return state, is_accept def get_logaccprob(self, currstate, proposal, corrfact, idx, ninits): """ Returns NEGATIVE log acceptance probability, i.e. corrected proposal - corrected currstate """ if idx < ninits: corrfact = -corrfact return (corrfact) + (proposal.logdens - currstate.logdens) @abstractmethod def generate_proposal(self, *args): """ """ pass def init_state_is_array(init_state): """ Checks whether init_state is compliant with an Nd algorithm. That is, whether init_state is an (d,) np.ndarray. """ assert isinstance(init_state, np.ndarray), "Please enter init_state of shape (d,)" assert len(init_state.shape) == 1, "Please enter init_state of shape (d,)" return True class
(MetropolisHastings): """ """ def __init__(self, logdens): """ """ MetropolisHastings.__init__(self, logdens) def generate_proposal(self, currstate, pwidth): """ """ newloc = self.sample_randomwalk(currstate.state, pwidth) proposal = self.evaluate_logdens(newloc) corrfact = 0 return proposal, corrfact def sample_randomwalk(self, mean, var): """ """ return mean + np.sqrt(var) * np.random.randn(len(mean)) class LangevinMH(MetropolisHastings): """ """ def __init__(self, logdens): """ """ MetropolisHastings.__init__(self, logdens) def generate_proposal(self, currstate, pwidth): """ """ newloc = self.sample_langevin(currstate, pwidth) proposal = self.evaluate_logdens(newloc) corrfact = self.compute_corrfact_langevin(currstate, proposal, pwidth) return proposal, corrfact def sample_langevin(self, currstate, pwidth): """ """ noise = np.random.randn(len(currstate.state)) return ( currstate.state - pwidth * currstate.loggrad + np.sqrt(2 * pwidth) * noise ) def compute_corrfact_langevin(self, currstate, proposal, pwidth): """ """ lognomin = self.kernel_langevin(currstate, proposal, pwidth) logdenom = self.kernel_langevin(proposal, currstate, pwidth) return lognomin - logdenom def kernel_langevin(self, state1, state2, pwidth): """ """ state2_dyn = state2.state - pwidth * state2.loggrad dist = np.linalg.norm(state1.state - state2_dyn) ** 2 return 0.5 * dist / (2 * pwidth) class PrecondLangevinMH(MetropolisHastings): """ Preconditioning with (inverse) Hessian. """ def __init__(self, logdens): """ precondeval returns M (and not M^{-1}) as used in Cald&Gir """ MetropolisHastings.__init__(self, logdens) def generate_proposal(self, currstate, pwidth): """ """ newloc = self.sample_langevin(currstate, pwidth) proposal = self.evaluate_logdens(newloc) corrfact = self.compute_corrfact_langevin(currstate, proposal, pwidth) return proposal, corrfact def sample_langevin(self, currstate, pwidth): """ """ noise = np.random.multivariate_normal( np.zeros(len(currstate.loghess)), np.linalg.inv(currstate.loghess) ) prec_dyn = np.linalg.solve(currstate.loghess, currstate.loggrad) return currstate.state - pwidth * prec_dyn + np.sqrt(2 * pwidth) * noise def compute_corrfact_langevin(self, currstate, proposal, pwidth): """ """ lognomin = self.kernel_langevin(currstate, proposal, pwidth) logdenom = self.kernel_langevin(proposal, currstate, pwidth) return lognomin - logdenom def kernel_langevin(self, state1, state2, pwidth): """ """ prec_dyn = np.linalg.solve(state2.loghess, state2.loggrad) state2_dyn = state2.state - pwidth * prec_dyn difference = state1.state - state2_dyn return 0.5 * difference.dot(np.dot(state2.loghess, difference)) / (2 * pwidth) class HamiltonianMC(MetropolisHastings): """ """ def __init__(self, logdens, nsteps): """ """ MetropolisHastings.__init__(self, logdens) self.nsteps = nsteps def generate_proposal(self, currstate, pwidth): """ pwidth is used as stepsize for self.nsteps leapfrog steps. The correction factor is the quotient of the hamiltonian terms. 
""" momentum = np.random.multivariate_normal( np.zeros(len(currstate.state)), np.eye(len(currstate.state)) ) # hamilt = self.evaluate_hamiltonian(momentum, currstate) momentum_new, proposal = self.leapfrog_dynamics(momentum, currstate, pwidth) # prop_hamilt = self.evaluate_hamiltonian(momentum_new, proposal) corrfact = self.get_corrfact(momentum, momentum_new) return proposal, corrfact def leapfrog_dynamics(self, momentum, currstate, pwidth): """ """ proposal = currstate for idx in range(self.nsteps): momentum, proposal = self.compute_next_lfstep(momentum, proposal, pwidth) return momentum, proposal def compute_next_lfstep(self, momentum, proposal, pwidth): """ """ momentum = momentum - 0.5 * pwidth * proposal.loggrad pstate = proposal.state + pwidth * momentum proposal = self.evaluate_logdens(pstate) momentum = momentum - 0.5 * pwidth * proposal.loggrad return momentum, proposal def get_corrfact(self, mom_new, mom): """ """ return 0.5 * (mom_new.T @ mom_new - mom.T @ mom) class PrecondHamiltonianMC(MetropolisHastings): """ In fact, the true name would be either * Riemannian-Gaussian HMC: if the preconditioner depends on the state * Euclidean-Gaussian HMC: if the preconditioner is constant [Girolami and Calderhead, 2011; Betancourt, 2018] """ def __init__(self, logdens, nsteps): """ evalprecond returns M (and not M^{-1}) as used in Cald&Gir. M is the Hessian """ MetropolisHastings.__init__(self, logdens) self.nsteps = nsteps def generate_proposal(self, currstate, pwidth): """ pwidth is used as stepsize for self.nsteps leapfrog steps. The correction factor is the quotient of the hamiltonian terms. """ momentum = np.random.multivariate_normal( np.zeros(len(currstate.state)), currstate.loghess ) momentum_new, proposal = self.leapfrog_dynamics(momentum, currstate, pwidth) corrfact = self.get_corrfact(momentum, momentum_new, currstate, proposal) return proposal, corrfact def leapfrog_dynamics(self, momentum, currstate, pwidth): """ """ proposal = currstate for idx in range(self.nsteps): momentum, proposal = self.compute_next_lfstep(momentum, proposal, pwidth) return momentum, proposal def compute_next_lfstep(self, momentum, proposal, pwidth): """ """ momentum = momentum - 0.5 * pwidth * proposal.loggrad pstate = proposal.state + pwidth * np.linalg.solve(proposal.loghess, momentum) proposal = self.evaluate_logdens(pstate) momentum = momentum - 0.5 * pwidth * proposal.loggrad return momentum, proposal def get_corrfact(self, mom, mom_new, currstate, proposal): """ """ return 0.5 * ( mom_new.T @ np.linalg.solve(proposal.loghess, mom_new) + np.log(np.linalg.det(proposal.loghess)) - mom.T @ np.linalg.solve(currstate.loghess, mom) - np.log(np.linalg.det(currstate.loghess)) )
RandomWalkMH
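A hedged usage sketch for the convenience wrappers above, sampling a standard 2-d Gaussian with the random-walk sampler. Note that the module works with the energy E(s) = -log p(s), so logpdf must return E rather than log p. The target density, the parameter values and the import path are example choices made here, and running it requires the difflikelihoods package to be importable.

import numpy as np
# assumed module path for this file; adjust to wherever sampling.py lives
from difflikelihoods.sampling import metropolishastings_rw

def energy(s):
    return 0.5 * np.dot(s, s)       # E(s) = -log p(s) up to an additive constant

samples, logprobs, ratio = metropolishastings_rw(
    logpdf=energy,
    nsamps=5000,
    initstate=np.zeros(2),          # sample_nd asserts a (d,) initial state
    pwidth=0.8,                     # random-walk proposal variance
    ninits=500,                     # early proposals use the full pwidth, later ones 0.2 * pwidth
)
print("acceptance ratio:", ratio)
print("post-burn-in mean:", samples[500:].mean(axis=0))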
0.8332e50f093c2413b5ca.hot-update.js
webpackHotUpdate(0,{10:function(t,e,i){"use strict";var d={data:()=>({columns:[{title:"序号",key:"id",width:100,fixed:"left"},{title:"名称",key:"name",width:400},{title:"缩略图",key:"thumbnail",width:200},{title:"路径",key:"path",width:200},{title:"上传时间",key:"upload_time",width:200},{title:"上传者",key:"upload_user",width:100},{title:"Action",key:"action",fixed:"right",width:120,render:(t,e)=>t("div",[t("Button",{props:{type:"text",size:"small"}},"View"),t("Button",{props:{type:"text",size:"small"}},"Edit")])}],data:[]}),mounted(){}};e.a=d}});
eventplay32.go
package update import ( "github.com/ff14wed/aetherometer/core/datasheet" "github.com/ff14wed/aetherometer/core/models" "github.com/ff14wed/aetherometer/core/store" "github.com/ff14wed/xivnet/v3" "github.com/ff14wed/xivnet/v3/datatypes" ) func init() { registerIngressHandler(new(datatypes.EventPlay32), newDirectorPlaySceneUpdate) } // TODO: Add testing func newDirectorPlaySceneUpdate(streamID int, b *xivnet.Block, d *datasheet.Collection) store.Update { data := b.Data.(*datatypes.EventPlay32) if craftState, matches := data.Data.(datatypes.CraftState); matches { unknownFlags := craftState.U6[0] >> 16 action := d.ActionData.GetAction(craftState.CraftAction) return craftingInfoUpdate{ streamID: streamID, craftingInfo: &models.CraftingInfo{ LastCraftActionID: int(craftState.CraftAction), LastCraftActionName: action.Name, StepNum: int(craftState.StepNum), Progress: int(craftState.Progress), ProgressDelta: int(craftState.ProgressDelta), Quality: int(craftState.Quality), QualityDelta: int(craftState.QualityDelta), HqChance: int(craftState.HQChance), Durability: int(craftState.Durability), DurabilityDelta: int(craftState.DurabilityDelta), CurrentCondition: int(craftState.CurrentCondition), PreviousCondition: int(craftState.PreviousCondition), ReuseProc: (unknownFlags == 0x4000), }, } } return nil }
type craftingInfoUpdate struct { streamID int craftingInfo *models.CraftingInfo } func (u craftingInfoUpdate) ModifyStore(streams *store.Streams) ([]models.StreamEvent, []models.EntityEvent, error) { stream, found := streams.Map[u.streamID] if !found { return nil, nil, ErrorStreamNotFound } if u.craftingInfo != nil && (u.craftingInfo.Recipe == nil || u.craftingInfo.Recipe.ID == 0) { if stream.CraftingInfo != nil { u.craftingInfo.Recipe = stream.CraftingInfo.Recipe } } stream.CraftingInfo = u.craftingInfo return []models.StreamEvent{ { StreamID: u.streamID, Type: models.UpdateCraftingInfo{ CraftingInfo: u.craftingInfo, }, }, }, nil, nil }
utils.py
# -*- coding:utf-8 -*- # pylint: disable=no-member import csv import numpy as np from scipy.sparse.linalg import eigs from .metrics import mean_absolute_error, mean_squared_error, masked_mape_np def search_data(sequence_length, num_of_batches, label_start_idx, num_for_predict, units, points_per_hour): ''' Parameters ---------- sequence_length: int, length of all history data num_of_batches: int, the number of batches will be used for training label_start_idx: int, the first index of predicting target num_for_predict: int, the number of points will be predicted for each sample units: int, week: 7 * 24, day: 24, recent(hour): 1 points_per_hour: int, number of points per hour, depends on data Returns ---------- list[(start_idx, end_idx)] ''' if points_per_hour < 0: raise ValueError("points_per_hour should be greater than 0!") if label_start_idx + num_for_predict > sequence_length: return None x_idx = [] for i in range(1, num_of_batches + 1): start_idx = label_start_idx - points_per_hour * units * i end_idx = start_idx + num_for_predict # wd: this could overlap with 'label_start_index', e.g. when num_for_predict is larger than 12 (one hour) if start_idx >= 0: x_idx.append((start_idx, end_idx)) else: return None if len(x_idx) != num_of_batches: return None return x_idx[::-1] def get_sample_indices(data_sequence, num_of_weeks, num_of_days, num_of_hours, label_start_idx, num_for_predict, points_per_hour=12): """ Parameters ---------- data_sequence: np.ndarray shape is (sequence_length, num_of_vertices, num_of_features) num_of_weeks, num_of_days, num_of_hours: int label_start_idx: int, the first index of predicting target num_for_predict: int, the number of points will be predicted for each sample points_per_hour: int, default 12, number of points per hour Returns ---------- week_sample: np.ndarray shape is (num_of_weeks * points_per_hour, # wd: points_per_hour should be num_for_predict?? 
num_of_vertices, num_of_features) day_sample: np.ndarray shape is (num_of_days * points_per_hour, num_of_vertices, num_of_features) hour_sample: np.ndarray shape is (num_of_hours * points_per_hour, num_of_vertices, num_of_features) target: np.ndarray shape is (num_for_predict, num_of_vertices, num_of_features) """ week_indices = search_data(data_sequence.shape[0], num_of_weeks, label_start_idx, num_for_predict, 7 * 24, points_per_hour) if not week_indices: return None day_indices = search_data(data_sequence.shape[0], num_of_days, label_start_idx, num_for_predict, 24, points_per_hour) if not day_indices: return None hour_indices = search_data(data_sequence.shape[0], num_of_hours, label_start_idx, num_for_predict, 1, points_per_hour) if not hour_indices: return None week_sample = np.concatenate([data_sequence[i: j] for i, j in week_indices], axis=0) day_sample = np.concatenate([data_sequence[i: j] for i, j in day_indices], axis=0) hour_sample = np.concatenate([data_sequence[i: j] for i, j in hour_indices], axis=0) target = data_sequence[label_start_idx: label_start_idx + num_for_predict] return week_sample, day_sample, hour_sample, target def get_adjacency_matrix(distance_df_filename, num_of_vertices): ''' Parameters ---------- distance_df_filename: str, path of the csv file contains edges information num_of_vertices: int, the number of vertices Returns ---------- A: np.ndarray, adjacency matrix ''' with open(distance_df_filename, 'r') as f: reader = csv.reader(f) header = f.__next__() edges = [(int(i[0]), int(i[1])) for i in reader] A = np.zeros((int(num_of_vertices), int(num_of_vertices)), dtype=np.float32) for i, j in edges: A[i, j] = 1 return A def scaled_Laplacian(W): ''' compute \tilde{L} Parameters ---------- W: np.ndarray, shape is (N, N), N is the num of vertices Returns ---------- scaled_Laplacian: np.ndarray, shape (N, N) ''' assert W.shape[0] == W.shape[1] D = np.diag(np.sum(W, axis=1)) L = D - W lambda_max = eigs(L, k=1, which='LR')[0].real return (2 * L) / lambda_max - np.identity(W.shape[0]) def
(L_tilde, K): ''' compute a list of chebyshev polynomials from T_0 to T_{K-1} Parameters ---------- L_tilde: scaled Laplacian, np.ndarray, shape (N, N) K: the maximum order of chebyshev polynomials Returns ---------- cheb_polynomials: list[np.ndarray], length: K, from T_0 to T_{K-1} ''' N = L_tilde.shape[0] cheb_polynomials = [np.identity(N), L_tilde.copy()] for i in range(2, K): cheb_polynomials.append( 2 * L_tilde * cheb_polynomials[i - 1] - cheb_polynomials[i - 2]) return cheb_polynomials def compute_val_loss(net, val_loader, loss_function, sw, epoch, device): """ compute mean loss on validation set Parameters ---------- net: model val_loader: DataLoader loss_function: func sw: SummaryWriter. TODO: to be implemented epoch: int, current epoch """ val_loader_length = len(val_loader) tmp = [] for index, (val_w, val_d, val_r, val_t) in enumerate(val_loader): val_w = val_w.to(device) val_d = val_d.to(device) val_r = val_r.to(device) val_t = val_t.to(device) output = net([val_w, val_d, val_r]) l = loss_function(output, val_t) # l is a tensor, with single value tmp.append(l.item()) print('validation batch %s / %s, loss: %.2f' % ( index + 1, val_loader_length, l.item())) validation_loss = sum(tmp) / len(tmp) if sw: sw.add_scalar(tag='validation_loss', value=validation_loss, global_step=epoch) print('epoch: %s, validation loss: %.2f' % (epoch, validation_loss)) def predict(net, test_loader, device): """ predict Parameters ---------- net: model test_loader: DataLoader Returns ---------- prediction: np.ndarray, shape is (num_of_samples, num_of_vertices, num_for_predict) """ test_loader_length = len(test_loader) prediction = [] for index, (test_w, test_d, test_r, _) in enumerate(test_loader): test_w = test_w.to(device) test_d = test_d.to(device) test_r = test_r.to(device) prediction.append(net([test_w, test_d, test_r]).cpu().numpy()) print('predicting testing set batch %s / %s' % (index + 1, test_loader_length)) prediction = np.concatenate(prediction, 0) return prediction def evaluate(net, test_loader, true_value, num_of_vertices, sw, epoch, device): """ compute MAE, RMSE, MAPE scores of the prediction for 3, 6, 12 points on testing set Parameters ---------- net: model test_loader: DataLoader true_value: np.ndarray, all ground truth of testing set shape is (num_of_samples, num_for_predict, num_of_vertices) num_of_vertices: int, number of vertices sw: SummaryWriter. TODO: to be implemented. epoch: int, current epoch """ prediction = predict(net, test_loader, device) prediction = (prediction.transpose((0, 2, 1)) .reshape(prediction.shape[0], -1)) for i in [3, 6, 12]: print('current epoch: %s, predict %s points' % (epoch, i)) mae = mean_absolute_error(true_value[:, : i * num_of_vertices], prediction[:, : i * num_of_vertices]) rmse = mean_squared_error(true_value[:, : i * num_of_vertices], prediction[:, : i * num_of_vertices]) ** 0.5 mape = masked_mape_np(true_value[:, : i * num_of_vertices], prediction[:, : i * num_of_vertices], 0) print('MAE: %.2f' % (mae)) print('RMSE: %.2f' % (rmse)) print('MAPE: %.2f' % (mape)) print() if sw: sw.add_scalar(tag='MAE_%s_points' % (i), value=mae, global_step=epoch) sw.add_scalar(tag='RMSE_%s_points' % (i), value=rmse, global_step=epoch) sw.add_scalar(tag='MAPE_%s_points' % (i), value=mape, global_step=epoch)
cheb_polynomial
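Illustrative wiring of scaled_Laplacian and the cheb_polynomial completion above on a toy 3-node path graph, just to make the shapes concrete; the adjacency matrix is invented for the example and the two functions from the utils module above are assumed to be in scope.

import numpy as np

W = np.array([[0., 1., 0.],
              [1., 0., 1.],
              [0., 1., 0.]], dtype=np.float32)   # undirected path graph on 3 vertices

L_tilde = scaled_Laplacian(W)          # (3, 3), spectrum rescaled into roughly [-1, 1]
cheb = cheb_polynomial(L_tilde, K=3)   # [T_0, T_1, T_2], each of shape (3, 3)
assert len(cheb) == 3 and all(t.shape == (3, 3) for t in cheb)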
rit.rs
# ! [ doc = "Repetitive Interrupt Timer (RIT)" ] use core::ops::Deref; use cortex_m::peripheral::Peripheral; # [ doc = "Repetitive Interrupt Timer (RIT)" ] pub const RITIMER: Peripheral<RITIMER> = unsafe { Peripheral::new(1074462720) }; use vcell::VolatileCell; # [ doc = r" Register block" ] # [ repr ( C ) ] pub struct RegisterBlock { # [ doc = "0x00 - Compare register" ] pub compval: COMPVAL, # [ doc = "0x04 - Mask register. This register holds the 32-bit mask value. A 1 written to any bit will force a compare on the corresponding bit of the counter and compare register." ] pub mask: MASK, # [ doc = "0x08 - Control register." ] pub ctrl: CTRL, # [ doc = "0x0c - 32-bit counter" ] pub counter: COUNTER, } # [ doc = "Compare register" ] pub struct COMPVAL { register: VolatileCell<u32>, } # [ doc = "Compare register" ] pub mod compval { # [ doc = r" Value read from the register" ] pub struct R { bits: u32, } # [ doc = r" Value to write to the register" ] pub struct W { bits: u32, } impl super::COMPVAL { # [ doc = r" Modifies the contents of the register" ] # [ inline ( always ) ] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } # [ doc = r" Reads the contents of the register" ] # [ inline ( always ) ] pub fn read(&self) -> R { R { bits: self.register.get() } } # [ doc = r" Writes to the register" ] # [ inline ( always ) ] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } # [ doc = r" Writes the reset value to the register" ] # [ inline ( always ) ] pub fn reset(&self) { self.write(|w| w) } } # [ doc = r" Value of the field" ] pub struct RICOMPR { bits: u32, } impl RICOMPR { # [ doc = r" Value of the field as raw bits" ] # [ inline ( always ) ] pub fn bits(&self) -> u32 { self.bits } } # [ doc = r" Proxy" ] pub struct _RICOMPW<'a> { w: &'a mut W, } impl<'a> _RICOMPW<'a> { # [ doc = r" Writes raw bits to the field" ] # [ inline ( always ) ] pub unsafe fn bits(self, value: u32) -> &'a mut W { const MASK: u32 = 4294967295; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { # [ doc = r" Value of the register as raw bits" ] # [ inline ( always ) ] pub fn bits(&self) -> u32 { self.bits } # [ doc = "Bits 0:31 - Compare register. Holds the compare value which is compared to the counter." ] # [ inline ( always ) ] pub fn ricomp(&self) -> RICOMPR { let bits = { const MASK: u32 = 4294967295; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u32 }; RICOMPR { bits } } } impl W { # [ doc = r" Reset value of the register" ] # [ inline ( always ) ] pub fn reset_value() -> W { W { bits: 4294967295 } } # [ doc = r" Writes raw bits to the register" ] # [ inline ( always ) ] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } # [ doc = "Bits 0:31 - Compare register. Holds the compare value which is compared to the counter." ] # [ inline ( always ) ] pub fn ricomp(&mut self) -> _RICOMPW { _RICOMPW { w: self } } } } # [ doc = "Mask register. This register holds the 32-bit mask value. A 1 written to any bit will force a compare on the corresponding bit of the counter and compare register." ] pub struct MASK { register: VolatileCell<u32>, } # [ doc = "Mask register. This register holds the 32-bit mask value. 
A 1 written to any bit will force a compare on the corresponding bit of the counter and compare register." ] pub mod mask { # [ doc = r" Value read from the register" ] pub struct R { bits: u32, } # [ doc = r" Value to write to the register" ] pub struct W { bits: u32, } impl super::MASK { # [ doc = r" Modifies the contents of the register" ] # [ inline ( always ) ] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } # [ doc = r" Reads the contents of the register" ] # [ inline ( always ) ] pub fn read(&self) -> R { R { bits: self.register.get() } } # [ doc = r" Writes to the register" ] # [ inline ( always ) ] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } # [ doc = r" Writes the reset value to the register" ] # [ inline ( always ) ] pub fn reset(&self) { self.write(|w| w) } } # [ doc = r" Value of the field" ] pub struct RIMASKR { bits: u32, } impl RIMASKR { # [ doc = r" Value of the field as raw bits" ] # [ inline ( always ) ] pub fn bits(&self) -> u32 { self.bits } } # [ doc = r" Proxy" ] pub struct _RIMASKW<'a> { w: &'a mut W, } impl<'a> _RIMASKW<'a> { # [ doc = r" Writes raw bits to the field" ] # [ inline ( always ) ] pub unsafe fn bits(self, value: u32) -> &'a mut W { const MASK: u32 = 4294967295; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { # [ doc = r" Value of the register as raw bits" ] # [ inline ( always ) ] pub fn bits(&self) -> u32 { self.bits } # [ doc = "Bits 0:31 - Mask register. This register holds the 32-bit mask value. A one written to any bit overrides the result of the comparison for the corresponding bit of the counter and compare register (causes the comparison of the register bits to be always true)." ] # [ inline ( always ) ] pub fn rimask(&self) -> RIMASKR { let bits = { const MASK: u32 = 4294967295; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u32 }; RIMASKR { bits } } } impl W { # [ doc = r" Reset value of the register" ] # [ inline ( always ) ] pub fn reset_value() -> W { W { bits: 0 } } # [ doc = r" Writes raw bits to the register" ] # [ inline ( always ) ] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } # [ doc = "Bits 0:31 - Mask register. This register holds the 32-bit mask value. A one written to any bit overrides the result of the comparison for the corresponding bit of the counter and compare register (causes the comparison of the register bits to be always true)." ] # [ inline ( always ) ] pub fn rimask(&mut self) -> _RIMASKW { _RIMASKW { w: self } } } } # [ doc = "Control register." ] pub struct CTRL { register: VolatileCell<u32>, } # [ doc = "Control register." 
] pub mod ctrl { # [ doc = r" Value read from the register" ] pub struct R { bits: u32, } # [ doc = r" Value to write to the register" ] pub struct W { bits: u32, } impl super::CTRL { # [ doc = r" Modifies the contents of the register" ] # [ inline ( always ) ] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } # [ doc = r" Reads the contents of the register" ] # [ inline ( always ) ] pub fn read(&self) -> R { R { bits: self.register.get() } } # [ doc = r" Writes to the register" ] # [ inline ( always ) ] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } # [ doc = r" Writes the reset value to the register" ] # [ inline ( always ) ] pub fn reset(&self) { self.write(|w| w) } } # [ doc = "Possible values of the field `RITINT`" ] # [ derive ( Clone , Copy , Debug , PartialEq ) ] pub enum RITINTR { # [ doc = "This bit is set to 1 by hardware whenever the counter value equals the masked compare value specified by the contents of RICOMPVAL and RIMASK registers. Writing a 1 to this bit will clear it to 0. Writing a 0 has no effect." ] THIS_BIT_IS_SET_TO_1, # [ doc = "The counter value does not equal the masked compare value." ] THE_COUNTER_VALUE_DO, } impl RITINTR { # [ doc = r" Returns `true` if the bit is clear (0)" ] # [ inline ( always ) ] pub fn is_clear(&self) -> bool { !self.bit() } # [ doc = r" Returns `true` if the bit is set (1)" ] # [ inline ( always ) ] pub fn is_set(&self) -> bool { self.bit() } # [ doc = r" Value of the field as raw bits" ] # [ inline ( always ) ] pub fn bit(&self) -> bool { match *self { RITINTR::THIS_BIT_IS_SET_TO_1 => true, RITINTR::THE_COUNTER_VALUE_DO => false, } } # [ allow ( missing_docs ) ] # [ doc ( hidden ) ] # [ inline ( always ) ] pub fn _from(value: bool) -> RITINTR { match value { true => RITINTR::THIS_BIT_IS_SET_TO_1, false => RITINTR::THE_COUNTER_VALUE_DO, } } # [ doc = "Checks if the value of the field is `THIS_BIT_IS_SET_TO_1`" ] # [ inline ( always ) ] pub fn is_this_bit_is_set_to_1(&self) -> bool { *self == RITINTR::THIS_BIT_IS_SET_TO_1 } # [ doc = "Checks if the value of the field is `THE_COUNTER_VALUE_DO`" ] # [ inline ( always ) ] pub fn is_the_counter_value_do(&self) -> bool { *self == RITINTR::THE_COUNTER_VALUE_DO } } # [ doc = "Possible values of the field `RITENCLR`" ] # [ derive ( Clone , Copy , Debug , PartialEq ) ] pub enum RITENCLRR { # [ doc = "The timer will be cleared to 0 whenever the counter value equals the masked compare value specified by the contents of RICOMPVAL and RIMASK registers. This will occur on the same clock that sets the interrupt flag." ] THE_TIMER_WILL_BE_CL, # [ doc = "The timer will not be cleared to 0." 
] THE_TIMER_WILL_NOT_B, } impl RITENCLRR { # [ doc = r" Returns `true` if the bit is clear (0)" ] # [ inline ( always ) ] pub fn is_clear(&self) -> bool { !self.bit() } # [ doc = r" Returns `true` if the bit is set (1)" ] # [ inline ( always ) ] pub fn is_set(&self) -> bool { self.bit() } # [ doc = r" Value of the field as raw bits" ] # [ inline ( always ) ] pub fn bit(&self) -> bool { match *self { RITENCLRR::THE_TIMER_WILL_BE_CL => true, RITENCLRR::THE_TIMER_WILL_NOT_B => false, } } # [ allow ( missing_docs ) ] # [ doc ( hidden ) ] # [ inline ( always ) ] pub fn _from(value: bool) -> RITENCLRR { match value { true => RITENCLRR::THE_TIMER_WILL_BE_CL, false => RITENCLRR::THE_TIMER_WILL_NOT_B, } } # [ doc = "Checks if the value of the field is `THE_TIMER_WILL_BE_CL`" ] # [ inline ( always ) ] pub fn is_the_timer_will_be_cl(&self) -> bool { *self == RITENCLRR::THE_TIMER_WILL_BE_CL } # [ doc = "Checks if the value of the field is `THE_TIMER_WILL_NOT_B`" ] # [ inline ( always ) ] pub fn is_the_timer_will_not_b(&self) -> bool { *self == RITENCLRR::THE_TIMER_WILL_NOT_B } } # [ doc = "Possible values of the field `RITENBR`" ] # [ derive ( Clone , Copy , Debug , PartialEq ) ] pub enum RITENBRR { # [ doc = "The timer is halted when the processor is halted for debugging." ] THE_TIMER_IS_HALTED_, # [ doc = "Debug has no effect on the timer operation." ] DEBUG_HAS_NO_EFFECT_, } impl RITENBRR { # [ doc = r" Returns `true` if the bit is clear (0)" ] # [ inline ( always ) ] pub fn is_clear(&self) -> bool { !self.bit() } # [ doc = r" Returns `true` if the bit is set (1)" ] # [ inline ( always ) ] pub fn is_set(&self) -> bool { self.bit() } # [ doc = r" Value of the field as raw bits" ] # [ inline ( always ) ] pub fn bit(&self) -> bool { match *self { RITENBRR::THE_TIMER_IS_HALTED_ => true, RITENBRR::DEBUG_HAS_NO_EFFECT_ => false, } } # [ allow ( missing_docs ) ] # [ doc ( hidden ) ] # [ inline ( always ) ] pub fn _from(value: bool) -> RITENBRR { match value { true => RITENBRR::THE_TIMER_IS_HALTED_, false => RITENBRR::DEBUG_HAS_NO_EFFECT_, } } # [ doc = "Checks if the value of the field is `THE_TIMER_IS_HALTED_`" ] # [ inline ( always ) ] pub fn is_the_timer_is_halted_(&self) -> bool { *self == RITENBRR::THE_TIMER_IS_HALTED_ } # [ doc = "Checks if the value of the field is `DEBUG_HAS_NO_EFFECT_`" ] # [ inline ( always ) ] pub fn is_debug_has_no_effect_(&self) -> bool { *self == RITENBRR::DEBUG_HAS_NO_EFFECT_ } } # [ doc = "Possible values of the field `RITEN`" ] # [ derive ( Clone , Copy , Debug , PartialEq ) ] pub enum RITENR { # [ doc = "Timer enabled. This can be overruled by a debug halt if enabled in bit 2." ] TIMER_ENABLED_THIS_, # [ doc = "Timer disabled." 
] TIMER_DISABLED_, } impl RITENR { # [ doc = r" Returns `true` if the bit is clear (0)" ] # [ inline ( always ) ] pub fn is_clear(&self) -> bool { !self.bit() } # [ doc = r" Returns `true` if the bit is set (1)" ] # [ inline ( always ) ] pub fn is_set(&self) -> bool { self.bit() } # [ doc = r" Value of the field as raw bits" ] # [ inline ( always ) ] pub fn bit(&self) -> bool { match *self { RITENR::TIMER_ENABLED_THIS_ => true, RITENR::TIMER_DISABLED_ => false, } } # [ allow ( missing_docs ) ] # [ doc ( hidden ) ] # [ inline ( always ) ] pub fn _from(value: bool) -> RITENR { match value { true => RITENR::TIMER_ENABLED_THIS_, false => RITENR::TIMER_DISABLED_, } } # [ doc = "Checks if the value of the field is `TIMER_ENABLED_THIS_`" ] # [ inline ( always ) ] pub fn is_timer_enabled_this_(&self) -> bool { *self == RITENR::TIMER_ENABLED_THIS_ } # [ doc = "Checks if the value of the field is `TIMER_DISABLED_`" ] # [ inline ( always ) ] pub fn is_timer_disabled_(&self) -> bool { *self == RITENR::TIMER_DISABLED_ } } # [ doc = "Values that can be written to the field `RITINT`" ] pub enum RITINTW { # [ doc = "This bit is set to 1 by hardware whenever the counter value equals the masked compare value specified by the contents of RICOMPVAL and RIMASK registers. Writing a 1 to this bit will clear it to 0. Writing a 0 has no effect." ] THIS_BIT_IS_SET_TO_1, # [ doc = "The counter value does not equal the masked compare value." ] THE_COUNTER_VALUE_DO, } impl RITINTW { # [ allow ( missing_docs ) ] # [ doc ( hidden ) ] # [ inline ( always ) ] pub fn _bits(&self) -> bool
} # [ doc = r" Proxy" ] pub struct _RITINTW<'a> { w: &'a mut W, } impl<'a> _RITINTW<'a> { # [ doc = r" Writes `variant` to the field" ] # [ inline ( always ) ] pub fn variant(self, variant: RITINTW) -> &'a mut W { { self.bit(variant._bits()) } } # [ doc = "This bit is set to 1 by hardware whenever the counter value equals the masked compare value specified by the contents of RICOMPVAL and RIMASK registers. Writing a 1 to this bit will clear it to 0. Writing a 0 has no effect." ] # [ inline ( always ) ] pub fn this_bit_is_set_to_1(self) -> &'a mut W { self.variant(RITINTW::THIS_BIT_IS_SET_TO_1) } # [ doc = "The counter value does not equal the masked compare value." ] # [ inline ( always ) ] pub fn the_counter_value_do(self) -> &'a mut W { self.variant(RITINTW::THE_COUNTER_VALUE_DO) } # [ doc = r" Writes raw bits to the field" ] # [ inline ( always ) ] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } # [ doc = "Values that can be written to the field `RITENCLR`" ] pub enum RITENCLRW { # [ doc = "The timer will be cleared to 0 whenever the counter value equals the masked compare value specified by the contents of RICOMPVAL and RIMASK registers. This will occur on the same clock that sets the interrupt flag." ] THE_TIMER_WILL_BE_CL, # [ doc = "The timer will not be cleared to 0." ] THE_TIMER_WILL_NOT_B, } impl RITENCLRW { # [ allow ( missing_docs ) ] # [ doc ( hidden ) ] # [ inline ( always ) ] pub fn _bits(&self) -> bool { match *self { RITENCLRW::THE_TIMER_WILL_BE_CL => true, RITENCLRW::THE_TIMER_WILL_NOT_B => false, } } } # [ doc = r" Proxy" ] pub struct _RITENCLRW<'a> { w: &'a mut W, } impl<'a> _RITENCLRW<'a> { # [ doc = r" Writes `variant` to the field" ] # [ inline ( always ) ] pub fn variant(self, variant: RITENCLRW) -> &'a mut W { { self.bit(variant._bits()) } } # [ doc = "The timer will be cleared to 0 whenever the counter value equals the masked compare value specified by the contents of RICOMPVAL and RIMASK registers. This will occur on the same clock that sets the interrupt flag." ] # [ inline ( always ) ] pub fn the_timer_will_be_cl(self) -> &'a mut W { self.variant(RITENCLRW::THE_TIMER_WILL_BE_CL) } # [ doc = "The timer will not be cleared to 0." ] # [ inline ( always ) ] pub fn the_timer_will_not_b(self) -> &'a mut W { self.variant(RITENCLRW::THE_TIMER_WILL_NOT_B) } # [ doc = r" Writes raw bits to the field" ] # [ inline ( always ) ] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 1; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } # [ doc = "Values that can be written to the field `RITENBR`" ] pub enum RITENBRW { # [ doc = "The timer is halted when the processor is halted for debugging." ] THE_TIMER_IS_HALTED_, # [ doc = "Debug has no effect on the timer operation." ] DEBUG_HAS_NO_EFFECT_, } impl RITENBRW { # [ allow ( missing_docs ) ] # [ doc ( hidden ) ] # [ inline ( always ) ] pub fn _bits(&self) -> bool { match *self { RITENBRW::THE_TIMER_IS_HALTED_ => true, RITENBRW::DEBUG_HAS_NO_EFFECT_ => false, } } } # [ doc = r" Proxy" ] pub struct _RITENBRW<'a> { w: &'a mut W, } impl<'a> _RITENBRW<'a> { # [ doc = r" Writes `variant` to the field" ] # [ inline ( always ) ] pub fn variant(self, variant: RITENBRW) -> &'a mut W { { self.bit(variant._bits()) } } # [ doc = "The timer is halted when the processor is halted for debugging." 
] # [ inline ( always ) ] pub fn the_timer_is_halted_(self) -> &'a mut W { self.variant(RITENBRW::THE_TIMER_IS_HALTED_) } # [ doc = "Debug has no effect on the timer operation." ] # [ inline ( always ) ] pub fn debug_has_no_effect_(self) -> &'a mut W { self.variant(RITENBRW::DEBUG_HAS_NO_EFFECT_) } # [ doc = r" Writes raw bits to the field" ] # [ inline ( always ) ] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 2; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } # [ doc = "Values that can be written to the field `RITEN`" ] pub enum RITENW { # [ doc = "Timer enabled. This can be overruled by a debug halt if enabled in bit 2." ] TIMER_ENABLED_THIS_, # [ doc = "Timer disabled." ] TIMER_DISABLED_, } impl RITENW { # [ allow ( missing_docs ) ] # [ doc ( hidden ) ] # [ inline ( always ) ] pub fn _bits(&self) -> bool { match *self { RITENW::TIMER_ENABLED_THIS_ => true, RITENW::TIMER_DISABLED_ => false, } } } # [ doc = r" Proxy" ] pub struct _RITENW<'a> { w: &'a mut W, } impl<'a> _RITENW<'a> { # [ doc = r" Writes `variant` to the field" ] # [ inline ( always ) ] pub fn variant(self, variant: RITENW) -> &'a mut W { { self.bit(variant._bits()) } } # [ doc = "Timer enabled. This can be overruled by a debug halt if enabled in bit 2." ] # [ inline ( always ) ] pub fn timer_enabled_this_(self) -> &'a mut W { self.variant(RITENW::TIMER_ENABLED_THIS_) } # [ doc = "Timer disabled." ] # [ inline ( always ) ] pub fn timer_disabled_(self) -> &'a mut W { self.variant(RITENW::TIMER_DISABLED_) } # [ doc = r" Writes raw bits to the field" ] # [ inline ( always ) ] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 3; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { # [ doc = r" Value of the register as raw bits" ] # [ inline ( always ) ] pub fn bits(&self) -> u32 { self.bits } # [ doc = "Bit 0 - Interrupt flag" ] # [ inline ( always ) ] pub fn ritint(&self) -> RITINTR { RITINTR::_from({ const MASK: bool = true; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } # [ doc = "Bit 1 - Timer enable clear" ] # [ inline ( always ) ] pub fn ritenclr(&self) -> RITENCLRR { RITENCLRR::_from({ const MASK: bool = true; const OFFSET: u8 = 1; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } # [ doc = "Bit 2 - Timer enable for debug" ] # [ inline ( always ) ] pub fn ritenbr(&self) -> RITENBRR { RITENBRR::_from({ const MASK: bool = true; const OFFSET: u8 = 2; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } # [ doc = "Bit 3 - Timer enable." ] # [ inline ( always ) ] pub fn riten(&self) -> RITENR { RITENR::_from({ const MASK: bool = true; const OFFSET: u8 = 3; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } } impl W { # [ doc = r" Reset value of the register" ] # [ inline ( always ) ] pub fn reset_value() -> W { W { bits: 12 } } # [ doc = r" Writes raw bits to the register" ] # [ inline ( always ) ] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } # [ doc = "Bit 0 - Interrupt flag" ] # [ inline ( always ) ] pub fn ritint(&mut self) -> _RITINTW { _RITINTW { w: self } } # [ doc = "Bit 1 - Timer enable clear" ] # [ inline ( always ) ] pub fn ritenclr(&mut self) -> _RITENCLRW { _RITENCLRW { w: self } } # [ doc = "Bit 2 - Timer enable for debug" ] # [ inline ( always ) ] pub fn ritenbr(&mut self) -> _RITENBRW { _RITENBRW { w: self } } # [ doc = "Bit 3 - Timer enable." 
] # [ inline ( always ) ] pub fn riten(&mut self) -> _RITENW { _RITENW { w: self } } } } # [ doc = "32-bit counter" ] pub struct COUNTER { register: VolatileCell<u32>, } # [ doc = "32-bit counter" ] pub mod counter { # [ doc = r" Value read from the register" ] pub struct R { bits: u32, } # [ doc = r" Value to write to the register" ] pub struct W { bits: u32, } impl super::COUNTER { # [ doc = r" Modifies the contents of the register" ] # [ inline ( always ) ] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } # [ doc = r" Reads the contents of the register" ] # [ inline ( always ) ] pub fn read(&self) -> R { R { bits: self.register.get() } } # [ doc = r" Writes to the register" ] # [ inline ( always ) ] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } # [ doc = r" Writes the reset value to the register" ] # [ inline ( always ) ] pub fn reset(&self) { self.write(|w| w) } } # [ doc = r" Value of the field" ] pub struct RICOUNTERR { bits: u32, } impl RICOUNTERR { # [ doc = r" Value of the field as raw bits" ] # [ inline ( always ) ] pub fn bits(&self) -> u32 { self.bits } } # [ doc = r" Proxy" ] pub struct _RICOUNTERW<'a> { w: &'a mut W, } impl<'a> _RICOUNTERW<'a> { # [ doc = r" Writes raw bits to the field" ] # [ inline ( always ) ] pub unsafe fn bits(self, value: u32) -> &'a mut W { const MASK: u32 = 4294967295; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { # [ doc = r" Value of the register as raw bits" ] # [ inline ( always ) ] pub fn bits(&self) -> u32 { self.bits } # [ doc = "Bits 0:31 - 32-bit up counter. Counts continuously unless RITEN bit in RICTRL register is cleared or debug mode is entered (if enabled by the RITNEBR bit in RICTRL). Can be loaded to any value in software." ] # [ inline ( always ) ] pub fn ricounter(&self) -> RICOUNTERR { let bits = { const MASK: u32 = 4294967295; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u32 }; RICOUNTERR { bits } } } impl W { # [ doc = r" Reset value of the register" ] # [ inline ( always ) ] pub fn reset_value() -> W { W { bits: 0 } } # [ doc = r" Writes raw bits to the register" ] # [ inline ( always ) ] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } # [ doc = "Bits 0:31 - 32-bit up counter. Counts continuously unless RITEN bit in RICTRL register is cleared or debug mode is entered (if enabled by the RITNEBR bit in RICTRL). Can be loaded to any value in software." ] # [ inline ( always ) ] pub fn ricounter(&mut self) -> _RICOUNTERW { _RICOUNTERW { w: self } } } } # [ doc = "Repetitive Interrupt Timer (RIT)" ] pub struct RITIMER { register_block: RegisterBlock, } impl Deref for RITIMER { type Target = RegisterBlock; fn deref(&self) -> &RegisterBlock { &self.register_block } }
{ match *self { RITINTW::THIS_BIT_IS_SET_TO_1 => true, RITINTW::THE_COUNTER_VALUE_DO => false, } }
main.js
/* eslint-disable @typescript-eslint/no-var-requires */ /* eslint-disable import/no-commonjs */ require('v8-compile-cache'); const { app, BrowserWindow, ipcMain, protocol, screen, session } = require('electron'); const path = require('path'); const Store = require('electron-store'); const args = process.argv.slice(1); const big = args.some(val => val === '--big'); const dev = args.some(val => val === '--serve'); const activateListeners = require('./helper/listener'); let window; let locale; let url; if (!dev) { const createProtocol = require('./helper/protocol'); const scheme = 'app'; protocol.registerSchemesAsPrivileged([{ scheme: scheme, privileges: { standard: true } }]); createProtocol(scheme, path.join(__dirname, 'dist')); locale = require('./helper/locale.js').getLocale(); } app.commandLine.appendSwitch('touch-events', 'enabled'); function createWindow() { const _store = new Store(); if (!dev) { session.defaultSession.webRequest.onHeadersReceived((details, callback) => { callback({ responseHeaders: { ...details.responseHeaders, // TODO: re-enable // "Content-Security-Policy": ["script-src 'self'"], }, }); }); } const mainScreen = screen.getPrimaryDisplay(); window = new BrowserWindow({ width: dev ? (big ? 1500 : 1200) : mainScreen.size.width, height: dev ? (big ? 600 : 450) : mainScreen.size.height, frame: dev, backgroundColor: '#353b48', webPreferences: {
contextIsolation: false, }, icon: path.join(__dirname, 'dist', 'assets', 'icon', 'icon.png'), }); if (dev) { url = 'http://localhost:4200'; window.webContents.openDevTools(); } else { url = `file://${__dirname}/dist/${locale}/index.html`; window.setFullScreen(true); } window.loadURL(url); activateListeners(ipcMain, window, app, url); window.on('closed', () => { window = null; }); } app.on('ready', createWindow); app.on('window-all-closed', () => { app.quit(); }); app.on('activate', () => { if (window === null) { createWindow(); } });
nodeIntegration: true, enableRemoteModule: true,
redirect_test.go
package httpd_test import ( "fmt" "github.com/zenreach/redirector/httpd" "net/http" "net/http/httptest" "testing" ) func newRedirectRequest(t *testing.T, url, host, proto string) *http.Request { req, err := http.NewRequest("GET", url, nil) if err != nil { t.Fatalf("failed to create request: %s", err) } if host != "" { req.Header.Set("Host", host) } if proto != "" { req.Header.Set("X-Forwarded-Proto", proto) } return req } func TestHTTPSUpgrade(t *testing.T) { t.Parallel() type testEntry struct { URL string Proto string Status int Location string } tests := []testEntry{ // HTTPS redirect with no header {URL: "http://example.com", Status: 307, Location: "https://example.com"}, {URL: "http://example.com/", Status: 307, Location: "https://example.com/"}, {URL: "http://example.com/no/slash", Status: 307, Location: "https://example.com/no/slash"}, {URL: "http://example.com/trailing/slash/", Status: 307, Location: "https://example.com/trailing/slash/"}, {URL: "http://127.0.0.1", Status: 307, Location: "https://127.0.0.1"}, // no redirect with no header {URL: "https://example.com", Status: 404}, {URL: "https://example.com/", Status: 404}, {URL: "https://127.0.0.1", Status: 404}, // HTTPS redirect with header {URL: "http://example.com", Proto: "http", Status: 307, Location: "https://example.com"}, {URL: "http://example.com", Proto: "HTTP", Status: 307, Location: "https://example.com"}, // no redirect with header {URL: "http://example.com", Proto: "https", Status: 404}, {URL: "http://example.com", Proto: "HTTPS", Status: 404}, // with port {URL: "http://example.com:8080", Status: 307, Location: "https://example.com:8080"}, {URL: "http://127.0.0.1:8080", Status: 307, Location: "https://127.0.0.1:8080"}, {URL: "https://example.com:8080", Status: 404}, {URL: "https://127.0.0.1:8080", Status: 404}, {URL: "http://example.com:8080", Proto: "http", Status: 307, Location: "https://example.com:8080"}, {URL: "http://127.0.0.1:8080", Proto: "http", Status: 307, Location: "https://127.0.0.1:8080"}, // strange situations {URL: "http://127.0.0.1:80", Status: 307, Location: "https://127.0.0.1:80"}, } handler := httpd.NewRedirectHandler(true, false) for n, item := range tests { test := item t.Run(fmt.Sprintf("Test%d", n), func(t *testing.T) { t.Parallel() w := httptest.NewRecorder() r := newRedirectRequest(t, test.URL, "", test.Proto) handler.ServeHTTP(w, r) if w.Code != test.Status { t.Errorf("wrong status code: %d != %d", w.Code, test.Status) } location := w.HeaderMap.Get("Location") if test.Status == 307 && location != test.Location { t.Errorf("wrong Location: %s != %s", location, test.Location) } }) } } func TestWWWUpgrade(t *testing.T) { t.Parallel() type testEntry struct { URL string Host string Status int Location string } tests := []testEntry{ // redirect with no header {URL: "http://example.com", Status: 307, Location: "http://www.example.com"}, {URL: "http://example.com/", Status: 307, Location: "http://www.example.com/"}, // no redirect with no header {URL: "http://www.example.com", Status: 404}, {URL: "http://www.example.com/", Status: 404}, // redirect with header {URL: "http://localhost", Host: "example.com", Status: 307, Location: "http://www.example.com"}, {URL: "http://localhost/", Host: "example.com", Status: 307, Location: "http://www.example.com/"}, // no redirect with header {URL: "http://localhost", Host: "www.example.com", Status: 404}, {URL: "http://localhost/", Host: "www.example.com", Status: 404}, // https not stripped {URL: "https://example.com", Status: 307, Location: 
"https://www.example.com"}, {URL: "https://example.com/", Status: 307, Location: "https://www.example.com/"}, // ip address not prepended {URL: "http://127.0.0.1", Status: 404}, {URL: "http://127.0.0.1/", Status: 404}, {URL: "https://127.0.0.1", Status: 404}, {URL: "https://127.0.0.1/", Status: 404}, // with port {URL: "http://example.com:8080", Status: 307, Location: "http://www.example.com:8080"}, {URL: "http://www.example.com:8080", Status: 404}, {URL: "http://localhost:8080", Host: "example.com", Status: 307, Location: "http://www.example.com:8080"}, {URL: "http://localhost:8080", Host: "www.example.com", Status: 404}, {URL: "https://example.com:8080", Status: 307, Location: "https://www.example.com:8080"}, {URL: "http://127.0.0.1:8080", Status: 404}, } handler := httpd.NewRedirectHandler(false, true) for n, item := range tests { test := item t.Run(fmt.Sprintf("Test%d", n), func(t *testing.T) { t.Parallel() w := httptest.NewRecorder() r := newRedirectRequest(t, test.URL, test.Host, "") handler.ServeHTTP(w, r) if w.Code != test.Status { t.Errorf("wrong status code: %d != %d", w.Code, test.Status) } location := w.HeaderMap.Get("Location") if test.Status == 307 && location != test.Location { t.Errorf("wrong Location: %s != %s", location, test.Location) } }) } } func TestBothUpgrades(t *testing.T) { t.Parallel() type testEntry struct { URL string Host string Proto string Status int Location string } tests := []testEntry{ // no redirect with no headers {URL: "https://www.example.com", Status: 404}, {URL: "https://www.example.com/", Status: 404}, // no redirect with headers {URL: "http://127.0.0.1", Proto: "https", Host: "www.example.com", Status: 404}, // redirect with no headers {URL: "http://example.com", Status: 307, Location: "https://www.example.com"}, {URL: "https://example.com", Status: 307, Location: "https://www.example.com"}, {URL: "http://www.example.com", Status: 307, Location: "https://www.example.com"}, // redirect with host header {URL: "http://127.0.0.1", Host: "example.com", Status: 307, Location: "https://www.example.com"}, {URL: "https://127.0.0.1", Host: "example.com", Status: 307, Location: "https://www.example.com"}, // redirect with proto header {URL: "http://example.com", Proto: "http", Status: 307, Location: "https://www.example.com"}, {URL: "http://www.example.com", Proto: "http", Status: 307, Location: "https://www.example.com"}, // redirect with headers {URL: "http://127.0.0.1", Proto: "http", Host: "example.com", Status: 307, Location: "https://www.example.com"}, {URL: "http://example.com", Proto: "http", Host: "example.com", Status: 307, Location: "https://www.example.com"}, } handler := httpd.NewRedirectHandler(true, true) for n, item := range tests { test := item t.Run(fmt.Sprintf("Test%d", n), func(t *testing.T) { t.Parallel() w := httptest.NewRecorder() r := newRedirectRequest(t, test.URL, test.Host, test.Proto) handler.ServeHTTP(w, r) if w.Code != test.Status { t.Errorf("wrong status code: %d != %d", w.Code, test.Status) } location := w.HeaderMap.Get("Location") if test.Status == 307 && location != test.Location
}) } }
{ t.Errorf("wrong Location: %s != %s", location, test.Location) }
edgegridauth.go
/*
Copyright 2019 The Jetstack cert-manager contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package akamai

import (
	"bytes"
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"sort"
	"strings"
	"time"
	"unicode"
)

// EdgeGridAuth holds all values required to perform Akamai API Client Authentication.
// See https://developer.akamai.com/introduction/Client_Auth.html.
type EdgeGridAuth struct {
	ClientToken   string
	ClientSecret  string
	AccessToken   string
	HeadersToSign []string
	MaxBody       int
	now           func() time.Time
	createNonce   func() (string, error)
}

type signingData struct {
	timestamp  string
	authHeader string
	dataToSign string
}

// edgeGridAuthTimeFormat is used for timestamps in request signatures.
const edgeGridAuthTimeFormat = "20060102T15:04:05-0700" // yyyyMMddTHH:mm:ss+0000

const NoMaxBody = -1

// NewEdgeGridAuth returns a new request signer for Akamai EdgeGrid
func NewEdgeGridAuth(clientToken, clientSecret, accessToken string, headersToSign ...string) *EdgeGridAuth {
	return &EdgeGridAuth{
		ClientToken:   clientToken,
		ClientSecret:  clientSecret,
		AccessToken:   accessToken,
		HeadersToSign: headersToSign,
		MaxBody:       NoMaxBody,
		now:           time.Now,
		createNonce:   createRandomNonce,
	}
}

// SignRequest calculates the signature for Akamai Open API and adds it as the Authorization header.
// The Authorization header starts with the signing algorithm moniker (name of the algorithm) used to sign the request.
// The moniker below identifies EdgeGrid V1, hash message authentication code, SHA–256 as the hash standard.
// This moniker is then followed by a space and an ordered list of name value pairs with each field separated by a semicolon.
func (e *EdgeGridAuth) SignRequest(req *http.Request) error {
	signingData, err := e.signingData(req)
	if err != nil {
		return err
	}

	req.Header.Set("Authorization", fmt.Sprintf(
		"%ssignature=%s",
		signingData.authHeader,
		e.calculateRequestSignature(signingData)))
	return nil
}

func (e *EdgeGridAuth) calculateRequestSignature(signingData *signingData) string {
	return computeSignature(
		signingData.dataToSign,
		e.signingKey(signingData.timestamp))
}

func (e *EdgeGridAuth) signingData(req *http.Request) (*signingData, error) {
	nonce, err := e.createNonce()
	if err != nil {
		return nil, err
	}

	timestamp := e.now().UTC().Format(edgeGridAuthTimeFormat)
	authHeader := fmt.Sprintf("EG1-HMAC-SHA256 client_token=%s;access_token=%s;timestamp=%s;nonce=%s;",
		e.ClientToken, e.AccessToken, timestamp, nonce)

	return &signingData{
		timestamp:  timestamp,
		authHeader: authHeader,
		dataToSign: e.dataToSign(req, authHeader),
	}, nil
}

// dataToSign includes the information from the HTTP request that is relevant to ensuring that the request is authentic.
// This data set is comprised of the request data combined with the authorization header value (excluding the signature field,
// but including the ; right before the signature field).
func (e *EdgeGridAuth) dataToSign(req *http.Request, authHeader string) string {
	var buffer bytes.Buffer
	buffer.WriteString(req.Method)
	buffer.WriteRune('\t')
	buffer.WriteString(req.URL.Scheme)
	buffer.WriteRune('\t')
	buffer.WriteString(req.URL.Host)
	buffer.WriteRune('\t')
	buffer.WriteString(relativeURL(req.URL))
	buffer.WriteRune('\t')
	buffer.WriteString(e.canonicalizedHeaders(req))
	buffer.WriteRune('\t')
	buffer.WriteString(e.computeBodyHash(req))
	buffer.WriteRune('\t')
	buffer.WriteString(authHeader)
	return buffer.String()
}

// signingKey is derived from the client secret.
// The signing key is computed as the base64 encoding of the SHA–256 HMAC of the timestamp string
// (the field value included in the HTTP authorization header described above) with the client secret as the key.
func (e *EdgeGridAuth) signingKey(timestamp string) string {
	return computeSignature(timestamp, e.ClientSecret)
}

// relativeURL is the part of the URL that starts from the root path and includes the query string, with the handling of the following special cases:
// If the path is null or empty, set it to / (forward-slash).
// If the path does not start with /, add / to the beginning.
func relativeURL(url *url.URL) string {
	r
computeBodyHash returns the base64-encoded SHA–256 hash of the POST body.
// For any other request methods, this field is empty. But the tab separator (\t) must be included.
// The size of the POST body must be less than or equal to the value specified by the service.
// Any request that does not meet this criterion SHOULD be rejected during the signing process,
// as the request will be rejected by EdgeGrid.
func (e *EdgeGridAuth) computeBodyHash(req *http.Request) string {
	if req.Body != nil {
		bodyBytes, _ := ioutil.ReadAll(req.Body)
		req.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))

		if req.Method == http.MethodPost && len(bodyBytes) > 0 {
			dataToHash := bodyBytes
			if e.MaxBody != NoMaxBody && len(dataToHash) > e.MaxBody {
				dataToHash = dataToHash[0:e.MaxBody]
			}

			sha256Sum := sha256.Sum256(dataToHash)
			return base64.StdEncoding.EncodeToString(sha256Sum[:])
		}
	}

	return ""
}

// canonicalizedHeaders returns the request headers as a canonicalized string.
//
// The protocol does not support multiple request headers with the same header name.
// Such requests SHOULD be rejected during the signing process. Otherwise, EdgeGrid
// will not produce the intended results by rejecting such requests or removing all
// (but one) duplicated headers.
//
// Header names are case-insensitive per rfc2616.
//
// For each entry in the list of headers designated by the service provider to include
// in the signature in the specified order, the canonicalization of the request header
// is done as follows:
//
// Get the first header value for the name.
// Trim the leading and trailing white spaces.
// Replace all repeated white spaces with a single space.
// Concatenate the name:value pairs with the tab (\t) separator (name field is all in lower case).
// Terminate the headers with another tab (\t) separator.
//
// NOTE: The canonicalized data is used for creating the signature only, as this step
// might alter the header value. If a header in the list is not present in the request,
// or the header value is empty, nothing for that header, neither the name nor the tab
// separator, may be included.
func (e *EdgeGridAuth) canonicalizedHeaders(req *http.Request) string {
	if len(e.HeadersToSign) < 1 {
		return ""
	}

	var headerNamesToSign []string
	for headerName := range req.Header {
		for _, sign := range e.HeadersToSign {
			if strings.EqualFold(sign, headerName) {
				headerNamesToSign = append(headerNamesToSign, headerName)
				break
			}
		}
	}

	if len(headerNamesToSign) < 1 {
		return ""
	}

	sort.Strings(headerNamesToSign)

	var buffer bytes.Buffer
	for _, headerName := range headerNamesToSign {
		for _, c := range headerName {
			buffer.WriteRune(unicode.ToLower(c))
		}
		buffer.WriteRune(':')

		white := false
		empty := true
		for _, c := range req.Header.Get(headerName) {
			if unicode.IsSpace(c) {
				white = true
			} else {
				if white && !empty {
					buffer.WriteRune(' ')
				}
				buffer.WriteRune(unicode.ToLower(c))
				empty = false
				white = false
			}
		}
		buffer.WriteRune('\t')
	}

	return buffer.String()
}

// computeSignature is the base64-encoding of the SHA–256 HMAC of the data to sign with the signing key.
func computeSignature(message string, secret string) string {
	key := []byte(secret)
	h := hmac.New(sha256.New, key)
	h.Write([]byte(message))
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}

func createRandomNonce() (string, error) {
	bytes := make([]byte, 18)
	_, err := rand.Read(bytes)
	if err != nil {
		return "", err
	}

	return base64.URLEncoding.EncodeToString(bytes), nil
}
elativeURL := url.Path
	if relativeURL == "" {
		return "/"
	}

	if relativeURL[0] != '/' {
		relativeURL = "/" + relativeURL
	}

	if url.RawQuery != "" {
		relativeURL += "?"
		relativeURL += url.RawQuery
	}

	return relativeURL
}

//
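Putting the pieces of this file together, a caller signs a request by constructing an EdgeGridAuth and passing the outgoing request through SignRequest before sending it. The sketch below assumes it lives in the same akamai package as the code above; the endpoint, the credential strings, and the use of http.DefaultClient are placeholders, not real Akamai values.

// signExample is a usage sketch only; endpoint and credentials are placeholders.
func signExample() (*http.Response, error) {
	auth := NewEdgeGridAuth("client-token", "client-secret", "access-token")

	req, err := http.NewRequest("GET", "https://host.example.akamaiapis.net/some/api/path", nil)
	if err != nil {
		return nil, err
	}

	// Adds the "EG1-HMAC-SHA256 ..." Authorization header computed from the request data.
	if err := auth.SignRequest(req); err != nil {
		return nil, err
	}

	return http.DefaultClient.Do(req)
}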
benchmarks_test.go
package benchmarks

import (
	"fmt"
	"testing"

	"github.com/notargets/gocfd/model_problems/Euler2D"
)

var ipDefault = &Euler2D.InputParameters{
	Title:             "",
	CFL:               1,
	FluxType:          "Lax",
	InitType:          "freestream",
	PolynomialOrder:   0,
	FinalTime:         0,
	Minf:              0,
	Gamma:             1.4,
	Alpha:             0,
	BCs:               nil,
	LocalTimeStepping: false,
	MaxIterations:     5000,
	ImplicitSolver:    false,
	Limiter:           "",
}

func BenchmarkEulerSolve(b *testing.B) {
	var (
		plotMesh  = false
		pm        = &Euler2D.PlotMeta{Plot: false, StepsBeforePlot: 100}
		Nmax      = 2
		FinalTime = 0.1
		c         = make([]*Euler2D.Euler, Nmax+1)
	)
	ip := ipDefault
	ip.FinalTime = FinalTime
	ip.InitType = "ivortex"
	for n := 1; n <= Nmax; n++ {
		ip.PolynomialOrder = n
		//c[n] = Euler2D.NewEuler(FinalTime, n, "../../../DG2D/vortex-new.su2", 1.00, Euler2D.FLUX_LaxFriedrichs, Euler2D.IVORTEX, 0, 0, 1.4, 0, false, 5000, Euler2D.None, plotMesh, false, false)
		c[n] = Euler2D.NewEuler(ip, "../../../DG2D/vortex-new.su2", 0, plotMesh, false, false)
	}
	b.ResetTimer()
	// The benchmark loop
	for i := 0; i < b.N; i++ {
		// This is separate to enable easy performance and memory profiling
		for n := 1; n <= Nmax; n++ {
			c[n].Solve(pm)
		}
	}
}

func BenchmarkEulerGetFlowFunction(b *testing.B)
{
	ip := ipDefault
	ip.Minf = 1.
	var (
		q = [4]float64{1, 1, 1, 1}
		//c = Euler2D.NewEuler(1, 1, "", 1, Euler2D.FLUX_LaxFriedrichs, Euler2D.FREESTREAM, 1, 1, 1.4, 0, true, 1, Euler2D.None, false, false, false)
		c   = Euler2D.NewEuler(ip, "", 1, false, false, false)
		GM1 = c.FS.Gamma - 1
	)
	var p float64
	b.Run("direct compute", func(b *testing.B) {
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			qq := 0.5 * (q[1]*q[1] + q[2]*q[2]) / q[0]
			p = GM1 * (q[3] + qq)
		}
	})
	b.Run("Optimized function call", func(b *testing.B) {
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			p = c.FS.GetFlowFunctionQQ(q, Euler2D.StaticPressure)
		}
	})
	pressFunc := func(q [4]float64) (p float64) {
		qq := 0.5 * (q[1]*q[1] + q[2]*q[2]) / q[0]
		p = GM1 * (q[3] + qq)
		return
	}
	b.Run("inline function call", func(b *testing.B) {
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			p = pressFunc(q)
		}
	})
	pressFunc2 := func(q0, q1, q2, q3 float64) (p float64) {
		qq := 0.5 * (q1*q1 + q2*q2) / q0
		p = GM1 * (q3 + qq)
		return
	}
	b.Run("inline function call, discrete args", func(b *testing.B) {
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			p = pressFunc2(q[0], q[1], q[2], q[3])
		}
	})
	fmt.Printf("p = %8.5f\n", p)
}
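One detail worth flagging in both benchmarks above: ip := ipDefault copies the pointer, not the struct, so assignments such as ip.FinalTime, ip.InitType, and ip.Minf mutate the shared ipDefault and can leak settings from one benchmark into the next depending on run order. If isolation were wanted, a value copy would be enough; the helper below is a hypothetical sketch, assuming a shallow copy suffices because the only reference-typed field set here (BCs) is nil.

// freshParams returns a private copy of the shared defaults so that one
// benchmark's mutations do not affect the next (sketch only).
func freshParams() *Euler2D.InputParameters {
	ip := *ipDefault // shallow value copy of the defaults
	return &ip
}

Each benchmark could then start with ip := freshParams() in place of ip := ipDefault.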
window.py
import logging import operator import time import traceback from pathlib import Path from typing import List, Type, Set, Tuple, Optional from PyQt5.QtCore import QEvent, Qt, pyqtSignal from PyQt5.QtGui import QIcon, QWindowStateChangeEvent, QCursor from PyQt5.QtWidgets import QWidget, QVBoxLayout, QCheckBox, QHeaderView, QToolBar, \ QLabel, QPlainTextEdit, QProgressBar, QPushButton, QComboBox, QApplication, QListView, QSizePolicy, \ QMenu, QHBoxLayout from bauh.api import user from bauh.api.abstract.cache import MemoryCache from bauh.api.abstract.context import ApplicationContext from bauh.api.abstract.controller import SoftwareManager, SoftwareAction from bauh.api.abstract.model import SoftwarePackage from bauh.api.abstract.view import MessageType from bauh.api.http import HttpClient from bauh.api.paths import LOGS_DIR from bauh.commons.html import bold from bauh.context import set_theme from bauh.stylesheet import read_all_themes_metadata, ThemeMetadata from bauh.view.core.config import CoreConfigManager from bauh.view.core.tray_client import notify_tray from bauh.view.qt import dialog, commons, qt_utils from bauh.view.qt.about import AboutDialog from bauh.view.qt.apps_table import PackagesTable, UpgradeToggleButton from bauh.view.qt.commons import sum_updates_displayed from bauh.view.qt.components import new_spacer, IconButton, QtComponentsManager, to_widget, QSearchBar, \ QCustomMenuAction, QCustomToolbar from bauh.view.qt.dialog import ConfirmationDialog from bauh.view.qt.history import HistoryDialog from bauh.view.qt.info import InfoDialog from bauh.view.qt.root import RootDialog from bauh.view.qt.screenshots import ScreenshotsDialog from bauh.view.qt.settings import SettingsWindow from bauh.view.qt.thread import UpgradeSelected, RefreshApps, UninstallPackage, DowngradePackage, ShowPackageInfo, \ ShowPackageHistory, SearchPackages, InstallPackage, AnimateProgress, NotifyPackagesReady, FindSuggestions, \ ListWarnings, \ AsyncAction, LaunchPackage, ApplyFilters, CustomSoftwareAction, ShowScreenshots, CustomAction, \ NotifyInstalledLoaded, \ IgnorePackageUpdates, SaveTheme, StartAsyncAction from bauh.view.qt.view_model import PackageView, PackageViewStatus from bauh.view.util import util, resource from bauh.view.util.translation import I18n DARK_ORANGE = '#FF4500' # action ids ACTION_APPLY_FILTERS = 1 ACTION_SEARCH = 2 ACTION_INSTALL = 3 ACTION_UNINSTALL = 4 ACTION_INFO = 5 ACTION_HISTORY = 6 ACTION_DOWNGRADE = 7 ACTION_UPGRADE = 8 ACTION_LAUNCH = 9 ACTION_CUSTOM_ACTION = 10 ACTION_SCREENSHOTS = 11 ACTION_IGNORE_UPDATES = 12 # components ids SEARCH_BAR = 1 BT_INSTALLED = 2 BT_REFRESH = 3 BT_SUGGESTIONS = 4 BT_UPGRADE = 5 CHECK_UPDATES = 6 CHECK_APPS = 7 COMBO_TYPES = 8 COMBO_CATEGORIES = 9 INP_NAME = 10 CHECK_DETAILS = 11 BT_SETTINGS = 12 BT_CUSTOM_ACTIONS = 13 BT_ABOUT = 14 BT_THEMES = 15 # component groups ids GROUP_FILTERS = 1 GROUP_VIEW_INSTALLED = 2 GROUP_VIEW_SEARCH = 3 GROUP_UPPER_BAR = 4 GROUP_LOWER_BTS = 5 class ManageWindow(QWidget): signal_user_res = pyqtSignal(bool) signal_root_password = pyqtSignal(bool, str) signal_table_update = pyqtSignal() signal_stop_notifying = pyqtSignal() def __init__(self, i18n: I18n, icon_cache: MemoryCache, manager: SoftwareManager, screen_size, config: dict, context: ApplicationContext, http_client: HttpClient, logger: logging.Logger, icon: QIcon): super(ManageWindow, self).__init__() self.setObjectName('manage_window') self.comp_manager = QtComponentsManager() self.i18n = i18n self.logger = logger self.manager = manager self.working = False # 
restrict the number of threaded actions self.installed_loaded = False # used to control the state when the interface is set to not load the apps on startup self.pkgs = [] # packages current loaded in the table self.pkgs_available = [] # all packages loaded in memory self.pkgs_installed = [] # cached installed packages self.display_limit = config['ui']['table']['max_displayed'] self.icon_cache = icon_cache self.screen_size = screen_size self.config = config self.context = context self.http_client = http_client self.icon_app = icon self.setWindowIcon(self.icon_app) self.layout = QVBoxLayout() self.setLayout(self.layout) self.toolbar_status = QToolBar() self.toolbar_status.setObjectName('toolbar_status') self.toolbar_status.addWidget(new_spacer()) self.label_status = QLabel() self.label_status.setObjectName('label_status') self.label_status.setText('') self.toolbar_status.addWidget(self.label_status) self.search_bar = QSearchBar(search_callback=self.search) self.search_bar.set_placeholder(i18n['window_manage.search_bar.placeholder'] + "...") self.search_bar.set_tooltip(i18n['window_manage.search_bar.tooltip']) self.search_bar.set_button_tooltip(i18n['window_manage.search_bar.button_tooltip']) self.comp_manager.register_component(SEARCH_BAR, self.search_bar, self.toolbar_status.addWidget(self.search_bar)) self.toolbar_status.addWidget(new_spacer()) self.layout.addWidget(self.toolbar_status) self.toolbar_filters = QWidget() self.toolbar_filters.setObjectName('table_filters') self.toolbar_filters.setLayout(QHBoxLayout()) self.toolbar_filters.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed) self.toolbar_filters.setContentsMargins(0, 0, 0, 0) self.check_updates = QCheckBox() self.check_updates.setObjectName('check_updates') self.check_updates.setCursor(QCursor(Qt.PointingHandCursor)) self.check_updates.setText(self.i18n['updates'].capitalize()) self.check_updates.stateChanged.connect(self._handle_updates_filter) self.check_updates.sizePolicy().setRetainSizeWhenHidden(True) self.toolbar_filters.layout().addWidget(self.check_updates) self.comp_manager.register_component(CHECK_UPDATES, self.check_updates) self.check_apps = QCheckBox() self.check_apps.setObjectName('check_apps') self.check_apps.setCursor(QCursor(Qt.PointingHandCursor)) self.check_apps.setText(self.i18n['manage_window.checkbox.only_apps']) self.check_apps.setChecked(True) self.check_apps.stateChanged.connect(self._handle_filter_only_apps) self.check_apps.sizePolicy().setRetainSizeWhenHidden(True) self.toolbar_filters.layout().addWidget(self.check_apps) self.comp_manager.register_component(CHECK_APPS, self.check_apps) self.any_type_filter = 'any' self.cache_type_filter_icons = {} self.combo_filter_type = QComboBox() self.combo_filter_type.setObjectName('combo_types') self.combo_filter_type.setCursor(QCursor(Qt.PointingHandCursor)) self.combo_filter_type.setView(QListView()) self.combo_filter_type.view().setCursor(QCursor(Qt.PointingHandCursor)) self.combo_filter_type.setSizeAdjustPolicy(QComboBox.AdjustToContents) self.combo_filter_type.setEditable(True) self.combo_filter_type.lineEdit().setReadOnly(True) self.combo_filter_type.lineEdit().setAlignment(Qt.AlignCenter) self.combo_filter_type.activated.connect(self._handle_type_filter) self.combo_filter_type.addItem('--- {} ---'.format(self.i18n['type'].capitalize()), self.any_type_filter) self.combo_filter_type.sizePolicy().setRetainSizeWhenHidden(True) self.toolbar_filters.layout().addWidget(self.combo_filter_type) self.comp_manager.register_component(COMBO_TYPES, 
self.combo_filter_type) self.any_category_filter = 'any' self.combo_categories = QComboBox() self.combo_categories.setObjectName('combo_categories') self.combo_categories.setCursor(QCursor(Qt.PointingHandCursor)) self.combo_categories.setSizeAdjustPolicy(QComboBox.AdjustToContents) self.combo_categories.view().setCursor(QCursor(Qt.PointingHandCursor)) self.combo_categories.setEditable(True) self.combo_categories.lineEdit().setReadOnly(True) self.combo_categories.lineEdit().setAlignment(Qt.AlignCenter) self.combo_categories.activated.connect(self._handle_category_filter) self.combo_categories.sizePolicy().setRetainSizeWhenHidden(True) self.combo_categories.addItem('--- {} ---'.format(self.i18n['category'].capitalize()), self.any_category_filter) self.toolbar_filters.layout().addWidget(self.combo_categories) self.comp_manager.register_component(COMBO_CATEGORIES, self.combo_categories) self.input_name = QSearchBar(search_callback=self.begin_apply_filters) self.input_name.palette().swap(self.combo_categories.palette()) self.input_name.setObjectName('name_filter') self.input_name.set_placeholder(self.i18n['manage_window.name_filter.placeholder'] + '...') self.input_name.set_tooltip(self.i18n['manage_window.name_filter.tooltip']) self.input_name.set_button_tooltip(self.i18n['manage_window.name_filter.button_tooltip']) self.input_name.sizePolicy().setRetainSizeWhenHidden(True) self.toolbar_filters.layout().addWidget(self.input_name) self.comp_manager.register_component(INP_NAME, self.input_name) self.toolbar_filters.layout().addWidget(new_spacer()) toolbar_bts = [] bt_inst = QPushButton() bt_inst.setObjectName('bt_installed') bt_inst.setProperty('root', 'true') bt_inst.setCursor(QCursor(Qt.PointingHandCursor)) bt_inst.setToolTip(self.i18n['manage_window.bt.installed.tooltip']) bt_inst.setText(self.i18n['manage_window.bt.installed.text'].capitalize()) bt_inst.clicked.connect(self._begin_loading_installed) bt_inst.sizePolicy().setRetainSizeWhenHidden(True) toolbar_bts.append(bt_inst) self.toolbar_filters.layout().addWidget(bt_inst) self.comp_manager.register_component(BT_INSTALLED, bt_inst) bt_ref = QPushButton() bt_ref.setObjectName('bt_refresh') bt_ref.setProperty('root', 'true') bt_ref.setCursor(QCursor(Qt.PointingHandCursor)) bt_ref.setToolTip(i18n['manage_window.bt.refresh.tooltip']) bt_ref.setText(self.i18n['manage_window.bt.refresh.text']) bt_ref.clicked.connect(self.begin_refresh_packages) bt_ref.sizePolicy().setRetainSizeWhenHidden(True) toolbar_bts.append(bt_ref) self.toolbar_filters.layout().addWidget(bt_ref) self.comp_manager.register_component(BT_REFRESH, bt_ref) self.bt_upgrade = QPushButton() self.bt_upgrade.setProperty('root', 'true') self.bt_upgrade.setObjectName('bt_upgrade') self.bt_upgrade.setCursor(QCursor(Qt.PointingHandCursor)) self.bt_upgrade.setToolTip(i18n['manage_window.bt.upgrade.tooltip']) self.bt_upgrade.setText(i18n['manage_window.bt.upgrade.text']) self.bt_upgrade.clicked.connect(self.upgrade_selected) self.bt_upgrade.sizePolicy().setRetainSizeWhenHidden(True) toolbar_bts.append(self.bt_upgrade) self.toolbar_filters.layout().addWidget(self.bt_upgrade) self.comp_manager.register_component(BT_UPGRADE, self.bt_upgrade) # setting all buttons to the same size: bt_biggest_size = 0 for bt in toolbar_bts: bt_width = bt.sizeHint().width() if bt_width > bt_biggest_size: bt_biggest_size = bt_width for bt in toolbar_bts: bt_width = bt.sizeHint().width() if bt_biggest_size > bt_width: bt.setFixedWidth(bt_biggest_size) self.layout.addWidget(self.toolbar_filters) 
self.table_container = QWidget() self.table_container.setObjectName('table_container') self.table_container.setContentsMargins(0, 0, 0, 0) self.table_container.setLayout(QVBoxLayout()) self.table_container.layout().setContentsMargins(0, 0, 0, 0) self.table_apps = PackagesTable(self, self.icon_cache, download_icons=bool(self.config['download']['icons'])) self.table_apps.change_headers_policy() self.table_container.layout().addWidget(self.table_apps) self.layout.addWidget(self.table_container) self.toolbar_console = QWidget() self.toolbar_console.setObjectName('console_toolbar') self.toolbar_console.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed) self.toolbar_console.setLayout(QHBoxLayout()) self.toolbar_console.setContentsMargins(0, 0, 0, 0) self.check_details = QCheckBox() self.check_details.setObjectName('check_details') self.check_details.setCursor(QCursor(Qt.PointingHandCursor)) self.check_details.setText(self.i18n['manage_window.checkbox.show_details']) self.check_details.stateChanged.connect(self._handle_console) self.toolbar_console.layout().addWidget(self.check_details) self.comp_manager.register_component(CHECK_DETAILS, self.check_details) self.toolbar_console.layout().addWidget(new_spacer()) self.label_displayed = QLabel() self.label_displayed.setObjectName('apps_displayed') self.label_displayed.setCursor(QCursor(Qt.WhatsThisCursor)) self.label_displayed.setToolTip(self.i18n['manage_window.label.apps_displayed.tip']) self.toolbar_console.layout().addWidget(self.label_displayed) self.label_displayed.hide() self.layout.addWidget(self.toolbar_console) self.textarea_details = QPlainTextEdit(self) self.textarea_details.setObjectName('textarea_details') self.textarea_details.setProperty('console', 'true') self.textarea_details.resize(self.table_apps.size()) self.layout.addWidget(self.textarea_details) self.textarea_details.setVisible(False) self.textarea_details.setReadOnly(True) self.toolbar_substatus = QToolBar() self.toolbar_substatus.setObjectName('toolbar_substatus') self.toolbar_substatus.addWidget(new_spacer()) self.label_substatus = QLabel() self.label_substatus.setObjectName('label_substatus') self.label_substatus.setCursor(QCursor(Qt.WaitCursor)) self.toolbar_substatus.addWidget(self.label_substatus) self.toolbar_substatus.addWidget(new_spacer()) self.layout.addWidget(self.toolbar_substatus) self._change_label_substatus('') self.thread_update = self._bind_async_action(UpgradeSelected(self.manager, context.internet_checker, self.i18n), finished_call=self._finish_upgrade_selected) self.thread_refresh = self._bind_async_action(RefreshApps(self.manager), finished_call=self._finish_refresh_packages, only_finished=True) self.thread_uninstall = self._bind_async_action(UninstallPackage(self.manager, self.icon_cache, self.i18n), finished_call=self._finish_uninstall) self.thread_show_info = self._bind_async_action(ShowPackageInfo(self.manager), finished_call=self._finish_show_info) self.thread_show_history = self._bind_async_action(ShowPackageHistory(self.manager, self.i18n), finished_call=self._finish_show_history) self.thread_search = self._bind_async_action(SearchPackages(self.manager), finished_call=self._finish_search, only_finished=True) self.thread_downgrade = self._bind_async_action(DowngradePackage(self.manager, self.i18n), finished_call=self._finish_downgrade) self.thread_suggestions = self._bind_async_action(FindSuggestions(man=self.manager), finished_call=self._finish_load_suggestions, only_finished=True) self.thread_launch = 
self._bind_async_action(LaunchPackage(self.manager), finished_call=self._finish_launch_package, only_finished=False) self.thread_custom_action = self._bind_async_action(CustomAction(manager=self.manager, i18n=self.i18n), finished_call=self._finish_execute_custom_action) self.thread_screenshots = self._bind_async_action(ShowScreenshots(self.manager), finished_call=self._finish_show_screenshots) self.thread_apply_filters = ApplyFilters() self.thread_apply_filters.signal_finished.connect(self._finish_apply_filters) self.thread_apply_filters.signal_table.connect(self._update_table_and_upgrades) self.signal_table_update.connect(self.thread_apply_filters.stop_waiting) self.thread_install = InstallPackage(manager=self.manager, icon_cache=self.icon_cache, i18n=self.i18n) self._bind_async_action(self.thread_install, finished_call=self._finish_install) self.thread_animate_progress = AnimateProgress() self.thread_animate_progress.signal_change.connect(self._update_progress) self.thread_notify_pkgs_ready = NotifyPackagesReady() self.thread_notify_pkgs_ready.signal_changed.connect(self._update_package_data) self.thread_notify_pkgs_ready.signal_finished.connect(self._update_state_when_pkgs_ready) self.signal_stop_notifying.connect(self.thread_notify_pkgs_ready.stop_working) self.thread_ignore_updates = IgnorePackageUpdates(manager=self.manager) self._bind_async_action(self.thread_ignore_updates, finished_call=self.finish_ignore_updates) self.thread_reload = StartAsyncAction(delay_in_milis=5) self.thread_reload.signal_start.connect(self._reload) self.container_bottom = QWidget() self.container_bottom.setObjectName('container_bottom') self.container_bottom.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed) self.container_bottom.setLayout(QHBoxLayout()) self.container_bottom.layout().setContentsMargins(0, 0, 0, 0) self.container_bottom.layout().addWidget(new_spacer()) if config['suggestions']['enabled']: bt_sugs = IconButton(action=lambda: self._begin_load_suggestions(filter_installed=True), i18n=i18n, tooltip=self.i18n['manage_window.bt.suggestions.tooltip']) bt_sugs.setObjectName('suggestions') self.container_bottom.layout().addWidget(bt_sugs) self.comp_manager.register_component(BT_SUGGESTIONS, bt_sugs) bt_themes = IconButton(self.show_themes, i18n=self.i18n, tooltip=self.i18n['manage_window.bt_themes.tip']) bt_themes.setObjectName('themes') self.container_bottom.layout().addWidget(bt_themes) self.comp_manager.register_component(BT_THEMES, bt_themes) self.custom_actions = [a for a in manager.gen_custom_actions()] bt_custom_actions = IconButton(action=self.show_custom_actions, i18n=self.i18n, tooltip=self.i18n['manage_window.bt_custom_actions.tip']) bt_custom_actions.setObjectName('custom_actions') bt_custom_actions.setVisible(bool(self.custom_actions)) self.container_bottom.layout().addWidget(bt_custom_actions) self.comp_manager.register_component(BT_CUSTOM_ACTIONS, bt_custom_actions) bt_settings = IconButton(action=self.show_settings, i18n=self.i18n, tooltip=self.i18n['manage_window.bt_settings.tooltip']) bt_settings.setObjectName('settings') self.container_bottom.layout().addWidget(bt_settings) self.comp_manager.register_component(BT_SETTINGS, bt_settings) bt_about = IconButton(action=self._show_about, i18n=self.i18n, tooltip=self.i18n['manage_window.settings.about']) bt_about.setObjectName('about') self.container_bottom.layout().addWidget(bt_about) self.comp_manager.register_component(BT_ABOUT, bt_about) self.layout.addWidget(self.container_bottom) self.container_progress = 
QCustomToolbar(spacing=0, policy_height=QSizePolicy.Fixed) self.container_progress.setObjectName('container_progress') self.container_progress.add_space() self.progress_bar = QProgressBar() self.progress_bar.setObjectName('progress_manage') self.progress_bar.setCursor(QCursor(Qt.WaitCursor)) self.progress_bar.setTextVisible(False) self.container_progress.add_widget(self.progress_bar) self.container_progress.add_space() self.layout.addWidget(self.container_progress) qt_utils.centralize(self) self.filter_only_apps = True self.type_filter = self.any_type_filter self.category_filter = self.any_category_filter self.filter_updates = False self._maximized = False self.progress_controll_enabled = True self.recent_uninstall = False self.types_changed = False self.dialog_about = None self.load_suggestions = bool(config['suggestions']['enabled']) self.suggestions_requested = False self.first_refresh = True self.thread_warnings = ListWarnings(man=manager, i18n=i18n) self.thread_warnings.signal_warnings.connect(self._show_warnings) self.settings_window = None self.search_performed = False self.thread_save_theme = SaveTheme(theme_key='') self.thread_load_installed = NotifyInstalledLoaded() self.thread_load_installed.signal_loaded.connect(self._finish_loading_installed) self.setMinimumHeight(int(screen_size.height() * 0.5)) self.setMinimumWidth(int(screen_size.width() * 0.6)) self._register_groups() def _register_groups(self): filters = (CHECK_APPS, CHECK_UPDATES, COMBO_CATEGORIES, COMBO_TYPES, INP_NAME) self.comp_manager.register_group(GROUP_FILTERS, False, *filters) self.comp_manager.register_group(GROUP_VIEW_SEARCH, False, COMBO_CATEGORIES, COMBO_TYPES, INP_NAME, # filters BT_INSTALLED, BT_SUGGESTIONS) # buttons self.comp_manager.register_group(GROUP_VIEW_INSTALLED, False, BT_REFRESH, BT_UPGRADE, # buttons *filters) self.comp_manager.register_group(GROUP_UPPER_BAR, False, CHECK_APPS, CHECK_UPDATES, COMBO_CATEGORIES, COMBO_TYPES, INP_NAME, BT_INSTALLED, BT_SUGGESTIONS, BT_REFRESH, BT_UPGRADE) self.comp_manager.register_group(GROUP_LOWER_BTS, False, BT_SUGGESTIONS, BT_THEMES, BT_CUSTOM_ACTIONS, BT_SETTINGS, BT_ABOUT) def update_custom_actions(self): self.custom_actions = [a for a in self.manager.gen_custom_actions()] def _update_process_progress(self, val: int): if self.progress_controll_enabled: self.thread_animate_progress.set_progress(val) def _change_status(self, status: str = None): if status: self.label_status.setText(status + '...') self.label_status.setCursor(QCursor(Qt.WaitCursor)) else: self.label_status.setText('') self.label_status.unsetCursor() def _set_table_enabled(self, enabled: bool): self.table_apps.setEnabled(enabled) if enabled: self.table_container.unsetCursor() else: self.table_container.setCursor(QCursor(Qt.WaitCursor)) def begin_apply_filters(self): self.stop_notifying_package_states() self._begin_action(action_label=self.i18n['manage_window.status.filtering'], action_id=ACTION_APPLY_FILTERS) self.comp_manager.disable_visible_from_groups(GROUP_UPPER_BAR, GROUP_LOWER_BTS) self.comp_manager.set_component_read_only(INP_NAME, True) self.thread_apply_filters.filters = self._gen_filters() self.thread_apply_filters.pkgs = self.pkgs_available self.thread_apply_filters.start() self.setFocus(Qt.NoFocusReason) def _finish_apply_filters(self): self._finish_action(ACTION_APPLY_FILTERS) self.update_bt_upgrade() def stop_notifying_package_states(self): if self.thread_notify_pkgs_ready.isRunning(): self.signal_stop_notifying.emit() self.thread_notify_pkgs_ready.wait(1000) def 
_update_table_and_upgrades(self, pkgs_info: dict): self._update_table(pkgs_info=pkgs_info, signal=True) if self.pkgs: self._update_state_when_pkgs_ready() self.stop_notifying_package_states() self.thread_notify_pkgs_ready.pkgs = self.pkgs self.thread_notify_pkgs_ready.work = True self.thread_notify_pkgs_ready.start() def _bind_async_action(self, action: AsyncAction, finished_call, only_finished: bool = False) -> AsyncAction: action.signal_finished.connect(finished_call) if not only_finished: action.signal_confirmation.connect(self._ask_confirmation) action.signal_output.connect(self._update_action_output) action.signal_message.connect(self._show_message) action.signal_status.connect(self._change_label_status) action.signal_substatus.connect(self._change_label_substatus) action.signal_progress.connect(self._update_process_progress) action.signal_progress_control.connect(self.set_progress_controll) action.signal_root_password.connect(self._pause_and_ask_root_password) self.signal_user_res.connect(action.confirm) self.signal_root_password.connect(action.set_root_password) return action def _ask_confirmation(self, msg: dict): self.thread_animate_progress.pause() extra_widgets = [to_widget(comp=c, i18n=self.i18n) for c in msg['components']] if msg.get('components') else None diag = ConfirmationDialog(title=msg['title'], body=msg['body'], i18n=self.i18n, widgets=extra_widgets, confirmation_label=msg['confirmation_label'], deny_label=msg['deny_label'], deny_button=msg['deny_button'], window_cancel=msg['window_cancel'], confirmation_button=msg.get('confirmation_button', True)) diag.ask() res = diag.confirmed self.thread_animate_progress.animate() self.signal_user_res.emit(res) def _pause_and_ask_root_password(self): self.thread_animate_progress.pause() valid, password = RootDialog.ask_password(self.context, i18n=self.i18n, comp_manager=self.comp_manager) self.thread_animate_progress.animate() self.signal_root_password.emit(valid, password) def _show_message(self, msg: dict): self.thread_animate_progress.pause() dialog.show_message(title=msg['title'], body=msg['body'], type_=msg['type']) self.thread_animate_progress.animate() def _show_warnings(self, warnings: List[str]): if warnings: dialog.show_message(title=self.i18n['warning'].capitalize(), body='<p>{}</p>'.format('<br/><br/>'.join(warnings)), type_=MessageType.WARNING) def show(self): super(ManageWindow, self).show() if not self.thread_warnings.isFinished(): self.thread_warnings.start() qt_utils.centralize(self) def verify_warnings(self): self.thread_warnings.start() def _begin_loading_installed(self): if self.installed_loaded: self.search_bar.clear() self.input_name.set_text('') self._begin_action(self.i18n['manage_window.status.installed']) self._handle_console_option(False) self.comp_manager.set_components_visible(False) self.suggestions_requested = False self.search_performed = False self.thread_load_installed.start() else: self.load_suggestions = False self.begin_refresh_packages() def _finish_loading_installed(self): self._finish_action() self.comp_manager.set_group_visible(GROUP_VIEW_INSTALLED, True) self.update_pkgs(new_pkgs=None, as_installed=True) self._hide_filters_no_packages() self._update_bts_installed_and_suggestions() self._set_lower_buttons_visible(True) self._reorganize() def _update_bts_installed_and_suggestions(self): available_types = len(self.manager.get_managed_types()) self.comp_manager.set_component_visible(BT_INSTALLED, available_types > 0 and any([self.suggestions_requested, self.search_performed])) 
self.comp_manager.set_component_visible(BT_SUGGESTIONS, available_types > 0) def _hide_filters_no_packages(self): if not self.pkgs: self.comp_manager.set_group_visible(GROUP_FILTERS, False) def _show_about(self): if self.dialog_about is None: self.dialog_about = AboutDialog(self.config) self.dialog_about.show() def _handle_updates_filter(self, status: int): self.filter_updates = status == 2 self.begin_apply_filters() def _handle_filter_only_apps(self, status: int): self.filter_only_apps = status == 2 self.begin_apply_filters() def _handle_type_filter(self, idx: int): self.type_filter = self.combo_filter_type.itemData(idx) self.combo_filter_type.adjustSize() self.begin_apply_filters() def _handle_category_filter(self, idx: int): self.category_filter = self.combo_categories.itemData(idx) self.begin_apply_filters() def _update_state_when_pkgs_ready(self): if self.progress_bar.isVisible(): return self._reload_categories() self._reorganize() def _update_package_data(self, idx: int): if self.table_apps.isEnabled(): pkg = self.pkgs[idx] pkg.status = PackageViewStatus.READY self.table_apps.update_package(pkg) def _reload_categories(self): categories = set() for p in self.pkgs_available: if p.model.categories: for c in p.model.categories: if c: cat = c.strip().lower() if cat: categories.add(cat) if categories: self._update_categories(categories, keep_selected=True) def changeEvent(self, e: QEvent): if isinstance(e, QWindowStateChangeEvent): self._maximized = self.isMaximized() self.table_apps.change_headers_policy(maximized=self._maximized) def _handle_console(self, checked: bool): if checked: self.textarea_details.show() else: self.textarea_details.hide() def _handle_console_option(self, enable: bool): if enable: self.textarea_details.clear() self.comp_manager.set_component_visible(CHECK_DETAILS, enable) self.check_details.setChecked(False) self.textarea_details.hide() def begin_refresh_packages(self, pkg_types: Optional[Set[Type[SoftwarePackage]]] = None): self.search_bar.clear() self._begin_action(self.i18n['manage_window.status.refreshing']) self.comp_manager.set_components_visible(False) self._handle_console_option(False) self.suggestions_requested = False self.search_performed = False self.thread_refresh.pkg_types = pkg_types self.thread_refresh.start() def _finish_refresh_packages(self, res: dict, as_installed: bool = True): self._finish_action() self._set_lower_buttons_visible(True) self.comp_manager.set_component_visible(SEARCH_BAR, True) if self.search_performed or self.suggestions_requested: self.comp_manager.set_group_visible(GROUP_VIEW_SEARCH, True) else: self.comp_manager.set_group_visible(GROUP_VIEW_INSTALLED, True) if self.update_pkgs(res['installed'], as_installed=as_installed, types=res['types']): self._hide_filters_no_packages() self._update_bts_installed_and_suggestions() self._reorganize() self.load_suggestions = False self.types_changed = False def load_without_packages(self): self.load_suggestions = False self._handle_console_option(False) self._finish_refresh_packages({'installed': None, 'types': None}, as_installed=False) def _begin_load_suggestions(self, filter_installed: bool): self.search_bar.clear() self._begin_action(self.i18n['manage_window.status.suggestions']) self._handle_console_option(False) self.comp_manager.set_components_visible(False) self.suggestions_requested = True self.thread_suggestions.filter_installed = filter_installed self.thread_suggestions.start() def _finish_load_suggestions(self, res: dict): self._finish_search(res) def begin_uninstall(self, pkg: 
PackageView): pwd, proceed = self._ask_root_password(SoftwareAction.UNINSTALL, pkg) if not proceed: return self._begin_action(action_label='{} {}'.format(self.i18n['manage_window.status.uninstalling'], pkg.model.name), action_id=ACTION_UNINSTALL) self.comp_manager.set_groups_visible(False, GROUP_UPPER_BAR, GROUP_LOWER_BTS) self._handle_console_option(True) self.thread_uninstall.pkg = pkg self.thread_uninstall.root_pwd = pwd self.thread_uninstall.start() def _finish_uninstall(self, res: dict): self._finish_action(action_id=ACTION_UNINSTALL) if res['success']: src_pkg = res['pkg'] if self._can_notify_user(): util.notify_user('{} ({}) {}'.format(src_pkg.model.name, src_pkg.model.get_type(), self.i18n['uninstalled'])) if res['removed']: for list_idx, pkg_list in enumerate((self.pkgs_available, self.pkgs, self.pkgs_installed)): if pkg_list: removed_idxs = [] for pkgv_idx, pkgv in enumerate(pkg_list): if len(removed_idxs) == len(res['removed']): break for model in res['removed']: if pkgv.model == model: if list_idx == 0: # updates the model pkgv.update_model(model) if not self.search_performed or list_idx == 2: # always from the installed packages removed_idxs.append(pkgv_idx) if self.search_performed and list_idx == 1: # only for displayed self.table_apps.update_package(pkgv, change_update_col=True) break # as the model has been found, stops the loop if removed_idxs: # updating the list removed_idxs.sort() for decrement, pkg_idx in enumerate(removed_idxs): del pkg_list[pkg_idx - decrement] if list_idx == 1: # updates the rows if the current list reprents the displayed packages: for decrement, idx in enumerate(removed_idxs): self.table_apps.removeRow(idx - decrement) self._update_table_indexes() self.update_bt_upgrade() self.update_custom_actions() self._show_console_checkbox_if_output() notify_tray() else: self._show_console_errors() if self._can_notify_user(): util.notify_user('{}: {}'.format(res['pkg'].model.name, self.i18n['notification.uninstall.failed'])) def _update_table_indexes(self): if self.pkgs: for new_idx, pkgv in enumerate(self.pkgs): # updating the package indexes pkgv.table_index = new_idx def begin_launch_package(self, pkg: PackageView): self._begin_action(action_label=self.i18n['manage_window.status.running_app'].format(pkg.model.name), action_id=ACTION_LAUNCH) self.comp_manager.disable_visible() self.thread_launch.pkg = pkg self.thread_launch.start() def _finish_launch_package(self, success: bool): self._finish_action(action_id=ACTION_LAUNCH) def _can_notify_user(self): return bool(self.config['system']['notifications']) and (self.isHidden() or self.isMinimized()) def _change_label_status(self, status: str): self.label_status.setText(status) def _change_label_substatus(self, substatus: str): self.label_substatus.setText('<p>{}</p>'.format(substatus)) if not substatus: self.toolbar_substatus.hide() elif not self.toolbar_substatus.isVisible() and self.progress_bar.isVisible(): self.toolbar_substatus.show() def _reorganize(self): if not self._maximized: self.table_apps.change_headers_policy(QHeaderView.Stretch) self.table_apps.change_headers_policy() self._resize(accept_lower_width=len(self.pkgs) > 0) def _update_table(self, pkgs_info: dict, signal: bool = False): self.pkgs = pkgs_info['pkgs_displayed'] if pkgs_info['not_installed'] == 0: update_check = sum_updates_displayed(pkgs_info) > 0 else: update_check = False self.table_apps.update_packages(self.pkgs, update_check_enabled=update_check) if not self._maximized: self.label_displayed.show() 
self.table_apps.change_headers_policy(QHeaderView.Stretch) self.table_apps.change_headers_policy() self._resize(accept_lower_width=len(self.pkgs) > 0) if len(self.pkgs) == 0 and len(self.pkgs_available) == 0: self.label_displayed.setText('') else: self.label_displayed.setText('{} / {}'.format(len(self.pkgs), len(self.pkgs_available))) else: self.label_displayed.hide() if signal: self.signal_table_update.emit() def update_bt_upgrade(self, pkgs_info: dict = None): show_bt_upgrade = False if not any([self.suggestions_requested, self.search_performed]) and (not pkgs_info or pkgs_info['not_installed'] == 0): for pkg in (pkgs_info['pkgs_displayed'] if pkgs_info else self.pkgs): if not pkg.model.is_update_ignored() and pkg.update_checked: show_bt_upgrade = True break self.comp_manager.set_component_visible(BT_UPGRADE, show_bt_upgrade) if show_bt_upgrade: self._reorganize() def change_update_state(self, pkgs_info: dict, trigger_filters: bool = True, keep_selected: bool = False): self.update_bt_upgrade(pkgs_info) if pkgs_info['updates'] > 0: if pkgs_info['not_installed'] == 0: if not self.comp_manager.is_visible(CHECK_UPDATES): self.comp_manager.set_component_visible(CHECK_UPDATES, True) if not self.filter_updates and not keep_selected: self._change_checkbox(self.check_updates, True, 'filter_updates', trigger_filters) if pkgs_info['napp_updates'] > 0 and self.filter_only_apps and not keep_selected: self._change_checkbox(self.check_apps, False, 'filter_only_apps', trigger_filters) else: if not keep_selected: self._change_checkbox(self.check_updates, False, 'filter_updates', trigger_filters) self.comp_manager.set_component_visible(CHECK_UPDATES, False) def _change_checkbox(self, checkbox: QCheckBox, checked: bool, attr: str = None, trigger: bool = True): if not trigger: checkbox.blockSignals(True) checkbox.setChecked(checked) if not trigger: setattr(self, attr, checked) checkbox.blockSignals(False) def _gen_filters(self, ignore_updates: bool = False) -> dict: return { 'only_apps': False if self.search_performed else self.filter_only_apps, 'type': self.type_filter, 'category': self.category_filter, 'updates': False if ignore_updates else self.filter_updates, 'name': self.input_name.text().lower() if self.input_name.text() else None, 'display_limit': None if self.filter_updates else self.display_limit } def update_pkgs(self, new_pkgs: Optional[List[SoftwarePackage]], as_installed: bool, types: Optional[Set[type]] = None, ignore_updates: bool = False, keep_filters: bool = False) -> bool: self.input_name.set_text('') pkgs_info = commons.new_pkgs_info() filters = self._gen_filters(ignore_updates=ignore_updates) if new_pkgs is not None: old_installed = None if as_installed: old_installed = self.pkgs_installed self.pkgs_installed = [] for pkg in new_pkgs: app_model = PackageView(model=pkg, i18n=self.i18n) commons.update_info(app_model, pkgs_info) commons.apply_filters(app_model, filters, pkgs_info) if old_installed and types: for pkgv in old_installed: if pkgv.model.__class__ not in types: commons.update_info(pkgv, pkgs_info) commons.apply_filters(pkgv, filters, pkgs_info) else: # use installed for pkgv in self.pkgs_installed: commons.update_info(pkgv, pkgs_info) commons.apply_filters(pkgv, filters, pkgs_info) if pkgs_info['apps_count'] == 0: if self.load_suggestions or self.types_changed: if as_installed: self.pkgs_installed = pkgs_info['pkgs'] self._begin_load_suggestions(filter_installed=False) self.load_suggestions = False return False else: if not keep_filters: self._change_checkbox(self.check_apps, 
False, 'filter_only_apps', trigger=False) self.check_apps.setCheckable(False) else: if not keep_filters: self.check_apps.setCheckable(True) self._change_checkbox(self.check_apps, True, 'filter_only_apps', trigger=False) self.change_update_state(pkgs_info=pkgs_info, trigger_filters=False, keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed'])) self._update_categories(pkgs_info['categories'], keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed'])) self._update_type_filters(pkgs_info['available_types'], keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed'])) self._apply_filters(pkgs_info, ignore_updates=ignore_updates) self.change_update_state(pkgs_info=pkgs_info, trigger_filters=False, keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed'])) self.pkgs_available = pkgs_info['pkgs'] if as_installed: self.pkgs_installed = pkgs_info['pkgs'] self.pkgs = pkgs_info['pkgs_displayed'] self._update_table(pkgs_info=pkgs_info) if new_pkgs: self.stop_notifying_package_states() self.thread_notify_pkgs_ready.work = True self.thread_notify_pkgs_ready.pkgs = self.pkgs self.thread_notify_pkgs_ready.start() self._resize(accept_lower_width=bool(self.pkgs_installed)) if self.first_refresh: qt_utils.centralize(self) self.first_refresh = False if not self.installed_loaded and as_installed: self.installed_loaded = True return True def _apply_filters(self, pkgs_info: dict, ignore_updates: bool): pkgs_info['pkgs_displayed'] = [] filters = self._gen_filters(ignore_updates=ignore_updates) for pkgv in pkgs_info['pkgs']: commons.apply_filters(pkgv, filters, pkgs_info) def _clean_combo_types(self): if self.combo_filter_type.count() > 1: for _ in range(self.combo_filter_type.count() - 1): self.combo_filter_type.removeItem(1) def _update_type_filters(self, available_types: dict = None, keep_selected: bool = False): if available_types is None: self.comp_manager.set_component_visible(COMBO_TYPES, self.combo_filter_type.count() > 2) else: keeping_selected = keep_selected and available_types and self.type_filter in available_types if not keeping_selected: self.type_filter = self.any_type_filter if not available_types: self._clean_combo_types() if available_types: self._clean_combo_types() sel_type = -1 for idx, item in enumerate(available_types.items()): app_type, icon_path, label = item[0], item[1]['icon'], item[1]['label'] icon = self.cache_type_filter_icons.get(app_type) if not icon: icon = QIcon(icon_path) self.cache_type_filter_icons[app_type] = icon self.combo_filter_type.addItem(icon, label, app_type) if keeping_selected and app_type == self.type_filter: sel_type = idx + 1 self.combo_filter_type.blockSignals(True) self.combo_filter_type.setCurrentIndex(sel_type if sel_type > -1 else 0) self.combo_filter_type.blockSignals(False) self.comp_manager.set_component_visible(COMBO_TYPES, len(available_types) > 1) else: self.comp_manager.set_component_visible(COMBO_TYPES, False) def _update_categories(self, categories: Set[str] = None, keep_selected: bool = False): if categories is None: self.comp_manager.set_component_visible(COMBO_CATEGORIES, self.combo_categories.count() > 1) else: keeping_selected = keep_selected and categories and self.category_filter in categories if not keeping_selected: self.category_filter = self.any_category_filter if categories: if self.combo_categories.count() > 1: for _ in range(self.combo_categories.count() - 1): self.combo_categories.removeItem(1) selected_cat = -1 cat_list = list(categories) cat_list.sort() for idx, c in enumerate(cat_list): 
self.__add_category(c) if keeping_selected and c == self.category_filter: selected_cat = idx + 1 self.combo_categories.blockSignals(True) self.combo_categories.setCurrentIndex(selected_cat if selected_cat > -1 else 0) self.combo_categories.blockSignals(False) self.comp_manager.set_component_visible(COMBO_CATEGORIES, True) else: self.comp_manager.set_component_visible(COMBO_CATEGORIES, False) def __add_category(self, category: str): i18n_cat = self.i18n.get('category.{}'.format(category), self.i18n.get(category, category)) self.combo_categories.addItem(i18n_cat.capitalize(), category) def _get_current_categories(self) -> Set[str]: if self.combo_categories.count() > 1: return {self.combo_categories.itemData(idx) for idx in range(self.combo_categories.count()) if idx > 0} def _resize(self, accept_lower_width: bool = True): table_width = self.table_apps.get_width() toolbar_width = self.toolbar_filters.sizeHint().width() topbar_width = self.toolbar_status.sizeHint().width() new_width = max(table_width, toolbar_width, topbar_width) new_width *= 1.05 # this extra size is not because of the toolbar button, but the table upgrade buttons if (self.pkgs and accept_lower_width) or new_width > self.width(): self.resize(int(new_width), self.height()) def set_progress_controll(self, enabled: bool): self.progress_controll_enabled = enabled def upgrade_selected(self): body = QWidget() body.setLayout(QHBoxLayout()) body.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Preferred) body.layout().addWidget(QLabel(self.i18n['manage_window.upgrade_all.popup.body'])) body.layout().addWidget(UpgradeToggleButton(pkg=None, root=self, i18n=self.i18n, clickable=False)) if ConfirmationDialog(title=self.i18n['manage_window.upgrade_all.popup.title'], i18n=self.i18n, body=None, widgets=[body]).ask(): self._begin_action(action_label=self.i18n['manage_window.status.upgrading'], action_id=ACTION_UPGRADE) self.comp_manager.set_components_visible(False) self._handle_console_option(True) self.thread_update.pkgs = self.pkgs self.thread_update.start() def _finish_upgrade_selected(self, res: dict): self._finish_action() if res.get('id'): output = self.textarea_details.toPlainText() if output: try: Path(UpgradeSelected.UPGRADE_LOGS_DIR).mkdir(parents=True, exist_ok=True) logs_path = '{}/{}.log'.format(UpgradeSelected.UPGRADE_LOGS_DIR, res['id']) with open(logs_path, 'w+') as f: f.write(output) self.textarea_details.appendPlainText('\n*Upgrade summary generated at: {}'.format(UpgradeSelected.SUMMARY_FILE.format(res['id']))) self.textarea_details.appendPlainText('*Upgrade logs generated at: {}'.format(logs_path)) except: traceback.print_exc() if res['success']: self.comp_manager.remove_saved_state(ACTION_UPGRADE) self.begin_refresh_packages(pkg_types=res['types']) self._show_console_checkbox_if_output() if self._can_notify_user(): util.notify_user('{} {}'.format(res['updated'], self.i18n['notification.update_selected.success'])) notify_tray() else: self.comp_manager.restore_state(ACTION_UPGRADE) self._show_console_errors() if self._can_notify_user(): util.notify_user(self.i18n['notification.update_selected.failed']) self.update_custom_actions() def _show_console_errors(self): if self.textarea_details.toPlainText(): self.check_details.setChecked(True) else: self._handle_console_option(False) self.comp_manager.set_component_visible(CHECK_DETAILS, False) def _update_action_output(self, output: str): self.textarea_details.appendPlainText(output) def _begin_action(self, action_label: str, action_id: int = None): 
self.thread_animate_progress.stop = False self.thread_animate_progress.start() self.progress_bar.setVisible(True) if action_id is not None: self.comp_manager.save_states(action_id, only_visible=True) self._set_table_enabled(False) self.comp_manager.set_component_visible(SEARCH_BAR, False) self._change_status(action_label) def _set_lower_buttons_visible(self, visible: bool): self.comp_manager.set_group_visible(GROUP_LOWER_BTS, visible) if visible: self.comp_manager.set_component_visible(BT_CUSTOM_ACTIONS, bool(self.custom_actions)) def _finish_action(self, action_id: int = None): self.thread_animate_progress.stop = True self.thread_animate_progress.wait(msecs=1000) self.progress_bar.setVisible(False) self.progress_bar.setValue(0) self.progress_bar.setTextVisible(False) if action_id is not None: self.comp_manager.restore_state(action_id) self.comp_manager.set_component_visible(SEARCH_BAR, True) self._change_status() self._change_label_substatus('') self._set_table_enabled(True) self.progress_controll_enabled = True def begin_downgrade(self, pkg: PackageView): pwd, proceed = self._ask_root_password(SoftwareAction.DOWNGRADE, pkg) if not proceed: return self._begin_action(action_label='{} {}'.format(self.i18n['manage_window.status.downgrading'], pkg.model.name), action_id=ACTION_DOWNGRADE) self.comp_manager.set_components_visible(False) self._handle_console_option(True) self.thread_downgrade.pkg = pkg self.thread_downgrade.root_pwd = pwd self.thread_downgrade.start() def _finish_downgrade(self, res: dict):
def begin_show_info(self, pkg: dict): self._begin_action(self.i18n['manage_window.status.info'], action_id=ACTION_INFO) self.comp_manager.disable_visible() self.thread_show_info.pkg = pkg self.thread_show_info.start() def _finish_show_info(self, pkg_info: dict): self._finish_action(action_id=ACTION_INFO) if pkg_info: if len(pkg_info) > 1: dialog_info = InfoDialog(pkg_info=pkg_info, icon_cache=self.icon_cache, i18n=self.i18n, screen_size=self.screen_size) dialog_info.exec_() else: dialog.show_message(title=self.i18n['warning'].capitalize(), body=self.i18n['manage_window.info.no_info'].format(bold(pkg_info['__app__'].model.name)), type_=MessageType.WARNING) def begin_show_screenshots(self, pkg: PackageView): self._begin_action(action_label=self.i18n['manage_window.status.screenshots'].format(bold(pkg.model.name)), action_id=ACTION_SCREENSHOTS) self.comp_manager.disable_visible() self.thread_screenshots.pkg = pkg self.thread_screenshots.start() def _finish_show_screenshots(self, res: dict): self._finish_action(ACTION_SCREENSHOTS) if res.get('screenshots'): diag = ScreenshotsDialog(pkg=res['pkg'], http_client=self.http_client, icon_cache=self.icon_cache, logger=self.logger, i18n=self.i18n, screenshots=res['screenshots']) diag.exec_() else: dialog.show_message(title=self.i18n['error'], body=self.i18n['popup.screenshots.no_screenshot.body'].format(bold(res['pkg'].model.name)), type_=MessageType.ERROR) def begin_show_history(self, pkg: PackageView): self._begin_action(self.i18n['manage_window.status.history'], action_id=ACTION_HISTORY) self.comp_manager.disable_visible() self.thread_show_history.pkg = pkg self.thread_show_history.start() def _finish_show_history(self, res: dict): self._finish_action(ACTION_HISTORY) if res.get('error'): self._handle_console_option(True) self.textarea_details.appendPlainText(res['error']) self.check_details.setChecked(True) elif not res['history'].history: dialog.show_message(title=self.i18n['action.history.no_history.title'], body=self.i18n['action.history.no_history.body'].format(bold(res['history'].pkg.name)), type_=MessageType.WARNING) else: dialog_history = HistoryDialog(res['history'], self.icon_cache, self.i18n) dialog_history.exec_() def _begin_search(self, word, action_id: int = None): self.filter_updates = False self._begin_action('{} {}'.format(self.i18n['manage_window.status.searching'], word if word else ''), action_id=action_id) def search(self): word = self.search_bar.text().strip() if word: self._handle_console(False) self._begin_search(word, action_id=ACTION_SEARCH) self.comp_manager.set_components_visible(False) self.thread_search.word = word self.thread_search.start() def _finish_search(self, res: dict): self._finish_action() self.search_performed = True if not res['error']: self.comp_manager.set_group_visible(GROUP_VIEW_SEARCH, True) self.update_pkgs(res['pkgs_found'], as_installed=False, ignore_updates=True) self._set_lower_buttons_visible(True) self._update_bts_installed_and_suggestions() self._hide_filters_no_packages() self._reorganize() else: self.comp_manager.restore_state(ACTION_SEARCH) dialog.show_message(title=self.i18n['warning'].capitalize(), body=self.i18n[res['error']], type_=MessageType.WARNING) def _ask_root_password(self, action: SoftwareAction, pkg: PackageView) -> Tuple[Optional[str], bool]: pwd = None requires_root = self.manager.requires_root(action, pkg.model) if not user.is_root() and requires_root: valid, pwd = RootDialog.ask_password(self.context, i18n=self.i18n, comp_manager=self.comp_manager) if not valid: return pwd, 
False return pwd, True def install(self, pkg: PackageView): pwd, proceed = self._ask_root_password(SoftwareAction.INSTALL, pkg) if not proceed: return self._begin_action('{} {}'.format(self.i18n['manage_window.status.installing'], pkg.model.name), action_id=ACTION_INSTALL) self.comp_manager.set_groups_visible(False, GROUP_UPPER_BAR, GROUP_LOWER_BTS) self._handle_console_option(True) self.thread_install.pkg = pkg self.thread_install.root_pwd = pwd self.thread_install.start() def _finish_install(self, res: dict): self._finish_action(action_id=ACTION_INSTALL) console_output = self.textarea_details.toPlainText() if console_output: log_path = f"{LOGS_DIR}/install/{res['pkg'].model.get_type()}/{res['pkg'].model.name}" try: Path(log_path).mkdir(parents=True, exist_ok=True) log_file = f'{log_path}/{int(time.time())}.log' with open(log_file, 'w+') as f: f.write(console_output) self.textarea_details.appendPlainText(self.i18n['console.install_logs.path'].format('"{}"'.format(log_file))) except: self.textarea_details.appendPlainText("[warning] Could not write install log file to '{}'".format(log_path)) if res['success']: if self._can_notify_user(): util.notify_user(msg='{} ({}) {}'.format(res['pkg'].model.name, res['pkg'].model.get_type(), self.i18n['installed'])) models_updated = [] for key in ('installed', 'removed'): if res.get(key): models_updated.extend(res[key]) if models_updated: installed_available_idxs = [] for idx, available in enumerate(self.pkgs_available): for pidx, model in enumerate(models_updated): if available.model == model: available.update_model(model) if model.installed: installed_available_idxs.append((idx, pidx, available)) # re-indexing all installed so they always will be be displayed when no filters are applied if installed_available_idxs: # removing from available installed_available_idxs.sort(key=operator.itemgetter(0)) for decrement, data in enumerate(installed_available_idxs): del self.pkgs_available[data[0] - decrement] # re-inserting into the available installed_available_idxs.sort(key=operator.itemgetter(1)) for new_idx, data in enumerate(installed_available_idxs): self.pkgs_available.insert(new_idx, data[2]) # updating the respective table rows: for displayed in self.pkgs: for model in models_updated: if displayed.model == model: self.table_apps.update_package(displayed, change_update_col=True) self.update_bt_upgrade() # updating installed packages if res['removed'] and self.pkgs_installed: to_remove = [] for idx, installed in enumerate(self.pkgs_installed): for removed in res['removed']: if installed.model == removed: to_remove.append(idx) if to_remove: to_remove.sort() for decrement, idx in enumerate(to_remove): del self.pkgs_installed[idx - decrement] if res['installed']: for idx, model in enumerate(res['installed']): self.pkgs_installed.insert(idx, PackageView(model, self.i18n)) self.update_custom_actions() self.table_apps.change_headers_policy(policy=QHeaderView.Stretch, maximized=self._maximized) self.table_apps.change_headers_policy(policy=QHeaderView.ResizeToContents, maximized=self._maximized) self._resize(accept_lower_width=False) else: self._show_console_errors() if self._can_notify_user(): util.notify_user('{}: {}'.format(res['pkg'].model.name, self.i18n['notification.install.failed'])) def _update_progress(self, value: int): self.progress_bar.setValue(value) def begin_execute_custom_action(self, pkg: Optional[PackageView], action: CustomSoftwareAction): if pkg is None and action.requires_confirmation and \ not 
ConfirmationDialog(title=self.i18n['confirmation'].capitalize(), body='<p>{}</p>'.format(self.i18n['custom_action.proceed_with'].capitalize().format(bold(self.i18n[action.i18n_label_key]))), icon=QIcon(action.icon_path) if action.icon_path else QIcon(resource.get_path('img/logo.svg')), i18n=self.i18n).ask(): return False pwd = None if not user.is_root() and action.requires_root: valid, pwd = RootDialog.ask_password(self.context, i18n=self.i18n, comp_manager=self.comp_manager) if not valid: return self._begin_action(action_label='{}{}'.format(self.i18n[action.i18n_status_key], ' {}'.format(pkg.model.name) if pkg else ''), action_id=ACTION_CUSTOM_ACTION) self.comp_manager.set_components_visible(False) self._handle_console_option(True) self.thread_custom_action.pkg = pkg self.thread_custom_action.root_pwd = pwd self.thread_custom_action.custom_action = action self.thread_custom_action.start() def _finish_execute_custom_action(self, res: dict): self._finish_action() if res['success']: if res['action'].refresh: self.comp_manager.remove_saved_state(ACTION_CUSTOM_ACTION) self.begin_refresh_packages(pkg_types={res['pkg'].model.__class__} if res['pkg'] else None) else: self.comp_manager.restore_state(ACTION_CUSTOM_ACTION) self._show_console_checkbox_if_output() else: self.comp_manager.restore_state(ACTION_CUSTOM_ACTION) self._show_console_errors() if res['error']: dialog.show_message(title=self.i18n['warning' if res['error_type'] == MessageType.WARNING else 'error'].capitalize(), body=self.i18n[res['error']], type_=res['error_type']) def _show_console_checkbox_if_output(self): if self.textarea_details.toPlainText(): self.comp_manager.set_component_visible(CHECK_DETAILS, True) else: self.comp_manager.set_component_visible(CHECK_DETAILS, False) def show_settings(self): if self.settings_window: self.settings_window.handle_display() else: self.settings_window = SettingsWindow(self.manager, self.i18n, self.screen_size, self) self.settings_window.setMinimumWidth(int(self.screen_size.width() / 4)) self.settings_window.resize(self.size()) self.settings_window.adjustSize() qt_utils.centralize(self.settings_window) self.settings_window.show() def _map_custom_action(self, action: CustomSoftwareAction, parent: QWidget) -> QCustomMenuAction: if action.icon_path: try: if action.icon_path.startswith('/'): icon = QIcon(action.icon_path) else: icon = QIcon.fromTheme(action.icon_path) except: icon = None else: icon = None return QCustomMenuAction(parent=parent, label=self.i18n[action.i18n_label_key], action=lambda: self.begin_execute_custom_action(None, action), icon=icon) def show_custom_actions(self): if self.custom_actions: menu_row = QMenu() menu_row.setCursor(QCursor(Qt.PointingHandCursor)) actions = [self._map_custom_action(a, menu_row) for a in self.custom_actions] menu_row.addActions(actions) menu_row.adjustSize() menu_row.popup(QCursor.pos()) menu_row.exec_() def begin_ignore_updates(self, pkg: PackageView): status_key = 'ignore_updates' if not pkg.model.is_update_ignored() else 'ignore_updates_reverse' self._begin_action(action_label=self.i18n['manage_window.status.{}'.format(status_key)].format(pkg.model.name), action_id=ACTION_IGNORE_UPDATES) self.comp_manager.disable_visible() self.thread_ignore_updates.pkg = pkg self.thread_ignore_updates.start() def finish_ignore_updates(self, res: dict): self._finish_action(action_id=ACTION_IGNORE_UPDATES) if res['success']: hide_package = commons.is_package_hidden(res['pkg'], self._gen_filters()) if hide_package: idx_to_remove = None for pkg in self.pkgs: if pkg == 
res['pkg']: idx_to_remove = pkg.table_index break if idx_to_remove is not None: del self.pkgs[idx_to_remove] self.table_apps.removeRow(idx_to_remove) self._update_table_indexes() self.update_bt_upgrade() else: for pkg in self.pkgs: if pkg == res['pkg']: pkg.update_model(res['pkg'].model) self.table_apps.update_package(pkg, change_update_col=not any([self.search_performed, self.suggestions_requested])) self.update_bt_upgrade() break for pkg_list in (self.pkgs_available, self.pkgs_installed): if pkg_list: for pkg in pkg_list: if pkg == res['pkg']: pkg.update_model(res['pkg'].model) break self._add_pkg_categories(res['pkg']) dialog.show_message(title=self.i18n['success'].capitalize(), body=self.i18n['action.{}.success'.format(res['action'])].format(bold(res['pkg'].model.name)), type_=MessageType.INFO) else: dialog.show_message(title=self.i18n['fail'].capitalize(), body=self.i18n['action.{}.fail'.format(res['action'])].format(bold(res['pkg'].model.name)), type_=MessageType.ERROR) def _add_pkg_categories(self, pkg: PackageView): if pkg.model.categories: pkg_categories = {c.strip().lower() for c in pkg.model.categories if c and c.strip()} if pkg_categories: current_categories = self._get_current_categories() if current_categories: pkg_categories = {c.strip().lower() for c in pkg.model.categories if c} if pkg_categories: categories_to_add = {c for c in pkg_categories if c and c not in current_categories} if categories_to_add: for cat in categories_to_add: self.__add_category(cat) else: self._update_categories(pkg_categories) def _map_theme_action(self, theme: ThemeMetadata, menu: QMenu) -> QCustomMenuAction: def _change_theme(): set_theme(theme_key=theme.key, app=QApplication.instance(), logger=self.context.logger) self.thread_save_theme.theme_key = theme.key self.thread_save_theme.start() return QCustomMenuAction(label=theme.get_i18n_name(self.i18n), action=_change_theme, parent=menu, tooltip=theme.get_i18n_description(self.i18n)) def show_themes(self): menu_row = QMenu() menu_row.setCursor(QCursor(Qt.PointingHandCursor)) menu_row.addActions(self._map_theme_actions(menu_row)) menu_row.adjustSize() menu_row.popup(QCursor.pos()) menu_row.exec_() def _map_theme_actions(self, menu: QMenu) -> List[QCustomMenuAction]: core_config = CoreConfigManager().get_config() current_theme_key, current_action = core_config['ui']['theme'], None actions = [] for t in read_all_themes_metadata(): if not t.abstract: action = self._map_theme_action(t, menu) if current_action is None and current_theme_key is not None and current_theme_key == t.key: action.button.setProperty('current', 'true') current_action = action else: actions.append(action) if not current_action: invalid_action = QCustomMenuAction(label=self.i18n['manage_window.bt_themes.option.invalid'], parent=menu) invalid_action.button.setProperty('current', 'true') current_action = invalid_action actions.sort(key=lambda a: a.get_label()) actions.insert(0, current_action) return actions def reload(self): self.thread_reload.start() def _reload(self): self.update_custom_actions() self.verify_warnings() self.types_changed = True self.begin_refresh_packages()
self._finish_action() if res['success']: self.comp_manager.remove_saved_state(ACTION_DOWNGRADE) if self._can_notify_user(): util.notify_user('{} {}'.format(res['app'], self.i18n['downgraded'])) self.begin_refresh_packages(pkg_types={res['app'].model.__class__} if len(self.pkgs) > 1 else None) self._show_console_checkbox_if_output() self.update_custom_actions() notify_tray() else: self.comp_manager.restore_state(ACTION_DOWNGRADE) self._show_console_errors() if self._can_notify_user(): util.notify_user(self.i18n['notification.downgrade.failed'])
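# A standalone, simplified sketch (not bauh code) of the index bookkeeping done in
# _finish_install above: newly installed entries are removed from the "available"
# list and re-inserted at the front so they are always shown when no filter is
# applied. Plain strings stand in for the PackageView objects used in the original.
available = ['a', 'b (installed)', 'c', 'd (installed)']
installed_idxs = [1, 3]  # positions of the entries that just became installed

moved = []
for decrement, idx in enumerate(sorted(installed_idxs)):
    # each pop() shifts the remaining indexes left, hence the running decrement
    moved.append(available.pop(idx - decrement))

for new_idx, item in enumerate(moved):
    available.insert(new_idx, item)

assert available == ['b (installed)', 'd (installed)', 'a', 'c']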
1_validate_subsequence.py
def validate_subsequence(arr, seq):
print(validate_subsequence([1, 2, 3, 4, 5, 6], [2, 4, 6]))
print(validate_subsequence([1, 2, 3, 4, 5, 6], [4, 2, 6]))
    arr_idx = 0
    seq_idx = 0
    while arr_idx < len(arr) and seq_idx < len(seq):
        if arr[arr_idx] == seq[seq_idx]:
            seq_idx += 1
        arr_idx += 1
    return seq_idx == len(seq)
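# A minimal alternative sketch of the same subsequence check using an iterator;
# `value in it` consumes the iterator, so matches must occur in order. This is
# only an illustration, not part of the original file.
def validate_subsequence_iter(arr, seq):
    it = iter(arr)
    return all(value in it for value in seq)

assert validate_subsequence_iter([1, 2, 3, 4, 5, 6], [2, 4, 6]) is True
assert validate_subsequence_iter([1, 2, 3, 4, 5, 6], [4, 2, 6]) is False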
lib.rs
use bevy::{input::touch::TouchPhase, prelude::*, window::WindowMode};
App::new() .insert_resource(WindowDescriptor { vsync: true, resizable: false, mode: WindowMode::BorderlessFullscreen, ..Default::default() }) .insert_resource(Msaa { samples: 4 }) .add_plugins(DefaultPlugins) .add_startup_system(setup_scene) .add_startup_system(setup_music) .add_system(touch_camera) .run(); } fn touch_camera( windows: ResMut<Windows>, mut touches: EventReader<TouchInput>, mut camera: Query<&mut Transform, With<Camera>>, mut last_position: Local<Option<Vec2>>, ) { for touch in touches.iter() { if touch.phase == TouchPhase::Started { *last_position = None; } if let Some(last_position) = *last_position { let window = windows.get_primary().unwrap(); let mut transform = camera.single_mut(); *transform = Transform::from_xyz( transform.translation.x + (touch.position.x - last_position.x) / window.width() * 5.0, transform.translation.y, transform.translation.z + (touch.position.y - last_position.y) / window.height() * 5.0, ) .looking_at(Vec3::ZERO, Vec3::Y); } *last_position = Some(touch.position); } } /// set up a simple 3D scene fn setup_scene( mut commands: Commands, mut meshes: ResMut<Assets<Mesh>>, mut materials: ResMut<Assets<StandardMaterial>>, ) { // plane commands.spawn_bundle(PbrBundle { mesh: meshes.add(Mesh::from(shape::Plane { size: 5.0 })), material: materials.add(Color::rgb(0.1, 0.2, 0.1).into()), ..Default::default() }); // cube commands.spawn_bundle(PbrBundle { mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })), material: materials.add(Color::rgb(0.5, 0.4, 0.3).into()), transform: Transform::from_xyz(0.0, 0.5, 0.0), ..Default::default() }); // sphere commands.spawn_bundle(PbrBundle { mesh: meshes.add(Mesh::from(shape::Icosphere { subdivisions: 4, radius: 0.5, })), material: materials.add(Color::rgb(0.1, 0.4, 0.8).into()), transform: Transform::from_xyz(1.5, 1.5, 1.5), ..Default::default() }); // light commands.spawn_bundle(PointLightBundle { transform: Transform::from_xyz(4.0, 8.0, 4.0), point_light: PointLight { intensity: 5000.0, shadows_enabled: true, ..Default::default() }, ..Default::default() }); // camera commands.spawn_bundle(PerspectiveCameraBundle { transform: Transform::from_xyz(-2.0, 2.5, 5.0).looking_at(Vec3::ZERO, Vec3::Y), ..Default::default() }); } fn setup_music(asset_server: Res<AssetServer>, audio: Res<Audio>) { let music = asset_server.load("sounds/Windless Slopes.ogg"); audio.play(music); }
// the `bevy_main` proc_macro generates the required ios boilerplate #[bevy_main] fn main() {
api_video.test.js
import server from '../index'; import request from 'supertest';
describe('Videos API request test',()=>{ let res; it('Requesting videos of the "Rain" query', async ()=>{ res = await request(server).get('/videos?q=rain'); expect(res.status).toBe(200); }); it('Requesting video by "id"', async ()=>{ res = await request(server).get('/video?id=mPZkdNFkNps'); expect(res.status).toBe(200); }); });
manifest.rs
use std::collections::{HashMap, HashSet}; use std::env; use std::fs; use std::path::{Path, PathBuf}; use std::str::FromStr; use config::{Config, File}; use serde::{Deserialize, Serialize}; use serde_with::rust::string_empty_as_none; use crate::commands::validate_worker_name; use crate::settings::toml::deploy_config::{DeployConfig, RouteConfig}; use crate::settings::toml::environment::Environment; use crate::settings::toml::kv_namespace::{ConfigKvNamespace, KvNamespace}; use crate::settings::toml::site::Site; use crate::settings::toml::target_type::TargetType; use crate::settings::toml::Target; use crate::terminal::{emoji, message, styles}; #[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)] pub struct
{ #[serde(default)] pub name: String, #[serde(rename = "type")] pub target_type: TargetType, #[serde(default)] pub account_id: String, pub workers_dev: Option<bool>, #[serde(default, with = "string_empty_as_none")] pub route: Option<String>, pub routes: Option<Vec<String>>, #[serde(default, with = "string_empty_as_none")] pub zone_id: Option<String>, pub webpack_config: Option<String>, pub private: Option<bool>, // TODO: maybe one day, serde toml support will allow us to serialize sites // as a TOML inline table (this would prevent confusion with environments too!) pub site: Option<Site>, #[serde(alias = "kv-namespaces")] pub kv_namespaces: Option<Vec<ConfigKvNamespace>>, pub env: Option<HashMap<String, Environment>>, pub vars: Option<HashMap<String, String>>, } impl Manifest { pub fn new(config_path: &Path) -> Result<Self, failure::Error> { failure::ensure!( config_path.exists(), "wrangler.toml not found; run `wrangler init` to create one." ); let config = read_config(config_path)?; let manifest: Manifest = match config.try_into() { Ok(m) => m, Err(e) => { if e.to_string().contains("unknown field `kv-namespaces`") { failure::bail!("kv-namespaces should not live under the [site] table in wrangler.toml; please move it above [site].") } else { failure::bail!(e) } } }; check_for_duplicate_names(&manifest)?; Ok(manifest) } pub fn generate( name: String, target_type: Option<TargetType>, config_path: &PathBuf, site: Option<Site>, ) -> Result<Manifest, failure::Error> { let config_file = config_path.join("wrangler.toml"); let template_config_content = fs::read_to_string(&config_file); let template_config = match &template_config_content { Ok(content) => { let config: Manifest = toml::from_str(content)?; config.warn_on_account_info(); if let Some(target_type) = &target_type { if config.target_type != *target_type { message::warn(&format!("The template recommends the \"{}\" type. 
Using type \"{}\" may cause errors, we recommend changing the type field in wrangler.toml to \"{}\"", config.target_type, target_type, config.target_type)); } } Ok(config) } Err(err) => Err(err), }; let mut template_config = match template_config { Ok(config) => config, Err(err) => { log::info!("Error parsing template {}", err); log::debug!("template content {:?}", template_config_content); Manifest::default() } }; let default_workers_dev = match &template_config.route { Some(route) => { if route.is_empty() { Some(true) } else { None } } None => Some(true), }; template_config.name = name; template_config.workers_dev = default_workers_dev; if let Some(target_type) = &target_type { template_config.target_type = target_type.clone(); } if let Some(arg_site) = site { if template_config.site.is_none() { template_config.site = Some(arg_site); } } // TODO: https://github.com/cloudflare/wrangler/issues/773 let toml = toml::to_string(&template_config)?; log::info!("Writing a wrangler.toml file at {}", config_file.display()); fs::write(&config_file, &toml)?; Ok(template_config) } pub fn worker_name(&self, env_arg: Option<&str>) -> String { if let Some(environment) = self.get_environment(env_arg).unwrap_or_default() { if let Some(name) = &environment.name { return name.clone(); } if let Some(env) = env_arg { return format!("{}-{}", self.name, env); } } self.name.clone() } fn route_config(&self) -> RouteConfig { RouteConfig { account_id: Some(self.account_id.clone()), workers_dev: self.workers_dev, route: self.route.clone(), routes: self.routes.clone(), zone_id: self.zone_id.clone(), } } pub fn deploy_config(&self, env: Option<&str>) -> Result<DeployConfig, failure::Error> { let script = self.worker_name(env); validate_worker_name(&script)?; if let Some(environment) = self.get_environment(env)? { // if there is an environment level deploy target, try to return that if let Some(env_route_config) = environment.route_config(self.account_id.clone(), self.zone_id.clone()) { DeployConfig::build(&script, &env_route_config) } else { // If the top level config is Zoned, the user needs to specify new route config let top_level_config = DeployConfig::build(&script, &self.route_config())?; match top_level_config { DeployConfig::Zoned(_) => failure::bail!( "you must specify route(s) per environment for zoned deploys." ), DeployConfig::Zoneless(_) => Ok(top_level_config), } } } else { DeployConfig::build(&script, &self.route_config()) } } pub fn get_target( &self, environment_name: Option<&str>, preview: bool, ) -> Result<Target, failure::Error> { // Site projects are always webpack for now; don't let toml override this. 
let target_type = match self.site { Some(_) => TargetType::Webpack, None => self.target_type.clone(), }; let mut target = Target { target_type, // MUST inherit account_id: self.account_id.clone(), // MAY inherit webpack_config: self.webpack_config.clone(), // MAY inherit // importantly, the top level name will be modified // to include the name of the environment name: self.name.clone(), // MAY inherit kv_namespaces: get_namespaces(self.kv_namespaces.clone(), preview)?, // MUST NOT inherit site: self.site.clone(), // MUST NOT inherit vars: self.vars.clone(), // MAY inherit, }; let environment = self.get_environment(environment_name)?; if let Some(environment) = environment { target.name = self.worker_name(environment_name); if let Some(account_id) = &environment.account_id { target.account_id = account_id.clone(); } if let Some(webpack_config) = &environment.webpack_config { target.webpack_config = Some(webpack_config.clone()); } // don't inherit kv namespaces because it is an anti-pattern to use the same namespaces across multiple environments target.kv_namespaces = get_namespaces(environment.kv_namespaces.clone(), preview)?; // don't inherit vars target.vars = environment.vars.clone(); } Ok(target) } pub fn get_environment( &self, environment_name: Option<&str>, ) -> Result<Option<&Environment>, failure::Error> { // check for user-specified environment name if let Some(environment_name) = environment_name { if let Some(environment_table) = &self.env { if let Some(environment) = environment_table.get(environment_name) { Ok(Some(environment)) } else { failure::bail!(format!( "{} Could not find environment with name \"{}\"", emoji::WARN, environment_name )) } } else { failure::bail!(format!( "{} There are no environments specified in your wrangler.toml", emoji::WARN )) } } else { Ok(None) } } fn warn_on_account_info(&self) { let account_id_env = env::var("CF_ACCOUNT_ID").is_ok(); let zone_id_env = env::var("CF_ZONE_ID").is_ok(); let mut top_level_fields: Vec<String> = Vec::new(); if !account_id_env { top_level_fields.push("account_id".to_string()); } if let Some(kv_namespaces) = &self.kv_namespaces { for kv_namespace in kv_namespaces { top_level_fields.push(format!( "kv-namespace {} needs a namespace_id", kv_namespace.binding )); } } if let Some(route) = &self.route { if !route.is_empty() { top_level_fields.push("route".to_string()); } } if let Some(zone_id) = &self.zone_id { if !zone_id.is_empty() && !zone_id_env { top_level_fields.push("zone_id".to_string()); } } let mut env_fields: HashMap<String, Vec<String>> = HashMap::new(); if let Some(env) = &self.env { for (env_name, env) in env { let mut current_env_fields: Vec<String> = Vec::new(); if env.account_id.is_some() && !account_id_env { current_env_fields.push("account_id".to_string()); } if let Some(kv_namespaces) = &env.kv_namespaces { for kv_namespace in kv_namespaces { current_env_fields.push(format!( "kv-namespace {} needs a namespace_id", kv_namespace.binding )); } } if let Some(route) = &env.route { if !route.is_empty() { current_env_fields.push("route".to_string()); } } if let Some(zone_id) = &env.zone_id { if !zone_id.is_empty() && !zone_id_env { current_env_fields.push("zone_id".to_string()); } } if !current_env_fields.is_empty() { env_fields.insert(env_name.to_string(), current_env_fields); } } } let has_top_level_fields = !top_level_fields.is_empty(); let has_env_fields = !env_fields.is_empty(); let mut needs_new_line = false; if has_top_level_fields || has_env_fields { let toml_msg = styles::highlight("wrangler.toml"); let 
account_id_msg = styles::highlight("account_id"); let zone_id_msg = styles::highlight("zone_id"); let dash_url = styles::url("https://dash.cloudflare.com"); message::help( &format!("You will need to update the following fields in the created {} file before continuing:", toml_msg) ); message::help(&format!( "You can find your {} in the right sidebar of your account's Workers page, and {} in the right sidebar of a zone's overview tab at {}", account_id_msg, zone_id_msg, dash_url )); if has_top_level_fields { needs_new_line = true; for top_level_field in top_level_fields { println!("- {}", top_level_field); } } if has_env_fields { for (env_name, env_fields) in env_fields { if needs_new_line { println!(); } println!("[env.{}]", env_name); needs_new_line = true; for env_field in env_fields { println!(" - {}", env_field); } } } } } } impl FromStr for Manifest { type Err = toml::de::Error; fn from_str(serialized_toml: &str) -> Result<Self, Self::Err> { toml::from_str(serialized_toml) } } fn read_config(config_path: &Path) -> Result<Config, failure::Error> { let mut config = Config::new(); let config_str = config_path .to_str() .expect("project config path should be a string"); config.merge(File::with_name(config_str))?; // Eg.. `CF_ACCOUNT_AUTH_KEY=farts` would set the `account_auth_key` key config.merge(config::Environment::with_prefix("CF"))?; Ok(config) } fn check_for_duplicate_names(manifest: &Manifest) -> Result<(), failure::Error> { let mut names: HashSet<String> = HashSet::new(); let mut duplicate_names: HashSet<String> = HashSet::new(); names.insert(manifest.name.to_string()); if let Some(environments) = &manifest.env { for (_, environment) in environments.iter() { if let Some(name) = &environment.name { if names.contains(name) && !duplicate_names.contains(name) { duplicate_names.insert(name.to_string()); } else { names.insert(name.to_string()); } } } } let duplicate_name_string = duplicate_names .clone() .into_iter() .collect::<Vec<String>>() .join(", "); let duplicate_message = match duplicate_names.len() { 1 => Some("this name is duplicated".to_string()), n if n >= 2 => Some("these names are duplicated".to_string()), _ => None, }; if let Some(message) = duplicate_message { failure::bail!(format!( "{} Each name in your `wrangler.toml` must be unique, {}: {}", emoji::WARN, message, duplicate_name_string )) } Ok(()) } fn get_namespaces( kv_namespaces: Option<Vec<ConfigKvNamespace>>, preview: bool, ) -> Result<Vec<KvNamespace>, failure::Error> { if let Some(namespaces) = kv_namespaces { namespaces.into_iter().map(|ns| { if preview { if let Some(preview_id) = &ns.preview_id { if let Some(id) = &ns.id { if preview_id == id { message::warn("Specifying the same KV namespace ID for both preview and production sessions may cause bugs in your production worker! Proceed with caution."); } } Ok(KvNamespace { id: preview_id.to_string(), binding: ns.binding.to_string(), }) } else { failure::bail!("In order to preview a worker with KV namespaces, you must designate a preview_id for each KV namespace you'd like to preview.") } } else if let Some(id) = &ns.id { Ok(KvNamespace { id: id.to_string(), binding: ns.binding, }) } else { failure::bail!("You must specify the namespace ID in the id field for the namespace with binding \"{}\"", &ns.binding) } }).collect() } else { Ok(Vec::new()) } }
Manifest
ihc.py
"""IHC switch platform. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/switch.ihc/ """ import voluptuous as vol from homeassistant.components.ihc import ( validate_name, IHC_DATA, IHC_CONTROLLER, IHC_INFO) from homeassistant.components.ihc.ihcdevice import IHCDevice from homeassistant.components.switch import SwitchDevice, PLATFORM_SCHEMA from homeassistant.const import CONF_ID, CONF_NAME, CONF_SWITCHES import homeassistant.helpers.config_validation as cv DEPENDENCIES = ['ihc'] PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_SWITCHES, default=[]): vol.All(cv.ensure_list, [ vol.All({ vol.Required(CONF_ID): cv.positive_int, vol.Optional(CONF_NAME): cv.string, }, validate_name) ]) }) def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the IHC switch platform.""" ihc_controller = hass.data[IHC_DATA][IHC_CONTROLLER] info = hass.data[IHC_DATA][IHC_INFO] devices = [] if discovery_info: for name, device in discovery_info.items(): ihc_id = device['ihc_id'] product = device['product'] switch = IHCSwitch(ihc_controller, name, ihc_id, info, product) devices.append(switch) else: switches = config[CONF_SWITCHES] for switch in switches: ihc_id = switch[CONF_ID] name = switch[CONF_NAME] sensor = IHCSwitch(ihc_controller, name, ihc_id, info) devices.append(sensor) add_devices(devices) class IHCSwitch(IHCDevice, SwitchDevice): """IHC Switch.""" def __init__(self, ihc_controller, name: str, ihc_id: int, info: bool, product=None) -> None: """Initialize the IHC switch.""" super().__init__(ihc_controller, name, ihc_id, product) self._state = False @property def is_on(self): """Return true if switch is on.""" return self._state def turn_on(self, **kwargs): """Turn the switch on.""" self.ihc_controller.set_runtime_value_bool(self.ihc_id, True) def turn_off(self, **kwargs): """Turn the device off.""" self.ihc_controller.set_runtime_value_bool(self.ihc_id, False) def on_ihc_change(self, ihc_id, value): """Handle IHC resource change."""
self.schedule_update_ha_state()
self._state = value
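# Self-contained sketch (FakeIHCSwitch is hypothetical, not Home Assistant code)
# showing why on_ihc_change stores the value before scheduling the state update:
# Home Assistant reads is_on once the scheduled update runs, so _state must
# already hold the new value at that point.
class FakeIHCSwitch:
    def __init__(self):
        self._state = False
        self.reported = []

    def schedule_update_ha_state(self):
        self.reported.append(self._state)  # stand-in for HA reading is_on later

    def on_ihc_change(self, ihc_id, value):
        self._state = value
        self.schedule_update_ha_state()

switch = FakeIHCSwitch()
switch.on_ihc_change(ihc_id=1, value=True)
assert switch.reported == [True]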
menu.js
/** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ import { ChangeDetectionStrategy, Component, ContentChildren, Directive, ElementRef, EventEmitter, Inject, InjectionToken, Input, NgModule, Optional, Output, Self, TemplateRef, ViewChild, ViewContainerRef, ViewEncapsulation } from '@angular/core'; import { CommonModule } from '@angular/common'; import { ESCAPE, LEFT_ARROW, MATERIAL_COMPATIBILITY_MODE, MdCommonModule, MdRippleModule, RIGHT_ARROW, mixinDisabled } from '@angular/material/core'; import { Overlay, OverlayConfig, OverlayModule } from '@angular/cdk/overlay'; import { Subject } from 'rxjs/Subject'; import { FocusKeyManager, isFakeMousedownFromScreenReader } from '@angular/cdk/a11y'; import { Subscription } from 'rxjs/Subscription'; import { animate, state, style, transition, trigger } from '@angular/animations'; import { merge } from 'rxjs/observable/merge'; import { RxChain, filter, startWith, switchMap } from '@angular/cdk/rxjs'; import { Directionality } from '@angular/cdk/bidi'; import { LEFT_ARROW as LEFT_ARROW$1, RIGHT_ARROW as RIGHT_ARROW$1 } from '@angular/cdk/keycodes'; import { TemplatePortal } from '@angular/cdk/portal'; import { of } from 'rxjs/observable/of'; /** * Throws an exception for the case when menu trigger doesn't have a valid md-menu instance * \@docs-private * @return {?} */ function throwMdMenuMissingError() { throw Error(`md-menu-trigger: must pass in an md-menu instance. Example: <md-menu #menu="mdMenu"></md-menu> <button [mdMenuTriggerFor]="menu"></button>`); } /** * Throws an exception for the case when menu's x-position value isn't valid. * In other words, it doesn't match 'before' or 'after'. * \@docs-private * @return {?} */ function
() { throw Error(`x-position value must be either 'before' or after'. Example: <md-menu x-position="before" #menu="mdMenu"></md-menu>`); } /** * Throws an exception for the case when menu's y-position value isn't valid. * In other words, it doesn't match 'above' or 'below'. * \@docs-private * @return {?} */ function throwMdMenuInvalidPositionY() { throw Error(`y-position value must be either 'above' or below'. Example: <md-menu y-position="above" #menu="mdMenu"></md-menu>`); } /** * \@docs-private */ class MdMenuItemBase { } const _MdMenuItemMixinBase = mixinDisabled(MdMenuItemBase); /** * This directive is intended to be used inside an md-menu tag. * It exists mostly to set the role attribute. */ class MdMenuItem extends _MdMenuItemMixinBase { /** * @param {?} _elementRef */ constructor(_elementRef) { super(); this._elementRef = _elementRef; /** * Stream that emits when the menu item is hovered. */ this.hover = new Subject(); /** * Whether the menu item is highlighted. */ this._highlighted = false; /** * Whether the menu item acts as a trigger for a sub-menu. */ this._triggersSubmenu = false; } /** * Focuses the menu item. * @return {?} */ focus() { this._getHostElement().focus(); } /** * @return {?} */ ngOnDestroy() { this.hover.complete(); } /** * Used to set the `tabindex`. * @return {?} */ _getTabIndex() { return this.disabled ? '-1' : '0'; } /** * Returns the host DOM element. * @return {?} */ _getHostElement() { return this._elementRef.nativeElement; } /** * Prevents the default element actions if it is disabled. * @param {?} event * @return {?} */ _checkDisabled(event) { if (this.disabled) { event.preventDefault(); event.stopPropagation(); } } /** * Emits to the hover stream. * @return {?} */ _emitHoverEvent() { if (!this.disabled) { this.hover.next(this); } } } MdMenuItem.decorators = [ { type: Component, args: [{selector: '[md-menu-item], [mat-menu-item]', inputs: ['disabled'], host: { 'role': 'menuitem', 'class': 'mat-menu-item', '[class.mat-menu-item-highlighted]': '_highlighted', '[class.mat-menu-item-submenu-trigger]': '_triggersSubmenu', '[attr.tabindex]': '_getTabIndex()', '[attr.aria-disabled]': 'disabled.toString()', '[attr.disabled]': 'disabled || null', '(click)': '_checkDisabled($event)', '(mouseenter)': '_emitHoverEvent()', }, changeDetection: ChangeDetectionStrategy.OnPush, encapsulation: ViewEncapsulation.None, preserveWhitespaces: false, template: "<ng-content></ng-content><div class=\"mat-menu-ripple\" *ngIf=\"!disabled\" mat-ripple [matRippleTrigger]=\"_getHostElement()\"></div>", exportAs: 'mdMenuItem, matMenuItem', viewProviders: [{ provide: MATERIAL_COMPATIBILITY_MODE, useValue: true }], },] }, ]; /** * @nocollapse */ MdMenuItem.ctorParameters = () => [ { type: ElementRef, }, ]; /** * Below are all the animations for the md-menu component. * Animation duration and timing values are based on: * https://material.io/guidelines/components/menus.html#menus-usage */ /** * This animation controls the menu panel's entry and exit from the page. * * When the menu panel is added to the DOM, it scales in and fades in its border. * * When the menu panel is removed from the DOM, it simply fades out after a brief * delay to display the ripple. */ // TODO(kara): switch to :enter and :leave once Mobile Safari is sorted out. const transformMenu = trigger('transformMenu', [ state('void', style({ opacity: 0, // This starts off from 0.01, instead of 0, because there's an issue in the Angular animations // as of 4.2, which causes the animation to be skipped if it starts from 0. 
transform: 'scale(0.01, 0.01)' })), state('enter-start', style({ opacity: 1, transform: 'scale(1, 0.5)' })), state('enter', style({ transform: 'scale(1, 1)' })), transition('void => enter-start', animate('100ms linear')), transition('enter-start => enter', animate('300ms cubic-bezier(0.25, 0.8, 0.25, 1)')), transition('* => void', animate('150ms 50ms linear', style({ opacity: 0 }))) ]); /** * This animation fades in the background color and content of the menu panel * after its containing element is scaled in. */ const fadeInItems = trigger('fadeInItems', [ state('showing', style({ opacity: 1 })), transition('void => *', [ style({ opacity: 0 }), animate('400ms 100ms cubic-bezier(0.55, 0, 0.55, 0.2)') ]) ]); /** * Injection token to be used to override the default options for `md-menu`. */ const MD_MENU_DEFAULT_OPTIONS = new InjectionToken('md-menu-default-options'); /** * Start elevation for the menu panel. * \@docs-private */ const MD_MENU_BASE_ELEVATION = 2; class MdMenu { /** * @param {?} _elementRef * @param {?} _defaultOptions */ constructor(_elementRef, _defaultOptions) { this._elementRef = _elementRef; this._defaultOptions = _defaultOptions; this._xPosition = this._defaultOptions.xPosition; this._yPosition = this._defaultOptions.yPosition; /** * Subscription to tab events on the menu panel */ this._tabSubscription = Subscription.EMPTY; /** * Config object to be passed into the menu's ngClass */ this._classList = {}; /** * Current state of the panel animation. */ this._panelAnimationState = 'void'; /** * Whether the menu should overlap its trigger. */ this.overlapTrigger = this._defaultOptions.overlapTrigger; /** * Event emitted when the menu is closed. */ this.close = new EventEmitter(); } /** * Position of the menu in the X axis. * @return {?} */ get xPosition() { return this._xPosition; } /** * @param {?} value * @return {?} */ set xPosition(value) { if (value !== 'before' && value !== 'after') { throwMdMenuInvalidPositionX(); } this._xPosition = value; this.setPositionClasses(); } /** * Position of the menu in the Y axis. * @return {?} */ get yPosition() { return this._yPosition; } /** * @param {?} value * @return {?} */ set yPosition(value) { if (value !== 'above' && value !== 'below') { throwMdMenuInvalidPositionY(); } this._yPosition = value; this.setPositionClasses(); } /** * This method takes classes set on the host md-menu element and applies them on the * menu template that displays in the overlay container. Otherwise, it's difficult * to style the containing menu from outside the component. * @param {?} classes list of class names * @return {?} */ set classList(classes) { if (classes && classes.length) { this._classList = classes.split(' ').reduce((obj, className) => { obj[className] = true; return obj; }, {}); this._elementRef.nativeElement.className = ''; this.setPositionClasses(); } } /** * @return {?} */ ngAfterContentInit() { this._keyManager = new FocusKeyManager(this.items).withWrap(); this._tabSubscription = this._keyManager.tabOut.subscribe(() => this.close.emit('keydown')); } /** * @return {?} */ ngOnDestroy() { this._tabSubscription.unsubscribe(); this.close.emit(); this.close.complete(); } /** * Stream that emits whenever the hovered menu item changes. * @return {?} */ hover() { return RxChain.from(this.items.changes) .call(startWith, this.items) .call(switchMap, (items) => merge(...items.map(item => item.hover))) .result(); } /** * Handle a keyboard event from the menu, delegating to the appropriate action. 
* @param {?} event * @return {?} */ _handleKeydown(event) { switch (event.keyCode) { case ESCAPE: this.close.emit('keydown'); event.stopPropagation(); break; case LEFT_ARROW: if (this.parentMenu && this.direction === 'ltr') { this.close.emit('keydown'); } break; case RIGHT_ARROW: if (this.parentMenu && this.direction === 'rtl') { this.close.emit('keydown'); } break; default: this._keyManager.onKeydown(event); } } /** * Focus the first item in the menu. This method is used by the menu trigger * to focus the first item when the menu is opened by the ENTER key. * @return {?} */ focusFirstItem() { this._keyManager.setFirstItemActive(); } /** * It's necessary to set position-based classes to ensure the menu panel animation * folds out from the correct direction. * @param {?=} posX * @param {?=} posY * @return {?} */ setPositionClasses(posX = this.xPosition, posY = this.yPosition) { this._classList['mat-menu-before'] = posX === 'before'; this._classList['mat-menu-after'] = posX === 'after'; this._classList['mat-menu-above'] = posY === 'above'; this._classList['mat-menu-below'] = posY === 'below'; } /** * Sets the menu panel elevation. * @param {?} depth Number of parent menus that come before the menu. * @return {?} */ setElevation(depth) { // The elevation starts at the base and increases by one for each level. const /** @type {?} */ newElevation = `mat-elevation-z${MD_MENU_BASE_ELEVATION + depth}`; const /** @type {?} */ customElevation = Object.keys(this._classList).find(c => c.startsWith('mat-elevation-z')); if (!customElevation || customElevation === this._previousElevation) { if (this._previousElevation) { this._classList[this._previousElevation] = false; } this._classList[newElevation] = true; this._previousElevation = newElevation; } } /** * Starts the enter animation. * @return {?} */ _startAnimation() { this._panelAnimationState = 'enter-start'; } /** * Resets the panel animation to its initial state. * @return {?} */ _resetAnimation() { this._panelAnimationState = 'void'; } /** * Callback that is invoked when the panel animation completes. * @param {?} event * @return {?} */ _onAnimationDone(event) { // After the initial expansion is done, trigger the second phase of the enter animation. 
if (event.toState === 'enter-start') { this._panelAnimationState = 'enter'; } } } MdMenu.decorators = [ { type: Component, args: [{selector: 'md-menu, mat-menu', template: "<ng-template><div class=\"mat-menu-panel\" [ngClass]=\"_classList\" (keydown)=\"_handleKeydown($event)\" (click)=\"close.emit('click')\" [@transformMenu]=\"_panelAnimationState\" (@transformMenu.done)=\"_onAnimationDone($event)\" role=\"menu\"><div class=\"mat-menu-content\" [@fadeInItems]=\"'showing'\"><ng-content></ng-content></div></div></ng-template>", styles: [".mat-menu-panel{min-width:112px;max-width:280px;overflow:auto;-webkit-overflow-scrolling:touch;max-height:calc(100vh - 48px);border-radius:2px}.mat-menu-panel:not([class*=mat-elevation-z]){box-shadow:0 3px 1px -2px rgba(0,0,0,.2),0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12)}.mat-menu-panel.mat-menu-after.mat-menu-below{transform-origin:left top}.mat-menu-panel.mat-menu-after.mat-menu-above{transform-origin:left bottom}.mat-menu-panel.mat-menu-before.mat-menu-below{transform-origin:right top}.mat-menu-panel.mat-menu-before.mat-menu-above{transform-origin:right bottom}[dir=rtl] .mat-menu-panel.mat-menu-after.mat-menu-below{transform-origin:right top}[dir=rtl] .mat-menu-panel.mat-menu-after.mat-menu-above{transform-origin:right bottom}[dir=rtl] .mat-menu-panel.mat-menu-before.mat-menu-below{transform-origin:left top}[dir=rtl] .mat-menu-panel.mat-menu-before.mat-menu-above{transform-origin:left bottom}.mat-menu-panel.ng-animating{pointer-events:none}@media screen and (-ms-high-contrast:active){.mat-menu-panel{outline:solid 1px}}.mat-menu-content{padding-top:8px;padding-bottom:8px}.mat-menu-item{-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;cursor:pointer;outline:0;border:none;-webkit-tap-highlight-color:transparent;white-space:nowrap;overflow:hidden;text-overflow:ellipsis;display:block;line-height:48px;height:48px;padding:0 16px;text-align:left;text-decoration:none;position:relative}.mat-menu-item[disabled]{cursor:default}[dir=rtl] .mat-menu-item{text-align:right}.mat-menu-item .mat-icon{margin-right:16px}[dir=rtl] .mat-menu-item .mat-icon{margin-left:16px;margin-right:0}.mat-menu-item .mat-icon{vertical-align:middle}.mat-menu-item-submenu-trigger{padding-right:32px}.mat-menu-item-submenu-trigger::after{width:0;height:0;border-style:solid;border-width:5px 0 5px 5px;border-color:transparent transparent transparent currentColor;content:'';display:inline-block;position:absolute;top:50%;right:16px;transform:translateY(-50%)}[dir=rtl] .mat-menu-item-submenu-trigger{padding-right:8px;padding-left:32px}[dir=rtl] .mat-menu-item-submenu-trigger::after{right:auto;left:16px;transform:rotateY(180deg) translateY(-50%)}button.mat-menu-item{width:100%}.mat-menu-ripple{top:0;left:0;right:0;bottom:0;position:absolute}"], changeDetection: ChangeDetectionStrategy.OnPush, encapsulation: ViewEncapsulation.None, preserveWhitespaces: false, animations: [ transformMenu, fadeInItems ], exportAs: 'mdMenu, matMenu' },] }, ]; /** * @nocollapse */ MdMenu.ctorParameters = () => [ { type: ElementRef, }, { type: undefined, decorators: [{ type: Inject, args: [MD_MENU_DEFAULT_OPTIONS,] },] }, ]; MdMenu.propDecorators = { 'xPosition': [{ type: Input },], 'yPosition': [{ type: Input },], 'templateRef': [{ type: ViewChild, args: [TemplateRef,] },], 'items': [{ type: ContentChildren, args: [MdMenuItem,] },], 'overlapTrigger': [{ type: Input },], 'classList': [{ type: Input, args: ['class',] },], 'close': [{ type: Output },], }; /** * Injection token 
that determines the scroll handling while the menu is open. */ const MD_MENU_SCROLL_STRATEGY = new InjectionToken('md-menu-scroll-strategy'); /** * \@docs-private * @param {?} overlay * @return {?} */ function MD_MENU_SCROLL_STRATEGY_PROVIDER_FACTORY(overlay) { return () => overlay.scrollStrategies.reposition(); } /** * \@docs-private */ const MD_MENU_SCROLL_STRATEGY_PROVIDER = { provide: MD_MENU_SCROLL_STRATEGY, deps: [Overlay], useFactory: MD_MENU_SCROLL_STRATEGY_PROVIDER_FACTORY, }; /** * Default top padding of the menu panel. */ const MENU_PANEL_TOP_PADDING = 8; /** * This directive is intended to be used in conjunction with an md-menu tag. It is * responsible for toggling the display of the provided menu instance. */ class MdMenuTrigger { /** * @param {?} _overlay * @param {?} _element * @param {?} _viewContainerRef * @param {?} _scrollStrategy * @param {?} _parentMenu * @param {?} _menuItemInstance * @param {?} _dir */ constructor(_overlay, _element, _viewContainerRef, _scrollStrategy, _parentMenu, _menuItemInstance, _dir) { this._overlay = _overlay; this._element = _element; this._viewContainerRef = _viewContainerRef; this._scrollStrategy = _scrollStrategy; this._parentMenu = _parentMenu; this._menuItemInstance = _menuItemInstance; this._dir = _dir; this._overlayRef = null; this._menuOpen = false; this._closeSubscription = Subscription.EMPTY; this._positionSubscription = Subscription.EMPTY; this._hoverSubscription = Subscription.EMPTY; this._openedByMouse = false; /** * Event emitted when the associated menu is opened. */ this.onMenuOpen = new EventEmitter(); /** * Event emitted when the associated menu is closed. */ this.onMenuClose = new EventEmitter(); if (_menuItemInstance) { _menuItemInstance._triggersSubmenu = this.triggersSubmenu(); } } /** * @deprecated * @return {?} */ get _deprecatedMdMenuTriggerFor() { return this.menu; } /** * @param {?} v * @return {?} */ set _deprecatedMdMenuTriggerFor(v) { this.menu = v; } /** * @deprecated * @return {?} */ get _deprecatedMatMenuTriggerFor() { return this.menu; } /** * @param {?} v * @return {?} */ set _deprecatedMatMenuTriggerFor(v) { this.menu = v; } /** * @return {?} */ get _matMenuTriggerFor() { return this.menu; } /** * @param {?} v * @return {?} */ set _matMenuTriggerFor(v) { this.menu = v; } /** * @return {?} */ ngAfterViewInit() { this._checkMenu(); this.menu.close.subscribe(reason => { this.closeMenu(); // If a click closed the menu, we should close the entire chain of nested menus. if (reason === 'click' && this._parentMenu) { this._parentMenu.close.emit(reason); } }); if (this.triggersSubmenu()) { // Subscribe to changes in the hovered item in order to toggle the panel. this._hoverSubscription = filter .call(this._parentMenu.hover(), active => active === this._menuItemInstance) .subscribe(() => { this._openedByMouse = true; this.openMenu(); }); } } /** * @return {?} */ ngOnDestroy() { if (this._overlayRef) { this._overlayRef.dispose(); this._overlayRef = null; } this._cleanUpSubscriptions(); } /** * Whether the menu is open. * @return {?} */ get menuOpen() { return this._menuOpen; } /** * The text direction of the containing app. * @return {?} */ get dir() { return this._dir && this._dir.value === 'rtl' ? 'rtl' : 'ltr'; } /** * Whether the menu triggers a sub-menu or a top-level one. * @return {?} */ triggersSubmenu() { return !!(this._menuItemInstance && this._parentMenu); } /** * Toggles the menu between the open and closed states. * @return {?} */ toggleMenu() { return this._menuOpen ? 
this.closeMenu() : this.openMenu(); } /** * Opens the menu. * @return {?} */ openMenu() { if (!this._menuOpen) { this._createOverlay().attach(this._portal); this._closeSubscription = this._menuClosingActions().subscribe(() => this.menu.close.emit()); this._initMenu(); if (this.menu instanceof MdMenu) { this.menu._startAnimation(); } } } /** * Closes the menu. * @return {?} */ closeMenu() { if (this._overlayRef && this.menuOpen) { this._resetMenu(); this._overlayRef.detach(); this._closeSubscription.unsubscribe(); this.menu.close.emit(); if (this.menu instanceof MdMenu) { this.menu._resetAnimation(); } } } /** * Focuses the menu trigger. * @return {?} */ focus() { this._element.nativeElement.focus(); } /** * This method sets the menu state to open and focuses the first item if * the menu was opened via the keyboard. * @return {?} */ _initMenu() { this.menu.parentMenu = this.triggersSubmenu() ? this._parentMenu : undefined; this.menu.direction = this.dir; this._setMenuElevation(); this._setIsMenuOpen(true); // Should only set focus if opened via the keyboard, so keyboard users can // can easily navigate menu items. According to spec, mouse users should not // see the focus style. if (!this._openedByMouse) { this.menu.focusFirstItem(); } } /** * Updates the menu elevation based on the amount of parent menus that it has. * @return {?} */ _setMenuElevation() { if (this.menu.setElevation) { let /** @type {?} */ depth = 0; let /** @type {?} */ parentMenu = this.menu.parentMenu; while (parentMenu) { depth++; parentMenu = parentMenu.parentMenu; } this.menu.setElevation(depth); } } /** * This method resets the menu when it's closed, most importantly restoring * focus to the menu trigger if the menu was opened via the keyboard. * @return {?} */ _resetMenu() { this._setIsMenuOpen(false); // Focus only needs to be reset to the host element if the menu was opened // by the keyboard and manually shifted to the first menu item. if (!this._openedByMouse) { this.focus(); } this._openedByMouse = false; } /** * @param {?} isOpen * @return {?} */ _setIsMenuOpen(isOpen) { this._menuOpen = isOpen; this._menuOpen ? this.onMenuOpen.emit() : this.onMenuClose.emit(); if (this.triggersSubmenu()) { this._menuItemInstance._highlighted = isOpen; } } /** * This method checks that a valid instance of MdMenu has been passed into * mdMenuTriggerFor. If not, an exception is thrown. * @return {?} */ _checkMenu() { if (!this.menu) { throwMdMenuMissingError(); } } /** * This method creates the overlay from the provided menu's template and saves its * OverlayRef so that it can be attached to the DOM when openMenu is called. * @return {?} */ _createOverlay() { if (!this._overlayRef) { this._portal = new TemplatePortal(this.menu.templateRef, this._viewContainerRef); const /** @type {?} */ config = this._getOverlayConfig(); this._subscribeToPositions(/** @type {?} */ (config.positionStrategy)); this._overlayRef = this._overlay.create(config); } return this._overlayRef; } /** * This method builds the configuration object needed to create the overlay, the OverlayState. * @return {?} OverlayConfig */ _getOverlayConfig() { return new OverlayConfig({ positionStrategy: this._getPosition(), hasBackdrop: !this.triggersSubmenu(), backdropClass: 'cdk-overlay-transparent-backdrop', direction: this.dir, scrollStrategy: this._scrollStrategy() }); } /** * Listens to changes in the position of the overlay and sets the correct classes * on the menu based on the new position. 
This ensures the animation origin is always * correct, even if a fallback position is used for the overlay. * @param {?} position * @return {?} */ _subscribeToPositions(position) { this._positionSubscription = position.onPositionChange.subscribe(change => { const /** @type {?} */ posX = change.connectionPair.overlayX === 'start' ? 'after' : 'before'; const /** @type {?} */ posY = change.connectionPair.overlayY === 'top' ? 'below' : 'above'; this.menu.setPositionClasses(posX, posY); }); } /** * This method builds the position strategy for the overlay, so the menu is properly connected * to the trigger. * @return {?} ConnectedPositionStrategy */ _getPosition() { let [originX, originFallbackX] = this.menu.xPosition === 'before' ? ['end', 'start'] : ['start', 'end']; let [overlayY, overlayFallbackY] = this.menu.yPosition === 'above' ? ['bottom', 'top'] : ['top', 'bottom']; let [originY, originFallbackY] = [overlayY, overlayFallbackY]; let [overlayX, overlayFallbackX] = [originX, originFallbackX]; let /** @type {?} */ offsetY = 0; if (this.triggersSubmenu()) { // When the menu is a sub-menu, it should always align itself // to the edges of the trigger, instead of overlapping it. overlayFallbackX = originX = this.menu.xPosition === 'before' ? 'start' : 'end'; originFallbackX = overlayX = originX === 'end' ? 'start' : 'end'; // TODO(crisbeto): this should be a function, once the overlay supports it. // Right now it will be wrong for the fallback positions. offsetY = overlayY === 'bottom' ? MENU_PANEL_TOP_PADDING : -MENU_PANEL_TOP_PADDING; } else if (!this.menu.overlapTrigger) { originY = overlayY === 'top' ? 'bottom' : 'top'; originFallbackY = overlayFallbackY === 'top' ? 'bottom' : 'top'; } return this._overlay.position() .connectedTo(this._element, { originX, originY }, { overlayX, overlayY }) .withDirection(this.dir) .withOffsetY(offsetY) .withFallbackPosition({ originX: originFallbackX, originY }, { overlayX: overlayFallbackX, overlayY }) .withFallbackPosition({ originX, originY: originFallbackY }, { overlayX, overlayY: overlayFallbackY }) .withFallbackPosition({ originX: originFallbackX, originY: originFallbackY }, { overlayX: overlayFallbackX, overlayY: overlayFallbackY }); } /** * Cleans up the active subscriptions. * @return {?} */ _cleanUpSubscriptions() { this._closeSubscription.unsubscribe(); this._positionSubscription.unsubscribe(); this._hoverSubscription.unsubscribe(); } /** * Returns a stream that emits whenever an action that should close the menu occurs. * @return {?} */ _menuClosingActions() { const /** @type {?} */ backdrop = ((this._overlayRef)).backdropClick(); const /** @type {?} */ parentClose = this._parentMenu ? this._parentMenu.close : of(null); const /** @type {?} */ hover = this._parentMenu ? RxChain.from(this._parentMenu.hover()) .call(filter, active => active !== this._menuItemInstance) .call(filter, () => this._menuOpen) .result() : of(null); return merge(backdrop, parentClose, hover); } /** * Handles mouse presses on the trigger. * @param {?} event * @return {?} */ _handleMousedown(event) { if (!isFakeMousedownFromScreenReader(event)) { this._openedByMouse = true; // Since clicking on the trigger won't close the menu if it opens a sub-menu, // we should prevent focus from moving onto it via click to avoid the // highlight from lingering on the menu item. if (this.triggersSubmenu()) { event.preventDefault(); } } } /** * Handles key presses on the trigger. 
* @param {?} event * @return {?} */ _handleKeydown(event) { const /** @type {?} */ keyCode = event.keyCode; if (this.triggersSubmenu() && ((keyCode === RIGHT_ARROW$1 && this.dir === 'ltr') || (keyCode === LEFT_ARROW$1 && this.dir === 'rtl'))) { this.openMenu(); } } /** * Handles click events on the trigger. * @param {?} event * @return {?} */ _handleClick(event) { if (this.triggersSubmenu()) { // Stop event propagation to avoid closing the parent menu. event.stopPropagation(); this.openMenu(); } else { this.toggleMenu(); } } } MdMenuTrigger.decorators = [ { type: Directive, args: [{ selector: `[md-menu-trigger-for], [mat-menu-trigger-for], [mdMenuTriggerFor], [matMenuTriggerFor]`, host: { 'aria-haspopup': 'true', '(mousedown)': '_handleMousedown($event)', '(keydown)': '_handleKeydown($event)', '(click)': '_handleClick($event)', }, exportAs: 'mdMenuTrigger, matMenuTrigger' },] }, ]; /** * @nocollapse */ MdMenuTrigger.ctorParameters = () => [ { type: Overlay, }, { type: ElementRef, }, { type: ViewContainerRef, }, { type: undefined, decorators: [{ type: Inject, args: [MD_MENU_SCROLL_STRATEGY,] },] }, { type: MdMenu, decorators: [{ type: Optional },] }, { type: MdMenuItem, decorators: [{ type: Optional }, { type: Self },] }, { type: Directionality, decorators: [{ type: Optional },] }, ]; MdMenuTrigger.propDecorators = { '_deprecatedMdMenuTriggerFor': [{ type: Input, args: ['md-menu-trigger-for',] },], '_deprecatedMatMenuTriggerFor': [{ type: Input, args: ['mat-menu-trigger-for',] },], '_matMenuTriggerFor': [{ type: Input, args: ['matMenuTriggerFor',] },], 'menu': [{ type: Input, args: ['mdMenuTriggerFor',] },], 'onMenuOpen': [{ type: Output },], 'onMenuClose': [{ type: Output },], }; class MdMenuModule { } MdMenuModule.decorators = [ { type: NgModule, args: [{ imports: [ OverlayModule, CommonModule, MdRippleModule, MdCommonModule, ], exports: [MdMenu, MdMenuItem, MdMenuTrigger, MdCommonModule], declarations: [MdMenu, MdMenuItem, MdMenuTrigger], providers: [ MD_MENU_SCROLL_STRATEGY_PROVIDER, { provide: MD_MENU_DEFAULT_OPTIONS, useValue: { overlapTrigger: true, xPosition: 'after', yPosition: 'below', }, } ], },] }, ]; /** * @nocollapse */ MdMenuModule.ctorParameters = () => []; /** * Generated bundle index. Do not edit. */ export { MD_MENU_SCROLL_STRATEGY, fadeInItems, transformMenu, MdMenuModule, MdMenu, MD_MENU_DEFAULT_OPTIONS, MdMenuItem, MdMenuTrigger, MD_MENU_DEFAULT_OPTIONS as MAT_MENU_DEFAULT_OPTIONS, MdMenu as MatMenu, MdMenuItem as MatMenuItem, MdMenuModule as MatMenuModule, MdMenuTrigger as MatMenuTrigger, MdMenuItemBase as ɵa17, _MdMenuItemMixinBase as ɵb17, MD_MENU_SCROLL_STRATEGY_PROVIDER as ɵd17, MD_MENU_SCROLL_STRATEGY_PROVIDER_FACTORY as ɵc17 }; //# sourceMappingURL=menu.js.map
throwMdMenuInvalidPositionX
version.go
package managementgroups // Copyright (c) Microsoft and contributors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // limitations under the License. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string
// Version returns the semantic version (see http://semver.org) of the client. func Version() string { return "0.0.0" } // tag: services/preview/resources/mgmt/2017-11-01-preview/managementgroups/v0.0.0
{ return "Azure-SDK-For-Go/" + Version() + " managementgroups/2017-11-01-preview" }
navDropDown.spec.js
import React from 'react'; import NavDropDown from '../../../components/shared/NavDropDown'; import userMock from '../../__mocks__/user.mock'; function setup() { const props = { fullname: userMock.fullname, handleLogout: jest.fn() }; const wrapper = shallow(<NavDropDown {...props} />); return { wrapper }; } describe('Nav Dropdown', () => { it('should render with right amount of elements', () => { const { wrapper } = setup(); expect(wrapper).toMatchSnapshot(); }); it('should call a function to logout when the logout link is clicked', () => { const { wrapper } = setup(); const logoutButton = wrapper.find('.logout__button');
}); });
const handleLogoutSpy = jest.spyOn(wrapper.instance().props, 'handleLogout'); expect(logoutButton.length).toBe(1); logoutButton.simulate('click'); expect(handleLogoutSpy).toHaveBeenCalledTimes(1);
getArrayOf.ts
export default function
<T>(array: T | T[]): T[] { if (Array.isArray(array)) { return array; } if (!!array) { return [array]; } return []; }
getArrayOf
_operations.py
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class Operations: """Operations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.netapp.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None:
def list( self, **kwargs: Any ) -> AsyncIterable["_models.OperationListResult"]: """Describes the Resource Provider. Lists all of the available Microsoft.NetApp Rest API operations. :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either OperationListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.netapp.models.OperationListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-02-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('OperationListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/providers/Microsoft.NetApp/operations'} # type: ignore
self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config
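The Operations.list method above returns an AsyncItemPaged of Operation models. A minimal usage sketch, assuming the generated async client azure.mgmt.netapp.aio.NetAppManagementClient exposes this group as client.operations and that azure-identity is installed; the subscription id below is a placeholder:

# Hedged usage sketch: iterate the AsyncItemPaged returned by Operations.list.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.netapp.aio import NetAppManagementClient


async def show_operations(subscription_id: str) -> None:
    async with DefaultAzureCredential() as credential:
        async with NetAppManagementClient(credential, subscription_id) as client:
            # Each yielded element is an Operation model; print its name.
            async for op in client.operations.list():
                print(op.name)


if __name__ == "__main__":
    asyncio.run(show_operations("<subscription-id>"))  # placeholder id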
dial_example.py
import modi
""" Example script for the usage of dial module Make sure you connect 1 dial module and 1 speaker module to your network module """ if __name__ == "__main__": bundle = modi.MODI() dial = bundle.dials[0] speak = bundle.speakers[0] while True: speak.tune = 800, dial.degree time.sleep(0.02)
import time
isapi_wsgi.py
""" $Id$ This is a ISAPI extension for a wsgi with 2 handlers classes. - ISAPISimpleHandler which creates a new IsapiWsgiHandler object for each request. - ISAPIThreadPoolHandler where the wsgi requests are run on worker threads from the thread pool. Dependecies: - python 2.2+ - win32 extensions - wsgiref library from http://cvs.eby-sarna.com/wsgiref/ Based on isapi/test/extension_simple.py, PEP 333 etc """ __author__ = "Mark Rees <[email protected]>" __release__ = "0.4" __version__ = "$Rev$ $LastChangedDate$" __url__ = "http://isapi-wsgi.googlecode.com" __description__ = "ISAPI WSGI Handler" __license__ = "MIT" #this is first so that we can see import errors import sys if hasattr(sys, "isapidllhandle"): import win32traceutil try: import isapi except ImportError: raise ImportError("Could not find module isapi. isapi_wsgi requires pywin32") from isapi import isapicon, ExtensionError from isapi.simple import SimpleExtension from isapi.threaded_extension import ThreadPoolExtension from wsgiref.handlers import BaseHandler from wsgiref.util import shift_path_info import sys import os import stat import string import re try: from cStringIO import StringIO except ImportError: from StringIO import StringIO traceon = 0 def trace(*msgs): """Write trace message(s) so win32traceutil can display them""" if not traceon: return for msg in msgs: print(msg) class FoldedCaseString(str): """ From jaraco.util.string.FoldedCase: A case insensitive string class; behaves just like str except compares equal when the only variation is case. >>> s = FoldedCaseString('hello world') >>> s == 'Hello World' True >>> 'Hello World' == s True >>> s.index('O') 4 >>> s.split('O') ['hell', ' w', 'rld'] >>> sorted(map(FoldedCaseString, ['GAMMA', 'alpha', 'Beta'])) ['alpha', 'Beta', 'GAMMA'] """ def __lt__(self, other): return self.lower() < other.lower() def __gt__(self, other): return self.lower() > other.lower() def __eq__(self, other): return self.lower() == other.lower() def __hash__(self): return hash(self.lower()) # cache lower since it's likely to be called frequently. def lower(self): self._lower = super(FoldedCaseString, self).lower() self.lower = lambda: self._lower return self._lower def index(self, sub): return self.lower().index(sub.lower()) def split(self, splitter=' ', maxsplit=0): pattern = re.compile(re.escape(splitter), re.I) return pattern.split(self, maxsplit) class ECBDictAdapter(object): """ Adapt ECB to a read-only dictionary interface >>> from fakeecb import FakeECB >>> ecb = FakeECB() >>> ecb_dict = ECBDictAdapter(ecb) >>> ecb_dict['SCRIPT_NAME'] '/' >>> ecb_dict['PATH_INFO'] '/' """ def __init__(self, ecb): self.ecb = ecb if sys.version_info > (3,0): if ecb.Version >= 0x00060000: # we can handle UNICODE_* variables. self._get_variable = self._get_variable_py3k else: self._get_variable = self._get_variable_py3k_iis5 else: self._get_variable = self._get_variable_py2k def __getitem__(self, key): try: return self._get_variable(key) except ExtensionError: raise KeyError, key # a few helpers specific to the IIS and python version. def _get_variable(self, key): raise RuntimeError("not reached: replaced at runtime in the ctor") def _get_variable_py3k_iis5(self, key): # IIS5 doesn't support UNICODE_* variable names... return self.ecb.GetServerVariable(key).decode('latin-1') def _get_variable_py3k(self, key): # IIS6 and later on py3k - ask IIS for the unicode version. return self.ecb.GetServerVariable('UNICODE_' + key) def
(self, key): # py2k - just use normal string objects. return self.ecb.GetServerVariable(key) def path_references_application(path, apps): """ Return true if the first element in the path matches any string in the apps list. >>> path_references_application('/foo/bar', ['foo','baz']) True """ # assume separator is / nodes = filter(None, path.split('/')) return nodes and nodes[0] in apps def interpretPathInfo(ecb_server_vars, app_names=[]): """ Based on the a dictionary of ECB server variables and list of valid subapplication names, determine the correct PATH_INFO, SCRIPT_NAME, and IIS_EXTENSION_PATH. By valid, I mean SCRIPT_NAME + PATH_INFO is always the request path and SCRIPT_NAME is the path to the WSGi application and PATH_INFO is the path that the WSGI application expects to handle. In IIS, the path to the extension sometimes varies from the script name, particularly when the script map extenison is not '*'. IIS_EXTENSION_PATH is set to the path that leads to the extension. Return these values as a dict. For the following doctests, I use a convention: vappname : the IIS application appname : the wsgi application (may be ) subappX : a wsgi sub application (must always follow appname) proc : a method within the WSGI app (something that should appear in PATH_INFO) -------------------------- First some common examples Following is an example case where the extension is installed at the root of the site, the requested URL is /proc >>> ecb_vars = dict(SCRIPT_NAME='/proc', PATH_INFO='/proc', APPL_MD_PATH='/LM/W3SVC/1/ROOT') >>> interpretPathInfo(ecb_vars) == dict(SCRIPT_NAME='', PATH_INFO='/proc', IIS_EXTENSION_PATH='') True An example where the extension is installed to a virtual directory below the root. URL is /vappname/proc >>> ecb_vars = dict(SCRIPT_NAME='/vappname/proc', PATH_INFO='/vappname/proc', APPL_MD_PATH='/LM/W3SVC/1/ROOT/vappname') >>> interpretPathInfo(ecb_vars) == dict(SCRIPT_NAME='/vappname', PATH_INFO='/proc', IIS_EXTENSION_PATH='/vappname') True An example where the extension is installed to a virtual directory below the root, and some subapps are present >>> subapps = ('subapp1', 'subapp2') URL is /vappname/proc >>> ecb_vars = dict(SCRIPT_NAME='/vappname/proc', PATH_INFO='/vappname/proc', APPL_MD_PATH='/LM/W3SVC/1/ROOT/vappname') >>> interpretPathInfo(ecb_vars, subapps) == dict(SCRIPT_NAME='/vappname', PATH_INFO='/proc', IIS_EXTENSION_PATH='/vappname') True URL is /vappname/subapp1/proc >>> ecb_vars = dict(SCRIPT_NAME='/vappname/subapp1/proc', PATH_INFO='/vappname/subapp1/proc', APPL_MD_PATH='/LM/W3SVC/1/ROOT/vappname') >>> interpretPathInfo(ecb_vars, subapps) == dict(SCRIPT_NAME='/vappname/subapp1', PATH_INFO='/proc', IIS_EXTENSION_PATH='/vappname', WSGI_SUBAPP='subapp1') True ------------------------------ Now some less common scenarios An example where the extension is installed only to the .wsgi extension to a virtual directory below the root. URL is /vappname/any.wsgi/proc >>> ecb_vars = dict(SCRIPT_NAME='/vappname/any.wsgi', PATH_INFO='/vappname/any.wsgi/proc', APPL_MD_PATH='/LM/W3SVC/1/ROOT/vappname') >>> interpretPathInfo(ecb_vars) == dict(SCRIPT_NAME='/vappname/any.wsgi', PATH_INFO='/proc', IIS_EXTENSION_PATH='/vappname') True An example where the extension is installed only to the .wsgi extension at the root. 
URL is /any_path/any.wsgi/proc >>> ecb_vars = dict(SCRIPT_NAME='/any_path/any.wsgi', PATH_INFO='/any_path/any.wsgi/proc', APPL_MD_PATH='/LM/W3SVC/1/ROOT') >>> interpretPathInfo(ecb_vars) == dict(SCRIPT_NAME='/any_path/any.wsgi', PATH_INFO='/proc', IIS_EXTENSION_PATH='') True How about an extension installed at the root to the .wsgi extension with subapps URL is /any_path/any.wsgi/subapp1/proc/foo >>> ecb_vars = dict(SCRIPT_NAME='/any_path/any.wsgi', PATH_INFO='/any_path/any.wsgi/subapp1/proc/foo', APPL_MD_PATH='/LM/W3SVC/1/ROOT') >>> interpretPathInfo(ecb_vars, subapps) == dict(SCRIPT_NAME='/any_path/any.wsgi/subapp1', PATH_INFO='/proc/foo', IIS_EXTENSION_PATH='', WSGI_SUBAPP='subapp1') True How about an extension installed at the root to the .wsgi extension with subapps... this time default to the root app. URL is /any_path/any.wsgi/proc/foo >>> ecb_vars = dict(SCRIPT_NAME='/any_path/any.wsgi', PATH_INFO='/any_path/any.wsgi/proc/foo', APPL_MD_PATH='/LM/W3SVC/1/ROOT') >>> interpretPathInfo(ecb_vars, subapps) == dict(SCRIPT_NAME='/any_path/any.wsgi', PATH_INFO='/proc/foo', IIS_EXTENSION_PATH='') True """ PATH_INFO = ecb_server_vars['PATH_INFO'] SCRIPT_NAME = ecb_server_vars['SCRIPT_NAME'] IIS_EXTENSION_PATH = getISAPIExtensionPath(ecb_server_vars) if SCRIPT_NAME == PATH_INFO: # since they're the same, we're in a * mapped extension; use # the application path SCRIPT_NAME = IIS_EXTENSION_PATH # remove the script name from the path info if SCRIPT_NAME and PATH_INFO.startswith(SCRIPT_NAME): _, PATH_INFO = PATH_INFO.split(SCRIPT_NAME, 1) result = dict( SCRIPT_NAME=SCRIPT_NAME, PATH_INFO=PATH_INFO, IIS_EXTENSION_PATH=IIS_EXTENSION_PATH, ) # finally, adjust the result if the path info begins with a subapp if path_references_application(PATH_INFO, app_names): result.update(WSGI_SUBAPP = shift_path_info(result)) return result def getISAPIExtensionPath(ecb_server_vars): """Returns the path to our extension DLL. This will be blank ('') if installed at the root, or something like '/foo' or '/bar/foo' if 'foo' is the name of the virtual directory where this extension is installed. >>> getISAPIExtensionPath(dict(APPL_MD_PATH='/LM/W3SVC/1/ROOT/test')) '/test' >>> getISAPIExtensionPath(dict(APPL_MD_PATH='/LM/W3SVC/1/ROOT')) '' This test exercises the less common mixed-case metadata path >>> getISAPIExtensionPath(dict(APPL_MD_PATH='/LM/W3SVC/1/Root')) '' """ # Only way I see how to do this is to fetch the location of our ISAPI # extension in the metabase then assume that '/ROOT/' is the root! 
# It will be something like MD='/LM/W3SVC/1/ROOT/test' appl_md_path = ecb_server_vars["APPL_MD_PATH"] appl_md_path = FoldedCaseString(appl_md_path) site, pos = appl_md_path.split("/ROOT", 1) return pos class ISAPIInputWrapper: # Based on ModPythonInputWrapper in mp_wsgi_handler.py def __init__(self, ecb): self._in = StringIO() self._ecb = ecb if self._ecb.AvailableBytes > 0: data = self._ecb.AvailableData # Check if more data from client than what is in ecb.AvailableData excess = self._ecb.TotalBytes - self._ecb.AvailableBytes if excess > 0: extra = self._ecb.ReadClient(excess) data = data + extra self._in.write(data) # rewind to start self._in.seek(0) def next(self): return self._in.next() def read(self, size=-1): return self._in.read(size) def readline(self, size=-1): return self._in.readline(size) def readlines(self, hint=-1): return self._in.readlines() def reset(self): self._in.reset() def seek(self, *args, **kwargs): self._in.seek(*args, **kwargs) def tell(self): return self._in.tell() def __iter__(self): return iter(self._in.readlines()) class ISAPIOutputWrapper: def __init__(self, ecb): self.ecb = ecb def write(self, msg): self.ecb.WriteClient(msg) def flush(self): pass class ISAPIErrorWrapper: def write(self, msg): trace(msg) def flush(self): pass class IsapiWsgiHandler(BaseHandler): def __init__(self, ecb, path_info): self.ecb = ecb self.path_info = path_info self.stdin = ISAPIInputWrapper(self.ecb) self.stdout = ISAPIOutputWrapper(self.ecb) self.stderr = sys.stderr #this will go to the win32traceutil self.headers = None self.headers_sent = False self.wsgi_multithread = False self.wsgi_multiprocess = False self.base_env = [] def send_preamble(self): """Since ISAPI sends preamble itself, do nothing""" trace("send_preamble") def send_headers(self): """Transmit headers to the client, via self._write()""" trace("send_headers", str(self.headers)) self.cleanup_headers() self.headers_sent = True if not self.origin_server or self.client_is_modern(): trace("SendResponseHeaders") self.ecb.SendResponseHeaders(self.status, str(self.headers), False) def _write(self, data): trace("_write", data) self.ecb.WriteClient(data) def _flush(self): trace("_flush") def get_stdin(self): trace("get_stdin") return self.stdin def get_stderr(self): trace("get_stderr") return self.stderr def add_cgi_vars(self): trace("add_cgi_vars") # get standard windows os environment environ = dict(os.environ.items()) # set standard CGI variables required_cgienv_vars = ['REQUEST_METHOD', 'SCRIPT_NAME', 'PATH_INFO', 'QUERY_STRING', 'CONTENT_TYPE', 'CONTENT_LENGTH', 'SERVER_NAME', 'SERVER_PORT', 'SERVER_PROTOCOL', 'REMOTE_ADDR' ] ecb_dict = ECBDictAdapter(self.ecb) for cgivar in required_cgienv_vars: try: environ[cgivar] = ecb_dict[cgivar] except KeyError: raise AssertionError("missing CGI environment variable %s" % cgivar) environ.update(self.path_info) http_cgienv_vars = ecb_dict['ALL_HTTP'].split("\n") for cgivar in http_cgienv_vars: pair = cgivar.split(":",1) try: environ[pair[0]] = pair[1] except: # Handle last list which is not a pair pass # Other useful CGI variables optional_cgienv_vars = ['REMOTE_USER', 'HTTPS',] for cgivar in optional_cgienv_vars: try: environ[cgivar] = ecb_dict[cgivar] except KeyError: pass # and some custom ones. 
environ['isapi.ecb'] = self.ecb self.environ.update(environ) def _run_app(rootapp, apps, ecb): ecb_dict = ECBDictAdapter(ecb) path_info = interpretPathInfo(ecb_dict, apps.keys()) loc = path_info.get('WSGI_SUBAPP') application = apps.get(loc, rootapp) # we have to pass path_info because otherwise the handler can't determine # what the correct path is (because it doesn't know whether it's a # subapp or not) handler = IsapiWsgiHandler(ecb, path_info) trace("Handler") try: if application is not None: handler.run(application) else: handler.run(isapi_error) except ExtensionError: # error normally happens when client disconnects before # extension i/o completed pass except: # ToDo:Other exceptions should generate a nice page trace("Caught App Exception") pass # The ISAPI extension - handles requests in our virtual dir, and sends the # response to the client. class ISAPISimpleHandler(SimpleExtension): '''Python Simple WSGI ISAPI Extension''' def __init__(self, rootapp=None, **apps): trace("ISAPISimpleHandler.__init__") self.rootapp = rootapp self.apps = apps SimpleExtension.__init__(self) def HttpExtensionProc(self, ecb): trace("Enter HttpExtensionProc") _run_app(self.rootapp, self.apps, ecb) ecb.close() trace("Exit HttpExtensionProc") return isapicon.HSE_STATUS_SUCCESS def TerminateExtension(self, status): trace("TerminateExtension") class ISAPIThreadPoolHandler(ThreadPoolExtension): '''Python Thread Pool WSGI ISAPI Extension''' def __init__(self, rootapp=None, **apps): trace("ISAPIThreadPoolHandler.__init__") self.rootapp = rootapp self.apps = apps ThreadPoolExtension.__init__(self) def Dispatch(self, ecb): trace("Enter Dispatch") _run_app(self.rootapp, self.apps, ecb) ecb.DoneWithSession() trace("Exit Dispatch") def isapi_error(environ, start_response): '''Send a nice error page to the client''' status = '404 OK' start_response(status, [('Content-type', 'text/plain')]) return ['Page not found'] #----------------------------------------------------------------------------- def test(environ, start_response): '''Simple app as per PEP 333''' status = '200 OK' start_response(status, [('Content-type', 'text/plain')]) return ['Hello world from isapi!'] # The entry points for the ISAPI extension. def __ExtensionFactory__(): return ISAPISimpleHandler(test) # Our special command line customization. # Pre-install hook for our virtual directory. def PreInstallDirectory(params, options): # If the user used our special '--description' option, # then we override our default. if options.description: params.Description = options.description # Post install hook for our entire script def PostInstall(params, options): print "Extension installed" # Handler for our custom 'status' argument. def status_handler(options, log, arg): "Query the status of something" print "Everything seems to be fine!" custom_arg_handlers = {"status": status_handler} if __name__=='__main__': # If run from the command-line, install ourselves. from isapi.install import * params = ISAPIParameters(PostInstall = PostInstall) # Setup the virtual directories - this is a list of directories our # extension uses - in this case only 1. # Each extension has a "script map" - this is the mapping of ISAPI # extensions. sm = [ ScriptMapParams(Extension="*", Flags=0) ] vd = VirtualDirParameters(Name="isapi-wsgi-test", Description = "ISAPI-WSGI Test", ScriptMaps = sm, ScriptMapUpdate = "replace", # specify the pre-install hook. PreInstall = PreInstallDirectory ) params.VirtualDirs = [vd] # Setup our custom option parser. 
from optparse import OptionParser parser = OptionParser('') # black usage, so isapi sets it. parser.add_option("", "--description", action="store", help="custom description to use for the virtual directory") HandleCommandLine(params, opt_parser=parser, custom_arg_handlers = custom_arg_handlers)
_get_variable_py2k
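The module docstring above describes the two handler classes; deploying a real WSGI application follows the same __ExtensionFactory__ / isapi.install pattern the module uses for its built-in test app. A minimal sketch, assuming a hypothetical package myapp that exposes a WSGI callable named application:

# Hedged deployment sketch for isapi_wsgi (the names `myapp` and `application`
# are assumptions, not part of the module above).
import isapi_wsgi
from myapp import application  # assumed WSGI callable


def __ExtensionFactory__():
    # One ISAPISimpleHandler serving the app; swap in ISAPIThreadPoolHandler
    # to dispatch requests on IIS worker threads instead.
    return isapi_wsgi.ISAPISimpleHandler(application)


if __name__ == '__main__':
    # Same install helpers the module itself imports in its __main__ block.
    from isapi.install import (ISAPIParameters, ScriptMapParams,
                               VirtualDirParameters, HandleCommandLine)

    params = ISAPIParameters()
    sm = [ScriptMapParams(Extension="*", Flags=0)]
    params.VirtualDirs = [VirtualDirParameters(Name="myapp",
                                               Description="myapp WSGI app",
                                               ScriptMaps=sm,
                                               ScriptMapUpdate="replace")]
    HandleCommandLine(params)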
ltp.py
# -*- coding: utf-8 -*- # IgorNLP: ltp part-of-speech tagging module # # Author: Igor import os import tempfile from subprocess import PIPE from nltk.internals import overridden, compat from inlp.tag.api import TaggerI from inlp.utils import ltp_cmd class LtpPosTagger(TaggerI): ''' ltp part-of-speech (POS) tagging module #test: sentences = [['这', '是', '哈工大', '分词器', '。'], ['哈工大', '的', '分词器', '测试']] path_ltp = '/home/igor/PycharmProjects/ltp' ltpTagger = LtpPosTagger(path_to_ltp=path_ltp) print(ltpTagger.tag_sents(sentences)) print(ltpTagger.tag(['这', '是', '哈工大', '分词器', '。'])) output: [[('这', 'r'), ('是', 'v'), ('哈工大', 'j'), ('分词器', 'n'), ('。', 'wp')], [('哈工大', 'j'), ('的', 'u'), ('分词器', 'n'), ('测试', 'v')]] [('这', 'r'), ('是', 'v'), ('哈工大', 'j'), ('分词器', 'n'), ('。', 'wp')] ''' def __init__(self, path_to_ltp, path_to_model=None, path_to_lexicon=None, threads=1, encoding='utf8'): ''' Initialize the tagging model: specify the location of ltp :param path_to_ltp: root directory of the ltp project :param path_to_model: ltp POS tagging model :param path_to_lexicon: user-specified custom lexicon ''' self._path_to_ltp = path_to_ltp self._path_to_model = path_to_model self._path_to_lexicon = path_to_lexicon self._threads = threads self._encoding = encoding def tag_file(self, input_file_path): ''' POS-tag a file that has already been word-segmented. Build the command line, run it and return its standard output :param input_file_path: the input file :return: the tagging result, kept in ltp's output format so the next component can consume it ''' if self._path_to_model is None: self._path_to_model = os.path.join(self._path_to_ltp, 'ltp_data/pos.model') cws_cmdline = os.path.join(self._path_to_ltp, 'bin/examples/pos_cmdline') cmd = [ cws_cmdline, '--input', input_file_path, '--threads', repr(self._threads), '--postagger-model', self._path_to_model, ] if self._path_to_lexicon: cmd.extend(['--postagger-lexicon', self._path_to_lexicon]) stdout = self._execute(cmd) return stdout def tag(self, tokens): ''' Tag a single sentence :param tokens:list :return:list(tuple(str,str)) '''

def tag_sents(self, sentences): encoding = self._encoding # create temporary input file _input_fh, self._input_file_path = tempfile.mkstemp(text=True) # Write the actural sentences to the temporary input file _input_fh = os.fdopen(_input_fh, 'wb') _input = '\n'.join('\t'.join(x) for x in sentences) if isinstance(_input, compat.text_type) and encoding: _input = _input.encode(encoding) _input_fh.write(_input) _input_fh.close() stdout = self.tag_file(self._input_file_path) return [[tuple(token.split('_')) for token in sent.split('\t')] for sent in stdout.strip().split('\n')] def _execute(self, cmd): encoding = self._encoding stdout, _stderr = ltp_cmd(cmd, stdout=PIPE, stderr=PIPE) stdout = stdout.decode(encoding) return stdout if __name__ == '__main__': sentences = [['这', '是', '哈工大', '分词器', '。'], ['哈工大', '的', '分词器', '测试']] path_ltp = '/home/igor/PycharmProjects/ltp' ltpTagger = LtpPosTagger(path_to_ltp=path_ltp) print(ltpTagger.tag_sents(sentences)) print(ltpTagger.tag(['这', '是', '哈工大', '分词器', '。']))
if overridden(self.tag_sents): return self.tag_sents([tokens])[0] else: raise NotImplementedError()
__init__.py
from operator import attrgetter import pyangbind.lib.xpathhelper as xpathhelper from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType from pyangbind.lib.base import PybindBase from decimal import Decimal from bitarray import bitarray import __builtin__ class link_local_config(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module brocade-interface - based on the path /interface/ethernet/ipv6/ipv6-config/address/link-local-config. Each member element of the container is represented as a class variable - with a specific YANG type. """ __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__link_local_address','__link_local',) _yang_name = 'link-local-config' _rest_name = '' _pybind_generated_by = 'container' def __init__(self, *args, **kwargs): path_helper_ = kwargs.pop("path_helper", None) if path_helper_ is False: self._path_helper = False elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper): self._path_helper = path_helper_ elif hasattr(self, "_parent"): path_helper_ = getattr(self._parent, "_path_helper", False) self._path_helper = path_helper_ else: self._path_helper = False extmethods = kwargs.pop("extmethods", None) if extmethods is False: self._extmethods = False elif extmethods is not None and isinstance(extmethods, dict): self._extmethods = extmethods elif hasattr(self, "_parent"): extmethods = getattr(self._parent, "_extmethods", None) self._extmethods = extmethods else: self._extmethods = False self.__link_local = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="link-local", rest_name="link-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address to override automatically computed link-local address'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True) self.__link_local_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="link-local-address", rest_name="link-local-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='inet:ipv6-address', is_config=True) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path()+[self._yang_name] else: return 
[u'interface', u'ethernet', u'ipv6', u'ipv6-config', u'address', u'link-local-config'] def _rest_path(self): if hasattr(self, "_parent"): if self._rest_name: return self._parent._rest_path()+[self._rest_name] else: return self._parent._rest_path() else: return [u'interface', u'Ethernet', u'ipv6', u'address'] def _get_link_local_address(self): """ Getter method for link_local_address, mapped from YANG variable /interface/ethernet/ipv6/ipv6_config/address/link_local_config/link_local_address (inet:ipv6-address) """ return self.__link_local_address def _set_link_local_address(self, v, load=False): """ Setter method for link_local_address, mapped from YANG variable /interface/ethernet/ipv6/ipv6_config/address/link_local_config/link_local_address (inet:ipv6-address) If this variable is read-only (config: false) in the source YANG file, then _set_link_local_address is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_link_local_address() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="link-local-address", rest_name="link-local-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='inet:ipv6-address', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """link_local_address must be of a type compatible with inet:ipv6-address""", 'defined-type': "inet:ipv6-address", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="link-local-address", rest_name="link-local-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='inet:ipv6-address', is_config=True)""", }) self.__link_local_address = t if hasattr(self, '_set'): self._set() def _unset_link_local_address(self): self.__link_local_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="link-local-address", rest_name="link-local-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='inet:ipv6-address', is_config=True) def _get_link_local(self): """ Getter method for 
link_local, mapped from YANG variable /interface/ethernet/ipv6/ipv6_config/address/link_local_config/link_local (empty) """ return self.__link_local
def _set_link_local(self, v, load=False): """ Setter method for link_local, mapped from YANG variable /interface/ethernet/ipv6/ipv6_config/address/link_local_config/link_local (empty) If this variable is read-only (config: false) in the source YANG file, then _set_link_local is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_link_local() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="link-local", rest_name="link-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address to override automatically computed link-local address'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """link_local must be of a type compatible with empty""", 'defined-type': "empty", 'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="link-local", rest_name="link-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address to override automatically computed link-local address'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)""", }) self.__link_local = t if hasattr(self, '_set'): self._set() def _unset_link_local(self): self.__link_local = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="link-local", rest_name="link-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address to override automatically computed link-local address'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True) link_local_address = __builtin__.property(_get_link_local_address, _set_link_local_address) link_local = __builtin__.property(_get_link_local, _set_link_local) _pyangbind_elements = {'link_local_address': link_local_address, 'link_local': link_local, }
base_dataitem.go
package dataapi import ( "sync" ) // BaseDataItem - a reusable base implementation of DataItem that manages listener registration and notification type BaseDataItem struct { sync.RWMutex callbacks map[int]func(item DataItem) id int } func NewBaseDataItem() *BaseDataItem { return &BaseDataItem{ callbacks: make(map[int]func(DataItem)), } } func (b *BaseDataItem) String() string { return "" } func (b *BaseDataItem) AddListener(f func(data DataItem)) int { b.Lock() defer b.Unlock() b.id++ b.callbacks[b.id] = f return b.id } func (b *BaseDataItem) DeleteListener(i int) { b.Lock() defer b.Unlock() delete(b.callbacks, i) } func (b *BaseDataItem) update() { b.RLock() defer b.RUnlock() for _, f := range b.callbacks {
}
f(b) }
apps.py
from django.apps import AppConfig class
(AppConfig): name = 'msa.contrib.master' verbose_name = 'Master Service'
MasterAppConfig
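A hedged settings sketch showing how an AppConfig like the one above is typically activated; the dotted path assumes the file lives at msa/contrib/master/apps.py:

# settings.py (sketch) - register the app by its AppConfig dotted path.
INSTALLED_APPS = [
    "django.contrib.contenttypes",
    "django.contrib.auth",
    "msa.contrib.master.apps.MasterAppConfig",  # assumed module location
]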
tutorial.py
""" Useful for: * users learning xarray * building tutorials in the documentation. """ import os import pathlib import numpy as np from .backends.api import open_dataset as _open_dataset from .backends.rasterio_ import open_rasterio as _open_rasterio from .core.dataarray import DataArray from .core.dataset import Dataset _default_cache_dir_name = "xarray_tutorial_data" base_url = "https://github.com/pydata/xarray-data" version = "master" def _construct_cache_dir(path): import pooch if isinstance(path, pathlib.Path): path = os.fspath(path) elif path is None: path = pooch.os_cache(_default_cache_dir_name) return path external_urls = {} # type: dict external_rasterio_urls = { "RGB.byte": "https://github.com/mapbox/rasterio/raw/1.2.1/tests/data/RGB.byte.tif", "shade": "https://github.com/mapbox/rasterio/raw/1.2.1/tests/data/shade.tif", } # idea borrowed from Seaborn def open_dataset( name, cache=True, cache_dir=None, **kws, ): """ Open a dataset from the online repository (requires internet). If a local copy is found then always use that to avoid network traffic. Available datasets: * ``"air_temperature"``: NCEP reanalysis subset * ``"rasm"``: Output of the Regional Arctic System Model (RASM) * ``"ROMS_example"``: Regional Ocean Model System (ROMS) output
* ``"tiny"``: small synthetic dataset with a 1D data variable * ``"era5-2mt-2019-03-uk.grib"``: ERA5 temperature data over the UK * ``"eraint_uvz"``: data from ERA-Interim reanalysis, monthly averages of upper level data Parameters ---------- name : str Name of the file containing the dataset. e.g. 'air_temperature' cache_dir : path-like, optional The directory in which to search for and write cached data. cache : bool, optional If True, then cache data locally for use on subsequent calls **kws : dict, optional Passed to xarray.open_dataset See Also -------- xarray.open_dataset """ try: import pooch except ImportError as e: raise ImportError( "tutorial.open_dataset depends on pooch to download and manage datasets." " To proceed please install pooch." ) from e logger = pooch.get_logger() logger.setLevel("WARNING") cache_dir = _construct_cache_dir(cache_dir) if name in external_urls: url = external_urls[name] else: path = pathlib.Path(name) if not path.suffix: # process the name default_extension = ".nc" path = path.with_suffix(default_extension) url = f"{base_url}/raw/{version}/{path.name}" # retrieve the file filepath = pooch.retrieve(url=url, known_hash=None, path=cache_dir) ds = _open_dataset(filepath, **kws) if not cache: ds = ds.load() pathlib.Path(filepath).unlink() return ds def open_rasterio( name, engine=None, cache=True, cache_dir=None, **kws, ): """ Open a rasterio dataset from the online repository (requires internet). If a local copy is found then always use that to avoid network traffic. Available datasets: * ``"RGB.byte"``: TIFF file derived from USGS Landsat 7 ETM imagery. * ``"shade"``: TIFF file derived from from USGS SRTM 90 data ``RGB.byte`` and ``shade`` are downloaded from the ``rasterio`` repository [1]_. Parameters ---------- name : str Name of the file containing the dataset. e.g. 'RGB.byte' cache_dir : path-like, optional The directory in which to search for and write cached data. cache : bool, optional If True, then cache data locally for use on subsequent calls **kws : dict, optional Passed to xarray.open_rasterio See Also -------- xarray.open_rasterio References ---------- .. [1] https://github.com/mapbox/rasterio """ try: import pooch except ImportError as e: raise ImportError( "tutorial.open_rasterio depends on pooch to download and manage datasets." " To proceed please install pooch." ) from e logger = pooch.get_logger() logger.setLevel("WARNING") cache_dir = _construct_cache_dir(cache_dir) url = external_rasterio_urls.get(name) if url is None: raise ValueError(f"unknown rasterio dataset: {name}") # retrieve the file filepath = pooch.retrieve(url=url, known_hash=None, path=cache_dir) arr = _open_rasterio(filepath, **kws) if not cache: arr = arr.load() pathlib.Path(filepath).unlink() return arr def load_dataset(*args, **kwargs): """ Open, load into memory, and close a dataset from the online repository (requires internet). See Also -------- open_dataset """ with open_dataset(*args, **kwargs) as ds: return ds.load() def scatter_example_dataset(): A = DataArray( np.zeros([3, 11, 4, 4]), dims=["x", "y", "z", "w"], coords=[ np.arange(3), np.linspace(0, 1, 11), np.arange(4), 0.1 * np.random.randn(4), ], ) B = 0.1 * A.x ** 2 + A.y ** 2.5 + 0.1 * A.z * A.w A = -0.1 * A.x + A.y / (5 + A.z) + A.w ds = Dataset({"A": A, "B": B}) ds["w"] = ["one", "two", "three", "five"] ds.x.attrs["units"] = "xunits" ds.y.attrs["units"] = "yunits" ds.z.attrs["units"] = "zunits" ds.w.attrs["units"] = "wunits" ds.A.attrs["units"] = "Aunits" ds.B.attrs["units"] = "Bunits" return ds
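A brief usage sketch for the helpers above, as reached through xarray's public tutorial namespace (assumes xarray and pooch are installed and the machine has network access):

# Hedged example: fetch and cache the NCEP air temperature subset, then load
# it eagerly so the cached file can be released.
import xarray as xr

ds = xr.tutorial.load_dataset("air_temperature")
print(ds["air"].mean(dim="time"))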
pairSums.py
Pair Sums ''' Pair Sums Given a list of n integers arr[0..(n-1)], determine the number of different pairs of elements within it which sum to k. If an integer appears in the list multiple times, each copy is considered to be different; that is, two pairs are considered different if one pair includes at least one array index which the other doesn't, even if they include the same values. Signature int numberOfWays(int[] arr, int k) Input n is in the range [1, 100,000]. Each value arr[i] is in the range [1, 1,000,000,000]. k is in the range [1, 1,000,000,000]. Output Return the number of different pairs of elements which sum to k. Example 1 n = 5 k = 6 arr = [1, 2, 3, 4, 3] output = 2 The valid pairs are 2+4 and 3+3. Example 2 n = 5 k = 6 arr = [1, 5, 3, 3, 3] output = 4 There's one valid pair 1+5, and three different valid pairs 3+3 (the 3rd and 4th elements, 3rd and 5th elements, and 4th and 5th elements). ''' import math # Add any extra import statements you may need here # Add any helper functions you may need here def numberOfWaysSimple(arr, k): # Write your code here count = 0 # for idx in range(len(arr)): # match[arr[idx]] = [] for ix in range(len(arr)): for iy in range(ix+1,len(arr)): if arr[ix]+arr[iy] == k: count += 1 return count def numberOfWays(arr, k): # Write your code here count = 0 paired = [] match = {} # create map to match with other element (not itself) for ix,x in enumerate(arr): if not x in match: match[x] = [] match[x].append(ix) for iy,y in enumerate(arr): # pair, k = x+y, x = k-y if k-y in match: for ix in match[k-y]: # skip itself if ix == iy: continue # print("[{}]{} + [{}]{} = {}".format(ix,arr[ix],iy,arr[iy],k)) paired.append( (arr[ix],y) ) count += 1 # print(paired) # print(count/2) return int(count/2) # These are the tests we use to determine if the solution is correct. # You can add your own at the bottom, but they are otherwise not editable! def printInteger(n): print('[', n, ']', sep='', end='') test_case_number = 1 def check(expected, output):
if __name__ == "__main__": k_1 = 6 arr_1 = [1, 2, 3, 4, 3] expected_1 = 2 output_1 = numberOfWays(arr_1, k_1) check(expected_1, output_1) k_2 = 6 arr_2 = [1, 5, 3, 3, 3] expected_2 = 4 output_2 = numberOfWays(arr_2, k_2) check(expected_2, output_2) # Add your own test cases here
global test_case_number result = False if expected == output: result = True rightTick = '\u2713' wrongTick = '\u2717' if result: print(rightTick, 'Test #', test_case_number, sep='') else: print(wrongTick, 'Test #', test_case_number, ': Expected ', sep='', end='') printInteger(expected) print(' Your output: ', end='') printInteger(output) print() test_case_number += 1
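numberOfWays above counts pairs through a value-to-indices map and halves the double count at the end. An alternative O(n) counting sketch (a hypothetical helper, not part of the file) using collections.Counter, which avoids materializing index lists:

# Hedged alternative: count unordered index pairs summing to k in O(n).
from collections import Counter


def number_of_ways_counter(arr, k):
    counts = Counter(arr)
    total = 0
    for value, freq in counts.items():
        complement = k - value
        if complement == value:
            # choose 2 of the freq equal elements
            total += freq * (freq - 1) // 2
        elif value < complement and complement in counts:
            # count each cross-value pair exactly once
            total += freq * counts[complement]
    return total


assert number_of_ways_counter([1, 2, 3, 4, 3], 6) == 2
assert number_of_ways_counter([1, 5, 3, 3, 3], 6) == 4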
plans.js
/** * Plans API. * * @module api/plans */ 'use strict'; const Error = require('../lib/error'); module.exports = Plans; function
(parent) { if (!new.target) { return new Plans(parent); } Object.defineProperty(this, 'http', {value: parent.http}); } Plans.prototype.create = function create(plan) { if (!plan) { throw Error('plan is missing'); } const path = '/services/2/recurring/plans'; return this.http.post(path, plan); }; Plans.prototype.update = function update(planId, plan) { if (!planId) { throw Error('planId is missing'); } if (!plan) { throw Error('plan is missing'); } const path = `/services/2/recurring/plans/${planId}`; return this.http.put(path, plan); }; Plans.prototype.get = function get(planId) { if (!planId) { throw Error('planId is missing'); } const path = `/services/2/recurring/plans/${planId}`; return this.http.get(path); }; Plans.prototype.list = function list(params) { const path = '/services/2/recurring/plans'; return this.http.get(path, params); };
Plans