file_name (stringlengths 3–137) | prefix (stringlengths 0–918k) | suffix (stringlengths 0–962k) | middle (stringlengths 0–812k)
---|---|---|---
alaska.py | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .core import UnitedStates
from workalendar.core import MON
from workalendar.registry import iso_register
@iso_register('US-AK')
class Alaska(UnitedStates):
"""Alaska"""
FIXED_HOLIDAYS = UnitedStates.FIXED_HOLIDAYS + (
(10, 18, 'Alaska Day'),
)
include_columbus_day = False
def get_variable_days(self, year):
        days = super(Alaska, self).get_variable_days(year)
days.append(
(Alaska.get_last_weekday_in_month(year, 3, MON), "Seward's Day")
)
        return days
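# Usage sketch (not part of the original file; assumes workalendar's standard
# Calendar API and that this class is exported as workalendar.usa.Alaska):
#
#   from workalendar.usa import Alaska
#   cal = Alaska()
#   holidays = cal.holidays(2021)  # includes Alaska Day (Oct 18) and Seward's Day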
modal.js | import { inject as service } from '@ember/service';
import { action } from '@ember/object';
import { tracked } from '@glimmer/tracking';
import Component from '@glimmer/component';
import { task, restartableTask } from 'ember-concurrency';
/** @typedef {import("../../../models/behandeling-van-agendapunt").default} Behandeling*/
/** @typedef {import("../../../models/bestuursorgaan").default} Bestuursorgaan*/
/** @typedef {import("../../../models/stemming").default} Stemming*/
/**
* @typedef {Object} Args
* @property {Behandeling} behandeling
* @property {Bestuursorgaan} bestuursorgaan
* @property {boolean} show
* @property {() => void} onClose
*/
/** @extends {Component<Args>} */
export default class TreatmentVotingModalComponent extends Component {
@tracked stemmingen;
@tracked create = false;
@tracked edit = false;
@tracked editMode = false;
/** @type {Stemming} */
@service store;
@service editStemming;
constructor(parent, args) {
super(parent, args);
this.fetchStemmingen.perform();
}
@restartableTask
/** @type {import("ember-concurrency").Task} */
*fetchStemmingen() {
this.stemmingen = (yield this.args.behandeling.stemmingen).sortBy(
'position'
);
}
@task
/** @type {import("ember-concurrency").Task} */
*saveStemming() {
const isNew = this.editStemming.stemming.isNew;
if (isNew) {
this.editStemming.stemming.position =
this.args.behandeling.stemmingen.length;
}
yield this.editStemming.saveTask.perform();
if (isNew) {
this.args.behandeling.stemmingen.pushObject(this.editStemming.stemming);
this.args.behandeling.save();
}
yield this.fetchStemmingen.perform();
this.onCancelEdit();
}
@task
/** @type {import("ember-concurrency").Task} */
*addStemming() {
const richTreatment = yield this.store.query('behandeling-van-agendapunt', {
'filter[:id:]': this.args.behandeling.id,
include: 'aanwezigen.bekleedt.bestuursfunctie',
});
const participants = richTreatment.firstObject.aanwezigen;
const stemmingToEdit = this.store.createRecord('stemming', {
onderwerp: '',
geheim: false,
aantalVoorstanders: 0,
aantalTegenstanders: 0,
aantalOnthouders: 0,
gevolg: '',
});
this.editMode = true;
stemmingToEdit.aanwezigen.pushObjects(participants);
stemmingToEdit.stemmers.pushObjects(participants);
this.editStemming.stemming = stemmingToEdit;
}
@action
toggleEditStemming(stemming) {
this.editStemming.stemming = stemming;
this.editMode = true;
}
@task
*removeStemming(stemming) {
yield stemming.destroyRecord();
this.stemmingen = this.stemmingen.reject((x) => x === stemming);
}
@action
onCancelEdit() {
this.editMode = false;
this.editStemming.stemming.rollbackAttributes();
this.editStemming.stemming = null;
}
}
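// Usage sketch (hypothetical invocation; the argument names come from the Args
// typedef above, and the template tag name is assumed from the class name):
//
//   <TreatmentVotingModal
//     @behandeling={{this.behandeling}}
//     @bestuursorgaan={{this.bestuursorgaan}}
//     @show={{this.showModal}}
//     @onClose={{this.closeModal}}
//   />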
migration.ts | // This file is intended to be used by simply calling "pnpm i" or "pnpm migrate"
import type { FeedbackQuestionOption } from "../entities/feedback";
import type { FeedbackQuestionType } from "../../client/constants";
import fs from "fs";
import path from "path";
const migration = async () => {
const { FeedbackQuestionType, UserType } = await import(
"../../client/constants"
);
const { baseDBConfig, dbNames } = await import("./config");
const knexDB = (await import("knex")).default(baseDBConfig);
const createdDatabases = Object.values(dbNames);
for (const dbName of Object.values(dbNames)) {
try {
await knexDB.raw(`CREATE DATABASE "${dbName}";`);
} catch (err) {
// Expected possible error: duplicate database (the database already exists).
// https://www.postgresql.org/docs/8.2/errcodes-appendix.html
if (err.code === "42P04") {
createdDatabases.splice(createdDatabases.indexOf(dbName), 1);
} else {
const { serializeError } = await import("serialize-error");
console.error(serializeError(err));
await knexDB.destroy();
process.exit(1);
}
}
}
if (createdDatabases.length) {
console.info(
`CREATED "${createdDatabases.join(" | ")}" DATABASE${
createdDatabases.length > 1 ? "S" : ""
}!`
);
}
await knexDB.destroy();
const sha1 = (await import("crypto-js/sha1")).default;
const { chunk, sample } = await import("lodash");
const { baseUserConfig } = await import("../../client/constants/userConfig");
const {
joinFeedbackQuestionOptions,
splitFeedbackQuestionOptions,
} = await import("../resolvers/feedback/utils");
const { dbAuth, dbConfig, dbData, dbTracking } = await import("./index");
const {
CONFIGURATION_TABLE,
COURSE_STATS_TABLE,
COURSE_TABLE,
COURSE_GROUPED_STATS_TABLE,
CourseStatsTable,
CourseTable,
CourseGroupedStatsTable,
FEEDBACK_FORM_QUESTION_TABLE,
FEEDBACK_FORM_TABLE,
FEEDBACK_RESULT_TABLE,
FeedbackFormQuestionTable,
FeedbackFormTable,
FeedbackResultTable,
GroupedComplementaryInformationTable,
GROUPED_COMPLEMENTARY_INFORMATION_TABLE,
PARAMETER_TABLE,
ParameterTable,
PERFORMANCE_BY_LOAD_TABLE,
PerformanceByLoadTable,
PERSISTENCE_TABLE,
PROGRAM_STRUCTURE_TABLE,
EXTERNAL_EVALUATION_STRUCTURE_TABLE,
EXTERNAL_EVALUATION_GROUPED_STATS_TABLE,
PROGRAM_TABLE,
ProgramStructureTable,
ExternalEvaluationStructureTable,
ProgramTable,
STUDENT_ADMISSION_TABLE,
STUDENT_EXTERNAL_EVALUATION_TABLE,
EXTERNAL_EVALUATION_TABLE,
EXTERNAL_EVALUATION_STATS_TABLE,
STUDENT_CLUSTER_TABLE,
STUDENT_COURSE_TABLE,
STUDENT_DROPOUT_TABLE,
STUDENT_EMPLOYED_TABLE,
STUDENT_GROUPED_EMPLOYED_TABLE,
STUDENT_PROGRAM_TABLE,
STUDENT_TABLE,
STUDENT_TERM_TABLE,
StudentAdmissionTable,
StudentExternalEvaluationTable,
ExternalEvaluationTable,
ExternalEvaluationStatsTable,
ExternalEvaluationGroupedStatsTable,
StudentClusterTable,
StudentCourseTable,
StudentDropoutTable,
StudentEmployedTable,
StudentProgramTable,
StudentGroupedEmployedTable,
StudentTable,
StudentTermTable,
TRACKING_TABLE,
USER_CONFIGURATION_TABLE,
USER_PROGRAMS_TABLE,
UserConfigurationTable,
UserProgramsTable,
USERS_TABLE,
UserTable,
} = await import("./tables");
const users = dbAuth.schema.hasTable(USERS_TABLE).then(async (exists) => {
if (!exists) {
await dbAuth.schema.createTable(USERS_TABLE, (table) => {
table.text("email").primary();
table.text("password").notNullable().defaultTo("");
table.text("name").notNullable().defaultTo("Default");
table.text("oldPassword1").notNullable().defaultTo("");
table.text("oldPassword2").notNullable().defaultTo("");
table.text("oldPassword3").notNullable().defaultTo("");
table.boolean("locked").notNullable().defaultTo(true);
table.integer("tries").notNullable().defaultTo(0);
table.text("unlockKey").notNullable().defaultTo("");
table.boolean("admin").notNullable().defaultTo(false);
table
.enum("type", Object.values(UserType))
.notNullable()
.defaultTo(UserType.Director);
table.text("student_id").notNullable().defaultTo("");
});
const mockStudent = (await import("./mockData/student.json")).default[0]
?.id;
await UserTable().insert({
email: "[email protected]",
password: sha1("admin").toString(),
name: "default admin",
locked: false,
admin: true,
type: UserType.Director,
student_id: mockStudent ?? "",
});
}
});
const usersPrograms = dbAuth.schema
.hasTable(USER_PROGRAMS_TABLE)
.then(async (exists) => {
if (!exists) {
await dbAuth.schema.createTable(USER_PROGRAMS_TABLE, (table) => {
table.text("email");
table.text("program");
table.primary(["email", "program"]);
});
await UserProgramsTable().insert(
(await import("./mockData/program.json")).default.map(({ id }) => {
return {
email: "[email protected]",
program: id.toString(),
};
})
);
}
});
const userConfig = dbConfig.schema
.hasTable(USER_CONFIGURATION_TABLE)
.then(async (exists) => {
if (!exists) {
await dbConfig.schema.createTable(USER_CONFIGURATION_TABLE, (table) => {
table.text("email").primary().notNullable();
table.json("config").notNullable();
});
await UserConfigurationTable().insert({
email: "[email protected]",
config: {
...baseUserConfig,
SHOW_GROUPED_VIEW: true,
SHOW_STUDENT_COMPLEMENTARY_INFORMATION: true,
SHOW_GROUPED_COMPLEMENTARY_INFO: true,
SHOW_DROPOUT: true,
SHOW_DOWNLOAD: true,
SHOW_PROGRESS_STUDENT_CYCLE: true,
SHOW_STUDENT_LIST: true,
FOREPLAN: true,
},
});
}
});
const config = dbConfig.schema
.hasTable(CONFIGURATION_TABLE)
.then(async (exists) => {
if (!exists) {
await dbConfig.schema.createTable(CONFIGURATION_TABLE, (table) => {
table.text("name").primary().defaultTo("");
table.text("value").defaultTo("").notNullable();
});
}
});
const track = dbTracking.schema
.hasTable(TRACKING_TABLE)
.then(async (exists) => {
if (!exists) {
await dbTracking.schema.createTable(TRACKING_TABLE, (table) => {
table.bigIncrements("id").primary().unsigned();
table.text("app_id").notNullable().defaultTo("undefined");
table.text("user_id").notNullable();
table.timestamp("datetime", { useTz: true }).notNullable();
table.timestamp("datetime_client", { useTz: true }).notNullable();
table.text("data").notNullable();
});
}
});
const course = dbData.schema.hasTable(COURSE_TABLE).then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(COURSE_TABLE, (table) => {
table.text("id").notNullable().primary();
table.text("name").notNullable();
table.text("description").notNullable();
table.text("tags").notNullable().defaultTo("");
table.text("grading").notNullable();
table.float("grade_min", 4).notNullable();
table.float("grade_max", 4).notNullable();
table.float("grade_pass_min", 4).notNullable();
});
await CourseTable().insert(
(await import("./mockData/course.json")).default
);
}
});
const courseStats = dbData.schema
.hasTable(COURSE_STATS_TABLE)
.then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(COURSE_STATS_TABLE, (table) => {
table.text("course_taken").notNullable();
table.integer("year", 4).notNullable();
table.integer("term", 4).notNullable();
table.integer("p_group", 2).notNullable();
table.integer("n_total", 8).notNullable();
table.integer("n_finished", 8).notNullable();
table.integer("n_pass", 8).notNullable();
table.integer("n_fail", 8).notNullable();
table.integer("n_drop", 8).notNullable();
table.text("histogram").notNullable();
table.float("avg_grade").notNullable();
table.integer("n_grades", 4).notNullable();
table.integer("id", 8).primary().notNullable();
table.text("histogram_labels").notNullable();
table.text("color_bands").notNullable();
});
const dataToInsert = chunk(
(await import("./mockData/course_stats.json")).default,
500
);
for (const chunkData of dataToInsert) {
await CourseStatsTable().insert(chunkData);
}
}
});
const param = dbData.schema.hasTable(PARAMETER_TABLE).then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(PARAMETER_TABLE, (table) => {
table.float("passing_grade", 8);
table.timestamp("loading_date");
});
await ParameterTable().insert(
(await import("./mockData/parameter.json")).default.map(
({ passing_grade, loading_date }) => {
return {
passing_grade,
loading_date: new Date(loading_date),
};
}
)
);
}
});
const program = dbData.schema.hasTable(PROGRAM_TABLE).then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(PROGRAM_TABLE, (table) => {
table.text("id").notNullable().primary();
table.text("name").notNullable();
table.text("desc").notNullable();
table.text("tags").notNullable();
table.boolean("active").notNullable().defaultTo(true);
table.float("last_gpa", 4).notNullable().defaultTo(0);
});
await ProgramTable().insert(
(await import("./mockData/program.json")).default.map(
({ id, ...rest }) => {
return {
...rest,
id: id.toString(),
};
}
)
);
}
});
const programStructure = dbData.schema
.hasTable(PROGRAM_STRUCTURE_TABLE)
.then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(PROGRAM_STRUCTURE_TABLE, (table) => {
table.integer("id", 8).notNullable().primary();
table.text("program_id").notNullable();
table.text("curriculum").notNullable();
table.integer("semester", 4).notNullable();
table.text("course_id").notNullable();
table.float("credits", 8).notNullable();
table.text("requisites").defaultTo("").notNullable();
table.text("mention").defaultTo("").notNullable();
table.text("course_cat").defaultTo("").notNullable();
table.text("mode").defaultTo("semestral").notNullable();
table.float("credits_sct", 8).notNullable();
table.text("tags").notNullable().defaultTo("");
});
await ProgramStructureTable().insert(
(await import("./mockData/program_structure.json")).default.map(
({ program_id, curriculum, ...rest }) => {
return {
...rest,
program_id: program_id.toString(),
curriculum: curriculum.toString(),
};
}
)
);
}
});
const groupedComplementaryInformationStructure = dbData.schema
.hasTable(GROUPED_COMPLEMENTARY_INFORMATION_TABLE)
.then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(
GROUPED_COMPLEMENTARY_INFORMATION_TABLE,
(table) => {
table.integer("id", 8).notNullable().primary();
table.text("program_id").notNullable();
table.text("curriculum").notNullable();
table.text("type_admission").notNullable();
table.text("cohort").notNullable();
table.integer("total_students", 6).notNullable();
table.float("university_degree_rate", 3).notNullable();
table.float("retention_rate", 3).notNullable();
table.float("average_time_university_degree", 3).notNullable();
table.float("timely_university_degree_rate", 3).notNullable();
}
);
await GroupedComplementaryInformationTable().insert(
(
await import("./mockData/grouped_complementary_information.json")
).default.map(({ program_id, curriculum, cohort, ...rest }) => {
return {
...rest,
program_id: program_id.toString(),
curriculum: curriculum.toString(),
cohort: cohort.toString(),
};
})
);
}
});
const studentGroupedEmployedStructure = dbData.schema
.hasTable(STUDENT_GROUPED_EMPLOYED_TABLE)
.then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(
STUDENT_GROUPED_EMPLOYED_TABLE,
(table) => {
table.integer("id", 8).notNullable().primary();
table.text("program_id").notNullable();
table.text("curriculum").notNullable();
table.text("type_admission").notNullable();
table.text("cohort").notNullable();
table.integer("total_students", 6).notNullable();
table.float("employed_rate", 3).notNullable();
table.float("average_time_job_finding", 3).notNullable();
table.float("employed_rate_educational_system", 3).notNullable();
}
);
await StudentGroupedEmployedTable().insert(
(
await import("./mockData/student_grouped_employed.json")
).default.map(({ program_id, curriculum, cohort, ...rest }) => {
return {
...rest,
program_id: program_id.toString(),
curriculum: curriculum.toString(),
cohort: cohort.toString(),
};
})
);
}
});
const externalEvaluationStructure = dbData.schema
.hasTable(EXTERNAL_EVALUATION_STRUCTURE_TABLE)
.then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(
EXTERNAL_EVALUATION_STRUCTURE_TABLE,
(table) => {
table.integer("id", 8).notNullable().primary();
table.text("program_id").notNullable();
table.text("curriculum").notNullable();
table.integer("year").notNullable();
table.integer("semester", 4).notNullable();
table.text("external_evaluation_id").notNullable();
table.float("credits", 8).notNullable();
table.text("requisites").defaultTo("").notNullable();
table.text("mention").defaultTo("").notNullable();
table.text("evaluation_cat").defaultTo("").notNullable();
table.text("mode").defaultTo("semestral").notNullable();
table.float("credits_sct", 8).notNullable();
table.text("tags").notNullable().defaultTo("");
}
);
await ExternalEvaluationStructureTable().insert(
(
await import("./mockData/external_evaluation_structure.json")
).default.map(({ program_id, curriculum, ...rest }) => {
return {
...rest,
program_id: program_id.toString(),
curriculum: curriculum.toString(),
};
})
);
}
});
const student = dbData.schema.hasTable(STUDENT_TABLE).then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(STUDENT_TABLE, (table) => {
table.text("id").notNullable().primary();
table.text("name").notNullable();
table.text("state").notNullable();
});
await StudentTable().insert(
(await import("./mockData/student.json")).default
);
}
});
const studentCourse = dbData.schema
.hasTable(STUDENT_COURSE_TABLE)
.then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(STUDENT_COURSE_TABLE, (table) => {
table.integer("id", 8).notNullable().primary();
table.integer("year", 4).notNullable();
table.integer("term", 4).notNullable();
table.text("student_id").notNullable();
table.text("course_taken").notNullable();
table.text("course_equiv").notNullable();
table.text("elect_equiv").notNullable();
table.text("registration").notNullable();
table.text("state").notNullable();
table.float("grade", 8).notNullable();
table.integer("p_group", 2).notNullable();
table.text("comments").notNullable();
table.text("instructors").notNullable();
table.integer("duplicates", 8).notNullable();
});
await StudentCourseTable().insert(
(await import("./mockData/student_course.json")).default
);
}
});
const studentDropout = dbData.schema
.hasTable(STUDENT_DROPOUT_TABLE)
.then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(STUDENT_DROPOUT_TABLE, (table) => {
table.text("student_id").notNullable().primary();
table.float("prob_dropout", 4);
table.text("weight_per_semester");
table.boolean("active").defaultTo(false).notNullable();
table.float("model_accuracy", 4);
table.text("explanation");
});
await StudentDropoutTable().insert(
(await import("./mockData/student_dropout.json")).default.map(
({ weight_per_semester, ...rest }) => {
return {
...rest,
weight_per_semester: weight_per_semester.toString(),
};
}
)
);
}
});
const studentProgram = dbData.schema
.hasTable(STUDENT_PROGRAM_TABLE)
.then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(STUDENT_PROGRAM_TABLE, (table) => {
table.text("student_id").notNullable();
table.text("program_id").notNullable();
table.text("curriculum").notNullable();
table.primary(["student_id", "program_id", "curriculum"]);
table.integer("start_year", 4).notNullable();
table.text("mention").notNullable();
table.integer("last_term", 4).notNullable();
table.integer("n_courses", 8).notNullable();
table.integer("n_passed_courses", 4).notNullable();
table.float("completion", 4).notNullable();
});
await StudentProgramTable().insert(
(await import("./mockData/student_program.json")).default.map(
({ program_id, curriculum, ...rest }) => {
return {
...rest,
program_id: program_id.toString(),
curriculum: curriculum.toString(),
};
}
)
);
}
});
const studentTerm = dbData.schema
.hasTable(STUDENT_TERM_TABLE)
.then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(STUDENT_TERM_TABLE, (table) => {
table.integer("id", 8).primary().notNullable();
table.text("student_id").notNullable();
table.integer("year", 4).notNullable();
table.integer("term", 4).notNullable();
table.text("situation").notNullable();
table.float("t_gpa", 8).notNullable();
table.float("c_gpa", 8).notNullable();
table.text("comments").notNullable().defaultTo("");
table.text("program_id").notNullable();
table.text("curriculum").notNullable();
table.integer("start_year", 4).notNullable();
table.text("mention").notNullable().defaultTo("");
});
await StudentTermTable().insert(
(await import("./mockData/student_term.json")).default.map(
({ comments, program_id, curriculum, ...rest }) => {
return {
...rest,
comments: comments.toString(),
program_id: program_id.toString(),
curriculum: curriculum.toString(),
};
}
)
);
}
});
const studentAdmission = dbData.schema
.hasTable(STUDENT_ADMISSION_TABLE)
.then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(STUDENT_ADMISSION_TABLE, (table) => {
table.text("student_id").notNullable().primary();
table.boolean("active").notNullable().defaultTo(true);
table.text("type_admission").notNullable();
table.float("initial_evaluation", 4);
table.float("final_evaluation", 4);
});
await StudentAdmissionTable().insert(
(await import("./mockData/student_admission.json")).default
);
}
});
const studentExternalEvaluation = dbData.schema
.hasTable(STUDENT_EXTERNAL_EVALUATION_TABLE)
.then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(
STUDENT_EXTERNAL_EVALUATION_TABLE,
(table) => {
table.integer("id").notNullable().primary();
table.integer("year").notNullable();
table.integer("term").notNullable();
table.text("student_id").notNullable();
table.text("external_evaluation_taken").notNullable();
table.text("topic").notNullable();
table.text("registration").notNullable();
table.text("state").notNullable();
table.text("grade").notNullable();
table.integer("p_group").notNullable();
table.text("comments");
table.integer("duplicates").notNullable();
}
);
await StudentExternalEvaluationTable().insert(
(await import("./mockData/student_external_evaluation.json")).default
);
}
});
const externalEvaluation = dbData.schema
.hasTable(EXTERNAL_EVALUATION_TABLE)
.then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(EXTERNAL_EVALUATION_TABLE, (table) => {
table.text("id").notNullable().primary();
table.text("name").notNullable();
table.text("description").notNullable();
table.text("tags").notNullable();
table.text("grading").notNullable();
table.integer("grade_min").notNullable();
table.integer("grade_max").notNullable();
table.integer("grade_pass_min").notNullable();
});
await ExternalEvaluationTable().insert(
(await import("./mockData/external_evaluation.json")).default
);
}
});
const externalEvaluationStats = dbData.schema
.hasTable(EXTERNAL_EVALUATION_STATS_TABLE)
.then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(
EXTERNAL_EVALUATION_STATS_TABLE,
(table) => {
table.text("external_evaluation_taken").notNullable();
table.integer("year", 4).notNullable();
table.integer("term", 4).notNullable();
table.text("topic").notNullable();
table.integer("p_group", 2).notNullable();
table.integer("n_total", 8).notNullable();
table.integer("n_finished", 8).notNullable();
table.integer("n_pass", 8).notNullable();
table.integer("n_fail", 8).notNullable();
table.integer("n_drop", 8).notNullable();
table.text("histogram").notNullable();
table.float("avg_grade").notNullable();
table.integer("n_grades", 4).notNullable();
table.integer("id", 8).primary().notNullable();
table.text("histogram_labels").notNullable();
table.text("color_bands").notNullable();
}
);
await ExternalEvaluationStatsTable().insert(
(await import("./mockData/external_evaluation_stats.json")).default
);
}
});
const externalEvaluationGroupedStats = dbData.schema
.hasTable(EXTERNAL_EVALUATION_GROUPED_STATS_TABLE)
.then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(
EXTERNAL_EVALUATION_GROUPED_STATS_TABLE,
(table) => {
table.integer("id", 8).notNullable().primary();
table.text("external_evaluation_id").notNullable();
table.text("topic").notNullable();
table.text("program_id").notNullable();
table.text("curriculum").notNullable();
table.text("type_admission").notNullable();
table.text("cohort").notNullable();
table.integer("year", 4).notNullable();
table.integer("term", 4).notNullable();
table.integer("n_students").notNullable();
table.integer("n_total", 8).notNullable();
table.integer("n_finished", 8).notNullable();
table.integer("n_pass", 8).notNullable();
table.integer("n_fail", 8).notNullable();
table.integer("n_drop", 8).notNullable();
table.text("histogram").notNullable();
table.text("histogram_labels").notNullable();
table.text("color_bands").notNullable();
}
);
await ExternalEvaluationGroupedStatsTable().insert(
(
await import("./mockData/external_evaluation_grouped_stats.json")
).default.map(({ program_id, curriculum, cohort, ...rest }) => {
return {
...rest,
program_id: program_id.toString(),
curriculum: curriculum.toString(),
cohort: cohort.toString(),
};
})
);
}
});
const courseGroupedStats = dbData.schema
.hasTable(COURSE_GROUPED_STATS_TABLE)
.then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(COURSE_GROUPED_STATS_TABLE, (table) => {
table.integer("id", 8).notNullable().primary();
table.text("course_id").notNullable();
table.text("program_id").notNullable();
table.text("curriculum").notNullable();
table.text("type_admission").notNullable();
table.text("cohort").notNullable();
table.integer("year", 4).notNullable();
table.integer("term", 4).notNullable();
table.integer("n_students").notNullable();
table.integer("n_total", 8).notNullable();
table.integer("n_finished", 8).notNullable();
table.integer("n_pass", 8).notNullable();
table.integer("n_fail", 8).notNullable();
table.integer("n_drop", 8).notNullable();
table.text("histogram").notNullable();
table.text("histogram_labels").notNullable();
table.text("color_bands").notNullable();
});
await CourseGroupedStatsTable().insert(
(await import("./mockData/course_grouped_stats.json")).default.map(
({ program_id, curriculum, cohort, ...rest }) => {
return {
...rest,
program_id: program_id.toString(),
curriculum: curriculum.toString(),
cohort: cohort.toString(),
};
}
)
);
}
});
const studentEmployed = dbData.schema
.hasTable(STUDENT_EMPLOYED_TABLE)
.then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(STUDENT_EMPLOYED_TABLE, (table) => {
table.text("student_id").notNullable().primary();
table.boolean("employed").defaultTo(false).notNullable();
table.text("institution");
table.text("educational_system");
table.integer("months_to_first_job");
table.text("description");
});
await StudentEmployedTable().insert(
(await import("./mockData/student_employed.json")).default
);
}
});
const performanceByLoad = dbData.schema
.hasTable(PERFORMANCE_BY_LOAD_TABLE)
.then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(PERFORMANCE_BY_LOAD_TABLE, (table) => {
table.integer("id", 8).primary().notNullable();
table.text("program_id").notNullable().defaultTo("");
table.integer("student_cluster", 2).notNullable();
table.text("courseload_unit").notNullable().defaultTo("credits");
table.float("courseload_lb", 4).notNullable();
table.float("courseload_ub", 4).notNullable();
table.float("hp_value", 4).notNullable();
table.float("mp_value", 4).notNullable();
table.float("lp_value", 4).notNullable();
table.text("message_title").notNullable();
table.text("message_text").notNullable();
table.text("cluster_label").notNullable();
table.integer("hp_count");
table.integer("mp_count");
table.integer("lp_count");
table.text("courseload_label").notNullable();
table.integer("n_total");
});
await PerformanceByLoadTable().insert(
(await import("./mockData/performance_by_load.json")).default
);
}
});
const studentcluster = dbData.schema
.hasTable(STUDENT_CLUSTER_TABLE)
.then(async (exists) => {
if (!exists) {
await dbData.schema.createTable(STUDENT_CLUSTER_TABLE, (table) => {
table.text("student_id");
table.text("program_id");
table.integer("cluster", 2);
table.primary(["student_id", "program_id"]);
});
await StudentClusterTable().insert(
(await import("./mockData/student_program.json")).default.map(
({ student_id, program_id }, index) => {
return {
student_id,
program_id: program_id.toString(),
cluster: index % 3,
};
}
)
);
}
});
const persistence = dbAuth.schema
.hasTable(PERSISTENCE_TABLE)
.then(async (exists) => {
if (!exists) {
await dbAuth.schema.createTable(PERSISTENCE_TABLE, (table) => {
table.text("user").notNullable();
table.text("key").notNullable();
table.json("data").notNullable();
table.timestamp("timestamp", { useTz: true }).notNullable();
table.increments("id").primary();
table.unique(["user", "key"]);
});
}
});
const mockFeedbackForms = [
{
id: 0,
name: "Feedback1",
},
{
id: 1,
name: "Feedback2",
},
];
const feedbackForm = dbTracking.schema
.hasTable(FEEDBACK_FORM_TABLE)
.then(async (exists) => {
if (!exists) {
await dbTracking.schema.createTable(FEEDBACK_FORM_TABLE, (table) => {
table.increments("id").primary();
table.text("name").notNullable();
table.integer("priority").notNullable().defaultTo(0);
});
await FeedbackFormTable().insert(mockFeedbackForms);
}
});
const mockFeedbackOptions: FeedbackQuestionOption[] = [
{
text: "option 1",
value: 1,
},
{
text: "option 2",
value: 2,
},
{
text: "option 3",
value: 3,
},
];
const mockFeedbackQuestions = [
{
id: 0,
form_id: 0,
question: "Question1",
type: FeedbackQuestionType.OpenText,
options: "",
},
{
id: 1,
form_id: 0,
question: "Question2",
type: FeedbackQuestionType.SingleAnswer,
options: joinFeedbackQuestionOptions(mockFeedbackOptions),
},
{
id: 2,
form_id: 0,
question: "Question3",
type: FeedbackQuestionType.MultipleAnswer,
options: joinFeedbackQuestionOptions(mockFeedbackOptions),
},
{
id: 3,
form_id: 1,
question: "Question4",
type: FeedbackQuestionType.OpenText,
options: "",
},
{
id: 4,
form_id: 1,
question: "Question5",
type: FeedbackQuestionType.SingleAnswer,
options: joinFeedbackQuestionOptions(mockFeedbackOptions),
},
{
id: 5,
form_id: 1,
question: "Question6",
type: FeedbackQuestionType.MultipleAnswer,
options: joinFeedbackQuestionOptions(mockFeedbackOptions),
},
];
const feedbackFormQuestion = dbTracking.schema
.hasTable(FEEDBACK_FORM_QUESTION_TABLE)
.then(async (exists) => {
if (!exists) {
await dbTracking.schema.createTable(
FEEDBACK_FORM_QUESTION_TABLE,
(table) => {
table.increments("id").primary();
table.integer("form_id").notNullable();
table.text("question").notNullable();
table
.enu("type", [
FeedbackQuestionType.OpenText,
FeedbackQuestionType.SingleAnswer,
FeedbackQuestionType.MultipleAnswer,
] as FeedbackQuestionType[])
.notNullable();
table.integer("priority").notNullable().defaultTo(0);
table.text("options").notNullable().defaultTo("");
}
);
await FeedbackFormQuestionTable().insert(mockFeedbackQuestions);
}
});
const feedbackResult = dbTracking.schema
.hasTable(FEEDBACK_RESULT_TABLE)
.then(async (exists) => {
if (!exists) {
await dbTracking.schema.createTable(FEEDBACK_RESULT_TABLE, (table) => {
table.integer("form_id").notNullable();
table.integer("question_id").notNullable();
table.text("user_id").notNullable();
table.text("answer").notNullable().defaultTo("");
table
.timestamp("timestamp", { useTz: true })
.notNullable()
.defaultTo(dbTracking.fn.now());
table.primary(["form_id", "question_id", "user_id"]);
});
const form_id = sample(mockFeedbackForms)?.id ?? 0;
const questionsOfForm = mockFeedbackQuestions.filter(
(question) => question.form_id === form_id
);
await FeedbackResultTable().insert(
questionsOfForm.map((question) => {
return {
form_id,
question_id: question.id,
user_id: "[email protected]",
answer:
sample(
splitFeedbackQuestionOptions(question.options)
)?.value.toString() ?? "random",
};
})
);
}
});
await Promise.all([
users,
usersPrograms,
userConfig,
config,
track,
course,
courseStats,
courseGroupedStats,
groupedComplementaryInformationStructure,
param,
program,
programStructure,
externalEvaluationStructure,
student,
studentAdmission,
studentExternalEvaluation,
studentGroupedEmployedStructure,
externalEvaluation,
externalEvaluationStats,
externalEvaluationGroupedStats,
studentCourse,
studentDropout,
studentEmployed,
studentProgram,
studentTerm,
performanceByLoad,
studentcluster,
persistence,
feedbackForm,
feedbackFormQuestion,
feedbackResult,
]);
await Promise.all([
dbAuth.destroy(),
dbConfig.destroy(),
dbData.destroy(),
dbTracking.destroy(),
]);
console.info("DATABASE PREPARED!");
};
if (process.env.NODE_ENV === undefined) {
//@ts-ignore
process.env.NODE_ENV = "development";
}
if (process.env.NODE_ENV !== "test") {
migration().catch((err) => {
console.error(err);
process.exit(1);
});
const envFilePath = path.join(process.cwd(), ".env");
if (!fs.existsSync(envFilePath)) {
fs.promises
.writeFile(
envFilePath,
`# Generate a couple of random strings, for example from https://onlinerandomtools.com/generate-random-string
SECRET=uhqktjgizfvmmjbiwgcrbtuvactkvazsnivphziciuywppuefeelitsigcvlly
COOKIE_SECRET=njfkpaignxcbksisfvksofmzoupagshkkqbiyfsksfmglihzuyibstciqxeeix
# This domain in production has to be changed to the target domain
# It should include the HTTP/s protocol
DOMAIN=http://localhost:3000
# Mail service credentials
# Sendgrid API key https://sendgrid.com/
# In development it is optional, but in production it is required
SENDGRID_API_KEY=SG.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
[email protected]
EMAIL_ADDRESS_NAME=Support example
[email protected]
# Optional, 3000 by default
PORT=3000
# Optional, "localhost" with no credentials required by default
# The target db user is always "postgres"
POSTGRES_HOST=localhost
# POSTGRES_PASSWORD=asvpvmhbqmipzojxfzdqsgovhxqzdpgueixyylkyorxpctfjeqmytfvceuheqi
# By default, in the production environment the GraphQL playgrounds Altair https://altair.sirmuel.design/ & Voyager are disabled.
# Set this environment variable to show them anyway; it's recommended to keep it removed or commented out
# SHOW_GRAPHQL_API=true
# This environment variable is only required for the production deployment at UACh; keep it commented out or remove it otherwise
# ANONYMOUS_ID_SERVICE=http://anonymous-id-service.com/example
`,
{
encoding: "utf-8",
}
)
.catch(console.error);
}
}
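// The guard pattern repeated for every table above, distilled (sketch only,
// with placeholder names NAME/Table/mockRows): check for the table, create it,
// and seed mock data exactly once, which keeps re-runs of "pnpm i" /
// "pnpm migrate" idempotent.
//
//   const table = db.schema.hasTable(NAME).then(async (exists) => {
//     if (!exists) {
//       await db.schema.createTable(NAME, (t) => { /* columns */ });
//       await Table().insert(mockRows); // seeded only on first creation
//     }
//   });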
ta-ln.min.js | function LNPrefix(d){var f=d.parentElement,c=d.value.split(/\r?\n/).length+10;d.style.cssText="width:90%;resize:none;line-height: normal !important;";f.classList.add("LN_area");f.style.cssText="overflow:hidden;height:250px;";function g(j,h){var i=document.createElement("div");i.innerText=h;i.classList.add("LN_n");i.style.cssText="text-align:right;padding-right:.1rem;";j.appendChild(i)}var b=document.getElementsByClassName("LN_sb")[0];if(b){f.removeChild(b)}var e=document.createElement("div");e.classList.add("LN_sb");e.style.cssText="padding-top:.375rem;display:inline-block;float:left;width:auto;";f.insertBefore(e,d);for(var a=0;a<c;a++){g(document.getElementsByClassName("LN_sb")[0],a+1)}d.addEventListener("scroll",function(i){var h=this.parentElement.children[0].style,j=h.margin-this.scrollTop;h.marginTop=String(j)+"px";this.parentElement.style.overflow="hidden"})};
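// Usage sketch (illustrative only): attach the line-number gutter to a textarea.
//
//   LNPrefix(document.querySelector("textarea"));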
header.component.ts | import { Component, OnDestroy, OnInit } from '@angular/core';
import { NbMediaBreakpointsService, NbMenuService, NbSidebarService, NbThemeService } from '@nebular/theme';
import { LayoutService } from '../../../@core/utils';
import { map, takeUntil } from 'rxjs/operators';
import { Subject } from 'rxjs';
import { UserService, User } from '../../../modules/user/user.service';
@Component({
selector: 'ngx-header',
styleUrls: ['./header.component.scss'],
templateUrl: './header.component.html',
})
export class HeaderComponent implements OnInit, OnDestroy {
private destroy$: Subject<void> = new Subject<void>();
userPictureOnly: boolean = false;
user: User;
displayName: string;
themes = [
{ value: 'default', name: 'Light', },
{ value: 'dark', name: 'Dark', },
{ value: 'cosmic', name: 'Cosmic', },
{ value: 'corporate', name: 'Corporate', },
];
currentTheme = 'default';
userMenu = [
{ title: 'Profile', icon: 'person-outline', link: 'pages/user/profile', },
{ title: 'Log out' },
];
constructor(private sidebarService: NbSidebarService,
private menuService: NbMenuService,
private themeService: NbThemeService,
private userService: UserService,
private layoutService: LayoutService,
private breakpointService: NbMediaBreakpointsService) {
}
ngOnInit() {
this.currentTheme = this.themeService.currentTheme;
this.userService.onUserStatus()
.pipe(takeUntil(this.destroy$))
.subscribe((user: any) => {
this.user = user;
this.displayName = user?.first ?? user?.email;
});
const { xl } = this.breakpointService.getBreakpointsMap();
this.themeService.onMediaQueryChange()
.pipe(
map(([, currentBreakpoint]) => currentBreakpoint.width < xl),
takeUntil(this.destroy$),
)
.subscribe((isLessThanXl: boolean) => this.userPictureOnly = isLessThanXl);
this.themeService.onThemeChange()
.pipe(
map(({ name }) => name),
takeUntil(this.destroy$),
)
.subscribe(themeName => this.currentTheme = themeName);
}
ngOnDestroy() {
this.destroy$.next();
this.destroy$.complete();
}
changeTheme(themeName: string) {
this.themeService.changeTheme(themeName);
}
toggleSidebar(): boolean {
this.sidebarService.toggle(true, 'menu-sidebar');
this.layoutService.changeLayoutSize();
return false;
}
navigateHome() {
this.menuService.navigateHome();
return false;
}
}
failures_test.go | package msgs
import (
"github.com/stretchr/testify/assert"
"testing"
"time"
)
func TestFailureNotifier(t *testing.T) {
assert := assert.New(t)
fn := NewFailureNotifier(5)
for id := 0; id < 5; id++ {
assert.False(fn.IsConnected(id), "Node should be initially disconnected")
err := fn.NowConnected(id)
assert.Nil(err, "Node could not connect")
assert.True(fn.IsConnected(id), "Node should be connected")
}
// check on false failures
select {
case <-fn.NotifyOnFailure(3):
t.Error("Unexpected failure")
case <-time.After(100 * time.Millisecond):
}
wait := fn.NotifyOnFailure(3)
fn.NowDisconnected(3)
// check that the failure is reported
select {
case <-wait:
case <-time.After(100 * time.Millisecond):
t.Error("Failure not reported")
}
}
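// A minimal sketch of the FailureNotifier API that this test exercises (an
// assumption for illustration, not the real msgs implementation): per-node
// connection state plus a channel that is closed when a node is marked
// disconnected.
//
//   type FailureNotifier struct {
//       connected []bool
//       failures  []chan struct{}
//   }
//
//   func NewFailureNotifier(n int) *FailureNotifier {
//       return &FailureNotifier{
//           connected: make([]bool, n),
//           failures:  make([]chan struct{}, n),
//       }
//   }
//
//   func (f *FailureNotifier) IsConnected(id int) bool { return f.connected[id] }
//
//   func (f *FailureNotifier) NowConnected(id int) error {
//       if f.connected[id] {
//           return errors.New("node already connected")
//       }
//       f.connected[id] = true
//       f.failures[id] = make(chan struct{})
//       return nil
//   }
//
//   // NotifyOnFailure returns a channel that is closed once the node fails.
//   func (f *FailureNotifier) NotifyOnFailure(id int) <-chan struct{} {
//       return f.failures[id]
//   }
//
//   func (f *FailureNotifier) NowDisconnected(id int) {
//       f.connected[id] = false
//       close(f.failures[id])
//   }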
monitoring.py | import time
from src import log_setup
LOGGER = log_setup.get_logger(__name__)
def monitor(func):
    def wrapped_function(*args, **kwargs):
        start = time.monotonic_ns()
        return_value = func(*args, **kwargs)
        LOGGER.info(
            f'function {func.__name__} took {(time.monotonic_ns() - start) / 1000000} milliseconds ')
        return return_value
    return wrapped_function
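# Usage sketch (illustrative only): any function can be timed by decorating it.
#
#   @monitor
#   def slow_add(a, b):
#       time.sleep(0.1)
#       return a + b
#
#   slow_add(1, 2)  # logs roughly: function slow_add took 100.0 milliseconds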
MainToolbar.tsx | import * as React from 'react';
import AppBar from '@material-ui/core/AppBar';
import Button from '@material-ui/core/Button';
import IconButton from '@material-ui/core/IconButton';
import Toolbar from '@material-ui/core/Toolbar';
import Typography from '@material-ui/core/Typography';
import MenuIcon from '@material-ui/icons/Menu';
import SearchIcon from '@material-ui/icons/Search';
import SearchBar from 'material-ui-search-bar';
import { createStyles, Theme, WithStyles, withStyles } from '@material-ui/core/styles';
import { Captions } from '../../translations/en-US';
import { DocRepoApi } from '../../util/DocRepoApi';
import { CustomError } from '../../models/customerror';
import { Document } from '../../models/document';
import { IDocGridSettings } from '../DocumentGrid/DocumentGrid';
const styles = (theme: Theme) => createStyles({
appBar: {
// position: 'fixed',
},
block: {
display: 'block',
},
hidden: {
display: 'none',
},
menuToolbar:{
justifyContent: 'space-between',
},
navHeadingText: {
color: theme.palette.secondary.main,
fontWeight: 'bold',
},
navIconHide: {
[theme.breakpoints.up('md')]: {
display: 'none',
},
},
toolbar: theme.mixins.toolbar,
});
interface IMainToolbarProps extends WithStyles<typeof styles>{
onDocumentsChanged(documents: Document[], docGridSettings: IDocGridSettings):void;
onDrawerToggle(): void;
onError(errors: CustomError):void;
}
interface IMainToolbarState{
    searchBarOpen: boolean;
    searchTerms: string;
}
class MainToolbar extends React.Component<IMainToolbarProps, IMainToolbarState>{
constructor(props: IMainToolbarProps){
super(props);
this.state = {
searchBarOpen: false,
searchTerms: ""
};
this.handleOpenSearch = this.handleOpenSearch.bind(this);
this.handleOnRequestSearch = this.handleOnRequestSearch.bind(this);
this.handleSearchOnChange = this.handleSearchOnChange.bind(this);
}
public render(){
const { classes } = this.props;
return(
<AppBar className={classes.appBar}>
<Toolbar className={classes.menuToolbar}>
<IconButton
color="inherit"
aria-label="open drawer"
onClick={this.props.onDrawerToggle}
>
<MenuIcon />
</IconButton>
<Typography variant="title" color="inherit" >
{Captions.appBar.title}
</Typography>
<div>
<IconButton color="inherit" onClick={this.handleOpenSearch}>
<SearchIcon />
</IconButton>
<Button color="inherit">{Captions.appBar.help}</Button>
</div>
</Toolbar>
<div className={ this.state.searchBarOpen ? classes.block : classes.hidden }>
<SearchBar value={this.state.searchTerms} onChange={this.handleSearchOnChange} onRequestSearch={this.handleOnRequestSearch} />
</div>
</AppBar>
);
}
private handleOpenSearch(event: React.MouseEvent<HTMLElement>){
this.setState({searchBarOpen: !this.state.searchBarOpen});
}
private handleSearchOnChange(value: string){
this.setState({searchTerms: value});
}
private handleOnRequestSearch(){
this.getDocumentsFromSearch(this.state.searchTerms);
}
private async getDocumentsFromSearch(searchTerms: string){
const result: Document[] | CustomError = await DocRepoApi.getDocumentsFromSearch(searchTerms, true, 300);
const docGridSettings: IDocGridSettings = {
displayAuthors: true,
displayCatalogs: true,
displayDocType: true,
displayIsFitForClients: true,
displayPagination: true,
displayProduct: true,
displayShortDescription: true,
displayVersion: true,
filterable: true,
numberOfResults: 20,
title: searchTerms
};
if(result instanceof CustomError){
this.props.onError(result);
}
else{
this.props.onDocumentsChanged(result, docGridSettings);
}
}
}
export default withStyles(styles, { withTheme: true })(MainToolbar);
skip.go | package odata

import (
	"strconv"
)
func NewSkip() *Skip {
return &Skip{}
}
type Skip struct {
i int
}
func (t *Skip) Set(i int) {
t.i = i
}
func (t Skip) MarshalSchema() string {
i := int64(t.i)
if i == 0 {
return ""
}
return strconv.FormatInt(i, 10)
}
func (t Skip) IsZero() bool {
return t.i == 0
}
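// Usage sketch (hypothetical; would live in a test or example file with "fmt"
// imported): a zero/unset Skip marshals to "" so it can be omitted from queries.
//
//   s := NewSkip()
//   s.Set(20)
//   fmt.Println(s.MarshalSchema()) // "20"
//   fmt.Println(Skip{}.IsZero())   // true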
tracing.module.ts | import {NgModule} from '@angular/core';
import {CommonModule} from '@angular/common';
import {TracingComponent} from './tracing.component';
import {RouterModule, Routes} from "@angular/router";
import {SharedModule} from "../../modules/shared.module";
import {FormsModule, ReactiveFormsModule} from "@angular/forms";
import {TracingDetailComponent} from './tracing-detail/tracing-detail.component';
import {NgxSmartModalModule} from "ngx-smart-modal";
const routes: Routes = [
{path: '', component: TracingComponent},
{path: 'detail', component: TracingDetailComponent},
]
@NgModule({
declarations: [
TracingComponent,
TracingDetailComponent
],
imports: [
CommonModule,
SharedModule,
RouterModule.forChild(routes),
ReactiveFormsModule,
FormsModule,
NgxSmartModalModule.forRoot(),
]
})
export class TracingModule {
}
test_main.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `pyevr.main`."""
import pytest
from click.testing import CliRunner
from pyevr.main import main
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(main)
assert result.exit_code == 0
assert 'pyevr.cli.main' in result.output
help_result = runner.invoke(main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
func.py | import numpy as np
from scipy.stats import norm, truncnorm
from numpy.random import default_rng
### fix the number of different populations
n_pop = 4
def pick_random_hyper(all_hyper, sample_size=None):
    # Draw a random subset of hyperparameter vectors; if sample_size is None,
    # fall back to drawing as many rows as all_hyper has.
    rng = default_rng()
    size = sample_size or all_hyper.shape[0]
    return rng.choice(all_hyper, size=size, replace=False)
def indicate(M, trans, i):
'''
indicate which M belongs to population i given transition parameter
'''
ts = np.insert(np.insert(trans, n_pop-1, np.inf), 0, -np.inf)
return (M>=ts[i]) & (M<ts[i+1])
def indicate_II(M, trans, i):
return (M>=trans[...,i]) & (M<trans[...,i+1])
def split_hyper_linear(hyper):
'''
split hyper and derive c
'''
c0, slope,sigma, trans = \
hyper[0], hyper[1:1+n_pop], hyper[1+n_pop:1+2*n_pop], hyper[1+2*n_pop:]
c = np.zeros_like(slope)
c[0] = c0
for i in range(1,n_pop):
c[i] = c[i-1] + trans[i-1]*(slope[i-1]-slope[i])
return c, slope, sigma, trans
def split_hyper_linear_II(hyper):
'''
split hyper and derive c
'''
c0, slope,sigma, trans = \
hyper[...,0], hyper[...,1:1+n_pop], hyper[...,1+n_pop:1+2*n_pop], hyper[...,1+2*n_pop:]
c = np.zeros_like(slope)
c[...,0] = c0
for i in range(1,n_pop):
c[...,i] = c[...,i-1] + trans[...,i-1]*(slope[...,i-1]-slope[...,i])
trans = np.insert(np.insert(trans,n_pop-1,np.inf,axis=1), 0, -np.inf, axis=1)
return c, slope, sigma, trans
def piece_linear_II(hyper, M, prob_R):
c, slope, sigma, trans = split_hyper_linear_II(hyper)
R = np.zeros_like(M)
for i in range(n_pop):
ind = indicate_II(M, trans, i)
mu = c[...,i]
mu[ind] += M[ind]*slope[ind,i]
R[ind] = norm.ppf(prob_R[ind],mu[ind],sigma[ind,i])
return R
def generate_mass(mean, std, sample_size):
mlower = 3e-4
mupper = 3e5
return truncnorm.rvs( (mlower-mean)/std, (mupper-mean)/std, loc=mean, scale=std, size=sample_size)
def piece_linear(hyper, M, prob_R):
'''
model: straight line
'''
M = np.array(M)
c, slope, sigma, trans = split_hyper_linear(hyper)
R = np.zeros_like(M)
for i in range(4):
ind = indicate(M, trans, i)
mu = c[i] + M[ind]*slope[i]
R[ind] = norm.ppf(prob_R[ind], mu, sigma[i])
return R
def ProbRGivenM(radii, M, hyper):
'''
p(radii|M)
'''
c, slope, sigma, trans = split_hyper_linear(hyper)
prob = np.zeros_like(M)
#print('SHAPE', prob.shape, M.shape, slope.shape)
for i in range(4):
ind = indicate(M, trans, i)
#print('MSHAPE',M[ind].shape)
mu = c[i] + M[ind]*slope[i]
#print('EXPECTED',mu)
sig = sigma[i]
prob[ind] = norm.pdf(radii, mu, sig)
prob = prob/np.sum(prob)
return prob
def ProbRGivenM_II(radii, M, hyper):
c, slope, sigma, trans = split_hyper_linear_II(hyper)
# 10, 100
prob = np.zeros(shape=(radii.shape[0], M.shape[0]))
mu = np.zeros_like(prob)
for i in range(n_pop):
mu[...] = 0.0
ind = indicate_II(M[None,...], trans[:,None,:], i)
radii_id,mass_id = np.where(ind)
#
mu[radii_id, mass_id] = c[radii_id,i] + slope[radii_id,i]*M[mass_id]#M[None,...]*slope[:,None,i][ind]
#print(mu[0])
prob[ind] = norm.pdf(radii[radii_id],mu[radii_id, mass_id],sigma[radii_id,i])
#print('C',c[:,None,i])
return (prob/np.sum(prob, axis=1)[:,None])
def random_choice_2d(arr, probs):
idx = (probs.cumsum(1) > np.random.rand(probs.shape[0])[:,None]).argmax(1)
return arr[idx]
def classification( logm, trans ):
'''
classify as four worlds
'''
count = np.zeros(4)
sample_size = len(logm)
ts = np.insert(np.insert(trans, n_pop-1, np.inf), 0, -np.inf)
for iclass in range(4):
ind = indicate_II( logm, ts, iclass)
count[iclass] = count[iclass] + ind.sum()
prob = count / np.sum(count) * 100.
print ('Terran %(T).1f %%, Neptunian %(N).1f %%, Jovian %(J).1f %%, Star %(S).1f %%' \
% {'T': prob[0], 'N': prob[1], 'J': prob[2], 'S': prob[3]})
    return None
firebase.service.ts | import { firestore } from "../firebase";
export class FirebaseServiceProvider {
async add({collection, data, onSuccess, onError}: {
data: any,
collection: string;
onSuccess?: (pricing: any) => void,
onError?: (error) => void
}) {
let recordData: any;
await firestore.collection(collection).add(data).then( async (record) => {
recordData = {...data, id: record.id}
await firestore.doc(`${collection}/${recordData.id}`).set(recordData).then(async () => {
!!onSuccess && await onSuccess(recordData);
}).catch(async (error) => {
!!onError && await onError(error);
});
}).catch(async (error) => {
!!onError && await onError(error);
});
}
async update({collection, data, onSuccess, onError}: {
data: any,
collection: string;
onSuccess?: (pricing: any) => void,
onError?: (error) => void
}) {
await firestore.doc(`${collection}/${data.id}`).set(data).then(async () => {
!!onSuccess && await onSuccess(data);
}).catch(async (error) => {
!!onError && await onError(error);
});
}
}
export const FirebaseService = new FirebaseServiceProvider();
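// Usage sketch (illustrative only; "pricings" is a hypothetical collection name):
//
//   await FirebaseService.add({
//     collection: "pricings",
//     data: { amount: 10 },
//     onSuccess: (record) => console.log("created", record.id),
//     onError: (error) => console.error(error),
//   });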
build.js | /*******************************
Build Task
*******************************/
var
gulp = require('gulp-help')(require('gulp')),
// config
config = require('./config/user'),
install = require('./config/project/install')
;
// add sub-tasks
if(config.rtl) {
require('./collections/rtl')(gulp);
}
require('./collections/build')(gulp);
module.exports = function(callback) {
console.info('Building Semantic');
  if( !install.isSetup() ) {
    console.error('Cannot find semantic.json. Run "gulp install" to set-up Semantic');
return;
}
// check for right-to-left (RTL) language
if(config.rtl == 'both') {
gulp.start('build-rtl');
}
if(config.rtl === true || config.rtl === 'Yes') {
gulp.start('build-rtl');
return;
}
gulp.start('build-javascript');
gulp.start('build-css');
gulp.start('build-assets');
};
alignment.rs | use crate::layout::Alignment;
pub fn get_line_offset(line_width: u16, text_area_width: u16, alignment: Alignment) -> u16 {
    match alignment {
        Alignment::Center => (text_area_width / 2).saturating_sub(line_width / 2),
        Alignment::Right => text_area_width.saturating_sub(line_width),
        Alignment::Left => 0,
    }
}
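// Usage sketch (illustrative only): offsets for a 10-cell line in a 20-cell area.
//
//   assert_eq!(get_line_offset(10, 20, Alignment::Center), 5);
//   assert_eq!(get_line_offset(10, 20, Alignment::Right), 10);
//   // Lines wider than the area saturate to 0 instead of underflowing:
//   assert_eq!(get_line_offset(30, 20, Alignment::Center), 0);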
event_hub_step_test.go | package upgrade_kyma
import (
"context"
"encoding/json"
"fmt"
"testing"
"time"
"github.com/kyma-project/control-plane/components/kyma-environment-broker/common/hyperscaler"
hyperscalerautomock "github.com/kyma-project/control-plane/components/kyma-environment-broker/common/hyperscaler/automock"
"github.com/kyma-project/control-plane/components/kyma-environment-broker/common/hyperscaler/azure"
azuretesting "github.com/kyma-project/control-plane/components/kyma-environment-broker/common/hyperscaler/azure/testing"
"github.com/kyma-project/control-plane/components/kyma-environment-broker/internal"
"github.com/kyma-project/control-plane/components/kyma-environment-broker/internal/fixture"
"github.com/kyma-project/control-plane/components/kyma-environment-broker/internal/ptr"
"github.com/kyma-project/control-plane/components/kyma-environment-broker/internal/storage"
"github.com/pivotal-cf/brokerapi/v7/domain"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
const (
subAccountID = "12df5747-3efb-4df6-ad6f-4414bb661ce3"
fixOperationID = "17f3ddba-1132-466d-a3c5-920f544d7ea6"
)
type wantStateFunction = func(t *testing.T, operation internal.UpgradeKymaOperation, when time.Duration, err error,
azureClient azuretesting.FakeNamespaceClient)
func Test_StepsDeprovisionSucceeded(t *testing.T) {
tests := []struct {
name string
giveOperation func() internal.UpgradeKymaOperation
giveSteps func(t *testing.T, memoryStorageOp storage.Operations, instanceStorage storage.Instances, accountProvider *hyperscalerautomock.AccountProvider) []DeprovisionAzureEventHubStep
wantRepeatOperation bool
wantStates func(t *testing.T) []wantStateFunction
}{
{
// 1. a ResourceGroup exists before we call the deprovisioning step
// 2. resourceGroup is in deletion state during retry wait time before we call the deprovisioning step again
// 3. expectation is that no new deprovisioning is triggered
// 4. after calling step again - expectation is that the deprovisioning succeeded now
name: "ResourceGroupInDeletionMode",
giveOperation: fixDeprovisioningOperationWithParameters,
giveSteps: func(t *testing.T, memoryStorageOp storage.Operations, instanceStorage storage.Instances, accountProvider *hyperscalerautomock.AccountProvider) []DeprovisionAzureEventHubStep {
namespaceClientResourceGroupExists := azuretesting.NewFakeNamespaceClientResourceGroupExists()
namespaceClientResourceGroupInDeletionMode := azuretesting.NewFakeNamespaceClientResourceGroupInDeletionMode()
namespaceClientResourceGroupDoesNotExist := azuretesting.NewFakeNamespaceClientResourceGroupDoesNotExist()
stepResourceGroupExists := fixEventHubStep(memoryStorageOp, instanceStorage, azuretesting.NewFakeHyperscalerProvider(namespaceClientResourceGroupExists), accountProvider)
stepResourceGroupInDeletionMode := fixEventHubStep(memoryStorageOp, instanceStorage, azuretesting.NewFakeHyperscalerProvider(namespaceClientResourceGroupInDeletionMode), accountProvider)
stepResourceGroupDoesNotExist := fixEventHubStep(memoryStorageOp, instanceStorage, azuretesting.NewFakeHyperscalerProvider(namespaceClientResourceGroupDoesNotExist), accountProvider)
return []DeprovisionAzureEventHubStep{
stepResourceGroupExists,
stepResourceGroupInDeletionMode,
stepResourceGroupDoesNotExist,
}
},
wantStates: func(t *testing.T) []wantStateFunction {
return []wantStateFunction{
func(t *testing.T, operation internal.UpgradeKymaOperation, when time.Duration, err error, azureClient azuretesting.FakeNamespaceClient) {
ensureOperationIsRepeated(t, operation, when, err)
},
func(t *testing.T, operation internal.UpgradeKymaOperation, when time.Duration, err error, azureClient azuretesting.FakeNamespaceClient) {
assert.False(t, azureClient.DeleteResourceGroupCalled)
ensureOperationIsRepeated(t, operation, when, err)
},
func(t *testing.T, operation internal.UpgradeKymaOperation, when time.Duration, err error, azureClient azuretesting.FakeNamespaceClient) {
ensureOperationSuccessful(t, operation, when, err)
},
}
},
},
{
// Idea:
// 1. a ResourceGroup exists before we call the deprovisioning step
// 2. resourceGroup got deleted during retry wait time before we call the deprovisioning step again
// 3. expectation is that the deprovisioning succeeded now
name: "ResourceGroupExists",
giveOperation: fixDeprovisioningOperationWithParameters,
giveSteps: func(t *testing.T, memoryStorageOp storage.Operations, instanceStorage storage.Instances, accountProvider *hyperscalerautomock.AccountProvider) []DeprovisionAzureEventHubStep {
namespaceClientResourceGroupExists := azuretesting.NewFakeNamespaceClientResourceGroupExists()
namespaceClientResourceGroupDoesNotExist := azuretesting.NewFakeNamespaceClientResourceGroupDoesNotExist()
stepResourceGroupExists := fixEventHubStep(memoryStorageOp, instanceStorage, azuretesting.NewFakeHyperscalerProvider(namespaceClientResourceGroupExists), accountProvider)
stepResourceGroupDoesNotExist := fixEventHubStep(memoryStorageOp, instanceStorage, azuretesting.NewFakeHyperscalerProvider(namespaceClientResourceGroupDoesNotExist), accountProvider)
return []DeprovisionAzureEventHubStep{
stepResourceGroupExists,
stepResourceGroupDoesNotExist,
}
},
wantStates: func(t *testing.T) []wantStateFunction {
return []wantStateFunction{
func(t *testing.T, operation internal.UpgradeKymaOperation, when time.Duration, err error, azureClient azuretesting.FakeNamespaceClient) {
ensureOperationIsRepeated(t, operation, when, err)
},
func(t *testing.T, operation internal.UpgradeKymaOperation, when time.Duration, err error, azureClient azuretesting.FakeNamespaceClient) {
ensureOperationSuccessful(t, operation, when, err)
},
}
},
},
{
// Idea:
// 1. a ResourceGroup does not exist before we call the deprovisioning step
// 2. expectation is that the deprovisioning succeeded
name: "ResourceGroupDoesNotExist",
giveOperation: fixDeprovisioningOperationWithParameters,
giveSteps: func(t *testing.T, memoryStorageOp storage.Operations, instanceStorage storage.Instances, accountProvider *hyperscalerautomock.AccountProvider) []DeprovisionAzureEventHubStep {
namespaceClient := azuretesting.NewFakeNamespaceClientResourceGroupDoesNotExist()
step := fixEventHubStep(memoryStorageOp, instanceStorage, azuretesting.NewFakeHyperscalerProvider(namespaceClient), accountProvider)
return []DeprovisionAzureEventHubStep{
step,
}
},
wantStates: func(t *testing.T) []wantStateFunction {
return []wantStateFunction{
func(t *testing.T, operation internal.UpgradeKymaOperation, when time.Duration, err error, azureClient azuretesting.FakeNamespaceClient) {
ensureOperationSuccessful(t, operation, when, err)
},
}
},
},
{
name: "Operation Event Hub already deleted",
giveOperation: fixDeprovisioningOperationWithDeletedEventHub,
giveSteps: func(t *testing.T, memoryStorageOp storage.Operations, instanceStorage storage.Instances, accountProvider *hyperscalerautomock.AccountProvider) []DeprovisionAzureEventHubStep {
namespaceClient := azuretesting.NewFakeNamespaceClientResourceGroupDoesNotExist()
step := fixEventHubStep(memoryStorageOp, instanceStorage, azuretesting.NewFakeHyperscalerProvider(namespaceClient), accountProvider)
return []DeprovisionAzureEventHubStep{
step,
}
},
wantStates: func(t *testing.T) []wantStateFunction {
return []wantStateFunction{
func(t *testing.T, operation internal.UpgradeKymaOperation, when time.Duration, err error, azureClient azuretesting.FakeNamespaceClient) {
ensureOperationSuccessful(t, operation, when, err)
},
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// given
memoryStorage := storage.NewMemoryStorage()
accountProvider := fixAccountProvider()
op := tt.giveOperation()
// this is required to avoid storage retries (without this statement there will be an error => retry)
err := memoryStorage.Operations().InsertUpgradeKymaOperation(op)
require.NoError(t, err)
err = memoryStorage.Instances().Insert(fixInstance())
require.NoError(t, err)
steps := tt.giveSteps(t, memoryStorage.Operations(), memoryStorage.Instances(), accountProvider)
wantStates := tt.wantStates(t)
for idx, step := range steps {
// when
op.UpdatedAt = time.Now()
op, when, err := step.Run(op, fixLogger())
require.NoError(t, err)
fakeHyperscalerProvider, ok := step.HyperscalerProvider.(*azuretesting.FakeHyperscalerProvider)
require.True(t, ok)
fakeAzureClient, ok := fakeHyperscalerProvider.Client.(*azuretesting.FakeNamespaceClient)
require.True(t, ok)
// then
wantStates[idx](t, op, when, err, *fakeAzureClient)
}
})
}
}
func Test_StepsUnhappyPath(t *testing.T) {
tests := []struct {
name string
giveOperation func() internal.UpgradeKymaOperation
giveInstance func() internal.Instance
giveStep func(t *testing.T, storage storage.BrokerStorage) DeprovisionAzureEventHubStep
wantRepeatOperation bool
}{
{
name: "Operation already deprovisioned eventhub",
giveOperation: fixDeprovisioningOperationWithDeletedEventHub,
giveInstance: fixInvalidInstance,
giveStep: func(t *testing.T, storage storage.BrokerStorage) DeprovisionAzureEventHubStep {
accountProvider := fixAccountProvider()
return fixEventHubStep(storage.Operations(), storage.Instances(), azuretesting.NewFakeHyperscalerProvider(azuretesting.NewFakeNamespaceClientHappyPath()), accountProvider)
},
wantRepeatOperation: false,
},
{
name: "Operation provision parameter errors",
giveOperation: fixDeprovisioningOperation,
giveInstance: fixInvalidInstance,
giveStep: func(t *testing.T, storage storage.BrokerStorage) DeprovisionAzureEventHubStep {
accountProvider := fixAccountProvider()
return fixEventHubStep(storage.Operations(), storage.Instances(), azuretesting.NewFakeHyperscalerProvider(azuretesting.NewFakeNamespaceClientHappyPath()), accountProvider)
},
wantRepeatOperation: false,
},
{
name: "AccountProvider cannot get gardener credentials",
giveOperation: fixDeprovisioningOperationWithParameters,
giveInstance: fixInstance,
giveStep: func(t *testing.T, storage storage.BrokerStorage) DeprovisionAzureEventHubStep {
accountProvider := fixAccountProviderGardenerCredentialsError()
return fixEventHubStep(storage.Operations(), storage.Instances(), azuretesting.NewFakeHyperscalerProvider(azuretesting.NewFakeNamespaceClientHappyPath()), accountProvider)
},
wantRepeatOperation: true,
},
{
name: "Error while getting EventHubs Namespace credentials",
giveOperation: fixDeprovisioningOperationWithParameters,
giveInstance: fixInstance,
giveStep: func(t *testing.T, storage storage.BrokerStorage) DeprovisionAzureEventHubStep {
accountProvider := fixAccountProviderGardenerCredentialsError()
return NewDeprovisionAzureEventHubStep(storage.Operations(),
// oops ... the namespace cannot be listed
azuretesting.NewFakeHyperscalerProvider(azuretesting.NewFakeNamespaceClientListError()),
accountProvider,
context.Background(),
)
},
wantRepeatOperation: true,
},
{
name: "Error while getting config from Credentials",
giveOperation: fixDeprovisioningOperationWithParameters,
giveInstance: fixInstance,
giveStep: func(t *testing.T, storage storage.BrokerStorage) DeprovisionAzureEventHubStep {
accountProvider := fixAccountProviderGardenerCredentialsHAPError()
return NewDeprovisionAzureEventHubStep(storage.Operations(),
azuretesting.NewFakeHyperscalerProvider(azuretesting.NewFakeNamespaceAccessKeysNil()),
accountProvider,
context.Background(),
)
},
wantRepeatOperation: false,
},
{
name: "Error while getting client from HAP",
giveOperation: fixDeprovisioningOperationWithParameters,
giveInstance: fixInstance,
giveStep: func(t *testing.T, storage storage.BrokerStorage) DeprovisionAzureEventHubStep {
accountProvider := fixAccountProvider()
return NewDeprovisionAzureEventHubStep(storage.Operations(),
// oops ... the client cannot be created
azuretesting.NewFakeHyperscalerProviderError(),
accountProvider,
context.Background(),
)
},
wantRepeatOperation: false,
},
{
name: "Error while getting resource group",
giveOperation: fixDeprovisioningOperationWithParameters,
giveInstance: fixInstance,
giveStep: func(t *testing.T, storage storage.BrokerStorage) DeprovisionAzureEventHubStep {
accountProvider := fixAccountProvider()
return NewDeprovisionAzureEventHubStep(storage.Operations(),
// oops ... can't get the resource group
azuretesting.NewFakeHyperscalerProvider(azuretesting.NewFakeNamespaceClientResourceGroupConnectionError()),
accountProvider,
context.Background(),
)
},
wantRepeatOperation: true,
},
{
name: "Error while deleting resource group",
giveOperation: fixDeprovisioningOperationWithParameters,
giveInstance: fixInstance,
giveStep: func(t *testing.T, storage storage.BrokerStorage) DeprovisionAzureEventHubStep {
accountProvider := fixAccountProvider()
return NewDeprovisionAzureEventHubStep(storage.Operations(),
// oops ... can't delete the resource group
azuretesting.NewFakeHyperscalerProvider(azuretesting.NewFakeNamespaceClientResourceGroupDeleteError()),
accountProvider,
context.Background(),
)
},
wantRepeatOperation: true,
},
{
name: "Resource group properties are nil",
giveOperation: fixDeprovisioningOperationWithParameters,
giveInstance: fixInstance,
giveStep: func(t *testing.T, storage storage.BrokerStorage) DeprovisionAzureEventHubStep {
accountProvider := fixAccountProvider()
return NewDeprovisionAzureEventHubStep(storage.Operations(),
// oops ... the resource group properties come back nil
azuretesting.NewFakeHyperscalerProvider(azuretesting.NewFakeNamespaceClientResourceGroupPropertiesError()),
accountProvider,
context.Background(),
)
},
wantRepeatOperation: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// given
memoryStorage := storage.NewMemoryStorage()
op := tt.giveOperation()
step := tt.giveStep(t, memoryStorage)
// this is required to avoid storage retries (without this statement there will be an error => retry)
err := memoryStorage.Operations().InsertUpgradeKymaOperation(op)
require.NoError(t, err)
err = memoryStorage.Instances().Insert(tt.giveInstance())
require.NoError(t, err)
// when
op.UpdatedAt = time.Now()
op, when, err := step.Run(op, fixLogger())
require.NotNil(t, op)
// then
if tt.wantRepeatOperation {
ensureOperationIsRepeated(t, op, when, err)
} else {
ensureOperationIsNotRepeated(t, err)
}
})
}
}
func fixInstance() internal.Instance {
instance := fixture.FixInstance(fixInstanceID)
instance.Parameters.ErsContext.SubAccountID = subAccountID
instance.Parameters.Parameters.Name = "nachtmaar-15"
instance.Parameters.Parameters.Region = ptr.String("westeurope")
return instance
}
func fixInvalidInstance() internal.Instance {
var pp2 internal.ProvisioningParameters
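// Deliberately malformed JSON: the unmarshal error is ignored on purpose
// so the zero-value parameters simulate an invalid instance.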
json.Unmarshal([]byte(`}{INVALID JSON}{`), &pp2)
return internal.Instance{
InstanceID: fixInstanceID,
Parameters: pp2}
}
func fixAccountProvider() *hyperscalerautomock.AccountProvider {
accountProvider := hyperscalerautomock.AccountProvider{}
accountProvider.On("GardenerCredentials", hyperscaler.Azure, mock.Anything).Return(hyperscaler.Credentials{
HyperscalerType: hyperscaler.Azure,
CredentialData: map[string][]byte{
"subscriptionID": []byte("subscriptionID"),
"clientID": []byte("clientID"),
"clientSecret": []byte("clientSecret"),
"tenantID": []byte("tenantID"),
},
}, nil)
return &accountProvider
}
func fixEventHubStep(memoryStorageOp storage.Operations, instanceStorage storage.Instances, hyperscalerProvider azure.HyperscalerProvider,
accountProvider *hyperscalerautomock.AccountProvider) DeprovisionAzureEventHubStep {
return NewDeprovisionAzureEventHubStep(memoryStorageOp, hyperscalerProvider, accountProvider, context.Background())
}
func fixLogger() logrus.FieldLogger {
return logrus.StandardLogger()
}
func fixDeprovisioningOperationWithParameters() internal.UpgradeKymaOperation {
upgradeOperation := fixture.FixUpgradeKymaOperation(fixOperationID, fixInstanceID)
upgradeOperation.ProvisionerOperationID = fixProvisionerOperationID
upgradeOperation.Description = ""
upgradeOperation.State = ""
upgradeOperation.ProvisioningParameters = internal.ProvisioningParameters{
PlanID: "",
ServiceID: "",
ErsContext: internal.ERSContext{},
Parameters: internal.ProvisioningParametersDTO{},
PlatformRegion: "",
}
return upgradeOperation
}
func fixDeprovisioningOperation() internal.UpgradeKymaOperation {
upgradeOperation := fixture.FixUpgradeKymaOperation(fixOperationID, fixInstanceID)
upgradeOperation.ProvisionerOperationID = fixProvisionerOperationID
return upgradeOperation
}
func fixDeprovisioningOperationWithDeletedEventHub() internal.UpgradeKymaOperation {
upgradeOperation := fixture.FixUpgradeKymaOperation(fixOperationID, fixInstanceID)
upgradeOperation.State = ""
upgradeOperation.InstanceDetails.EventHub.Deleted = true
return upgradeOperation
}
// operationManager.OperationFailed(...)
// manager.go: if processedOperation.State != domain.InProgress { return 0, nil } => repeat
// queue.go: if err == nil && when != 0 => repeat
func ensureOperationIsRepeated(t *testing.T, op internal.UpgradeKymaOperation, when time.Duration, err error) {
t.Helper()
assert.Nil(t, err)
assert.True(t, when != 0)
assert.NotEqual(t, op.Operation.State, domain.Succeeded)
}
func ensureOperationIsNotRepeated(t *testing.T, err error) {
t.Helper()
assert.Nil(t, err)
}
func | (t *testing.T, op internal.UpgradeKymaOperation, when time.Duration, err error) {
t.Helper()
assert.Equal(t, when, time.Duration(0))
assert.Equal(t, op.Operation.State, domain.LastOperationState(""))
assert.Nil(t, err)
}
func fixAccountProviderGardenerCredentialsError() *hyperscalerautomock.AccountProvider {
accountProvider := hyperscalerautomock.AccountProvider{}
accountProvider.On("GardenerCredentials", hyperscaler.Azure, mock.Anything).Return(hyperscaler.Credentials{
HyperscalerType: hyperscaler.Azure,
CredentialData: map[string][]byte{},
}, fmt.Errorf("ups ... gardener credentials could not be retrieved"))
return &accountProvider
}
func fixAccountProviderGardenerCredentialsHAPError() *hyperscalerautomock.AccountProvider {
accountProvider := hyperscalerautomock.AccountProvider{}
accountProvider.On("GardenerCredentials", hyperscaler.Azure, mock.Anything).Return(hyperscaler.Credentials{
HyperscalerType: hyperscaler.AWS,
CredentialData: map[string][]byte{
"subscriptionID": []byte("subscriptionID"),
"clientID": []byte("clientID"),
"clientSecret": []byte("clientSecret"),
"tenantID": []byte("tenantID"),
},
}, nil)
return &accountProvider
}
| ensureOperationSuccessful |
group-delete.go | /*
* Copyright 2021 The Gort Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cli
import (
"fmt"
"github.com/getgort/gort/client"
"github.com/spf13/cobra"
)
// $ cogctl group delete --help
// Usage: cogctl group delete [OPTIONS] GROUP
//
// Delete a group.
//
// Options:
// --help Show this message and exit.
const (
groupDeleteUse = "delete"
groupDeleteShort = "Delete an existing group"
groupDeleteLong = "Delete an existing group."
groupDeleteUsage = `Usage:
gort group delete [flags] group_name
Flags:
-h, --help Show this message and exit
Global Flags:
-P, --profile string The Gort profile within the config file to use
`
)
// GetGroupDeleteCmd is a command
func | () *cobra.Command {
cmd := &cobra.Command{
Use: groupDeleteUse,
Short: groupDeleteShort,
Long: groupDeleteLong,
RunE: groupDeleteCmd,
Args: cobra.ExactArgs(1),
}
cmd.SetUsageTemplate(groupDeleteUsage)
return cmd
}
func groupDeleteCmd(cmd *cobra.Command, args []string) error {
gortClient, err := client.Connect(FlagGortProfile)
if err != nil {
return err
}
groupname := args[0]
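// Fetch the group first so a nonexistent group fails fast with a clear
// error before any deletion is attempted.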
group, err := gortClient.GroupGet(groupname)
if err != nil {
return err
}
fmt.Printf("Deleting group %s... ", group.Name)
err = gortClient.GroupDelete(group.Name)
if err != nil {
return err
}
fmt.Println("Successful")
return nil
}
| GetGroupDeleteCmd |
config.rs | //! Customize line editor
use std::default::Default;
/// User preferences
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Config {
/// Maximum number of entries in History.
max_history_size: usize, // history_max_entries
history_duplicates: HistoryDuplicates,
history_ignore_space: bool,
completion_type: CompletionType,
/// When listing completion alternatives, only display
/// one screen of possibilities at a time.
completion_prompt_limit: usize,
/// Duration (milliseconds) Rustyline will wait for a character when
/// reading an ambiguous key sequence.
keyseq_timeout: i32,
/// Emacs or Vi mode
edit_mode: EditMode,
/// If true, each nonblank line returned by `readline` will be
/// automatically added to the history.
auto_add_history: bool,
/// Beep or Flash or nothing
bell_style: BellStyle,
/// if colors should be enabled.
color_mode: ColorMode,
/// Whether to use stdio or not
behavior: Behavior,
/// Horizontal space taken by a tab.
tab_stop: usize,
/// Indentation size for indent/dedent commands
indent_size: usize,
/// Check if cursor position is at leftmost before displaying prompt
check_cursor_position: bool,
/// Bracketed paste on unix platform
enable_bracketed_paste: bool,
}
impl Config {
/// Returns a `Config` builder.
#[must_use]
pub fn builder() -> Builder {
Builder::new()
}
/// Tell the maximum length (i.e. number of entries) for the history.
#[must_use]
pub fn max_history_size(&self) -> usize {
self.max_history_size
}
pub(crate) fn set_max_history_size(&mut self, max_size: usize) {
self.max_history_size = max_size;
}
/// Tell if lines which match the previous history entry are saved or not
/// in the history list.
///
/// By default, they are ignored.
#[must_use]
pub fn history_duplicates(&self) -> HistoryDuplicates {
self.history_duplicates
}
pub(crate) fn set_history_ignore_dups(&mut self, yes: bool) {
self.history_duplicates = if yes {
HistoryDuplicates::IgnoreConsecutive
} else {
HistoryDuplicates::AlwaysAdd
};
}
/// Tell if lines which begin with a space character are saved or not in
/// the history list.
///
/// By default, they are saved.
#[must_use]
pub fn history_ignore_space(&self) -> bool {
self.history_ignore_space
}
pub(crate) fn set_history_ignore_space(&mut self, yes: bool) {
self.history_ignore_space = yes;
}
/// Completion behaviour.
///
/// By default, `CompletionType::Circular`.
#[must_use]
pub fn completion_type(&self) -> CompletionType {
self.completion_type
}
/// When listing completion alternatives, only display
/// one screen of possibilities at a time (used for `CompletionType::List`
/// mode).
#[must_use]
pub fn completion_prompt_limit(&self) -> usize {
self.completion_prompt_limit
}
/// Duration (milliseconds) Rustyline will wait for a character when
/// reading an ambiguous key sequence (used for `EditMode::Vi` mode on unix
/// platform).
///
/// By default, no timeout (-1) or 500ms if `EditMode::Vi` is activated.
#[must_use]
pub fn keyseq_timeout(&self) -> i32 {
self.keyseq_timeout
}
/// Emacs or Vi mode
#[must_use]
pub fn edit_mode(&self) -> EditMode {
self.edit_mode
}
/// Tell if lines are automatically added to the history.
///
/// By default, they are not.
#[must_use]
pub fn auto_add_history(&self) -> bool {
self.auto_add_history
}
/// Bell style: beep, flash or nothing.
#[must_use]
pub fn bell_style(&self) -> BellStyle {
self.bell_style
}
/// Tell if colors should be enabled.
///
/// By default, they are except if stdout is not a TTY.
#[must_use]
pub fn color_mode(&self) -> ColorMode {
self.color_mode
}
pub(crate) fn set_color_mode(&mut self, color_mode: ColorMode) {
self.color_mode = color_mode;
}
/// Whether to use stdio or not
///
/// By default, stdio is used.
#[must_use]
pub fn behavior(&self) -> Behavior {
self.behavior
}
pub(crate) fn set_behavior(&mut self, behavior: Behavior) {
self.behavior = behavior;
}
/// Horizontal space taken by a tab.
///
/// By default, 8.
#[must_use]
pub fn tab_stop(&self) -> usize {
self.tab_stop
}
pub(crate) fn set_tab_stop(&mut self, tab_stop: usize) {
self.tab_stop = tab_stop;
}
/// Check if cursor position is at leftmost before displaying prompt.
///
/// By default, we don't check.
#[must_use]
pub fn check_cursor_position(&self) -> bool {
self.check_cursor_position
}
/// Indentation size used by indentation commands
///
/// By default, 2.
#[must_use]
pub fn indent_size(&self) -> usize {
self.indent_size
}
pub(crate) fn set_indent_size(&mut self, indent_size: usize) {
self.indent_size = indent_size;
}
/// Bracketed paste on unix platform
///
/// By default, it's enabled.
#[must_use]
pub fn enable_bracketed_paste(&self) -> bool {
self.enable_bracketed_paste
}
}
impl Default for Config {
fn default() -> Self {
Self {
max_history_size: 100,
history_duplicates: HistoryDuplicates::IgnoreConsecutive,
history_ignore_space: false,
completion_type: CompletionType::Circular, // TODO Validate
completion_prompt_limit: 100,
keyseq_timeout: -1,
edit_mode: EditMode::Emacs,
auto_add_history: false,
bell_style: BellStyle::default(),
color_mode: ColorMode::Enabled,
behavior: Behavior::default(),
tab_stop: 8,
indent_size: 2,
check_cursor_position: false,
enable_bracketed_paste: true,
}
}
}
/// Beep or flash or nothing
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BellStyle {
/// Beep
Audible,
/// Silent
None,
/// Flash screen (not supported)
Visible,
}
/// `Audible` by default on unix (overridden by current Terminal settings).
/// `None` on windows.
impl Default for BellStyle {
#[cfg(any(windows, target_arch = "wasm32"))]
fn default() -> Self {
BellStyle::None
}
#[cfg(unix)]
fn default() -> Self {
BellStyle::Audible
}
}
/// History filter
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum HistoryDuplicates {
/// No filter
AlwaysAdd,
/// A line will not be added to the history if it matches the previous entry.
IgnoreConsecutive,
}
/// Tab completion style
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum CompletionType {
/// Complete the next full match (like in Vim by default)
Circular,
/// Complete till longest match.
/// When more than one match, list all matches
/// (like in Bash/Readline).
List,
/// Complete the match using fuzzy search and selection
/// (like fzf and plugins)
/// Currently only available on Unix platforms due to the dependency on
/// skim->tuikit. Compile with `--features=fuzzy` to enable.
#[cfg(all(unix, feature = "with-fuzzy"))]
Fuzzy,
}
/// Style of editing / Standard keymaps
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum EditMode {
/// Emacs keymap
Emacs,
/// Vi keymap
Vi,
}
/// Colorization mode
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum ColorMode {
/// Activate highlighting if platform/terminal is supported.
Enabled,
/// Activate highlighting even if platform is not supported (windows < 10).
Forced,
/// Deactivate highlighting even if platform/terminal is supported.
Disabled,
}
/// Should the editor use stdio
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum Behavior {
/// Use stdin / stdout
Stdio,
/// Use terminal-style interaction whenever possible, even if 'stdin' and/or
/// 'stdout' are not terminals.
PreferTerm,
// TODO
// Use file-style interaction, reading input from the given file.
// useFile
}
impl Default for Behavior {
fn default() -> Self {
Behavior::Stdio
}
}
/// Configuration builder
#[derive(Clone, Debug, Default)]
pub struct Builder {
p: Config,
}
impl Builder {
/// Returns a `Config` builder.
#[must_use]
pub fn new() -> Self {
Self {
p: Config::default(),
}
}
/// Set the maximum length for the history.
#[must_use]
pub fn max_history_size(mut self, max_size: usize) -> Self {
self.set_max_history_size(max_size);
self
}
/// Tell if lines which match the previous history entry are saved or not
/// in the history list.
///
/// By default, they are ignored.
#[must_use]
pub fn history_ignore_dups(mut self, yes: bool) -> Self {
self.set_history_ignore_dups(yes);
self
}
/// Tell if lines which begin with a space character are saved or not in
/// the history list.
///
/// By default, they are saved.
#[must_use]
pub fn history_ignore_space(mut self, yes: bool) -> Self {
self.set_history_ignore_space(yes);
self
}
/// Set `completion_type`.
#[must_use]
pub fn completion_type(mut self, completion_type: CompletionType) -> Self {
self.set_completion_type(completion_type);
self
}
/// The number of possible completions that determines when the user is
/// asked whether the list of possibilities should be displayed.
#[must_use]
pub fn completion_prompt_limit(mut self, completion_prompt_limit: usize) -> Self {
self.set_completion_prompt_limit(completion_prompt_limit);
self
}
/// Timeout for ambiguous key sequences in milliseconds.
/// Currently, it is used only to distinguish a single ESC from an ESC
/// sequence.
/// After seeing an ESC key, wait at most `keyseq_timeout_ms` for another
/// byte.
#[must_use]
pub fn keyseq_timeout(mut self, keyseq_timeout_ms: i32) -> Self {
self.set_keyseq_timeout(keyseq_timeout_ms);
self
}
/// Choose between Emacs or Vi mode.
#[must_use]
pub fn edit_mode(mut self, edit_mode: EditMode) -> Self {
self.set_edit_mode(edit_mode);
self
}
/// Tell if lines are automatically added to the history.
///
/// By default, they are not.
#[must_use]
pub fn auto_add_history(mut self, yes: bool) -> Self {
self.set_auto_add_history(yes);
self
}
/// Set bell style: beep, flash or nothing.
#[must_use]
pub fn bell_style(mut self, bell_style: BellStyle) -> Self {
self.set_bell_style(bell_style);
self
}
/// Forces colorization on or off.
///
/// By default, colorization is on except if stdout is not a TTY.
#[must_use]
pub fn color_mode(mut self, color_mode: ColorMode) -> Self {
self.set_color_mode(color_mode);
self
}
/// Whether to use stdio or not
///
/// By default, stdio is used.
#[must_use]
pub fn behavior(mut self, behavior: Behavior) -> Self {
self.set_behavior(behavior);
self
}
/// Horizontal space taken by a tab.
///
/// By default, `8`
#[must_use]
pub fn tab_stop(mut self, tab_stop: usize) -> Self {
self.set_tab_stop(tab_stop);
self
}
/// Check if cursor position is at leftmost before displaying prompt.
///
/// By default, we don't check.
#[must_use]
pub fn check_cursor_position(mut self, yes: bool) -> Self {
self.set_check_cursor_position(yes);
self
}
/// Indentation size
///
/// By default, `2`
#[must_use]
pub fn indent_size(mut self, indent_size: usize) -> Self {
self.set_indent_size(indent_size);
self
}
/// Enable or disable bracketed paste on unix platform
///
/// By default, it's enabled.
#[must_use]
pub fn bracketed_paste(mut self, enabled: bool) -> Self {
self.enable_bracketed_paste(enabled);
self
}
/// Builds a `Config` with the settings specified so far.
#[must_use]
pub fn build(self) -> Config {
self.p
}
}
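// Usage sketch (all methods defined above): assembling a custom `Config`
// with the builder, e.g. for a Vi-mode editor with list-style completion.
//
//     let config = Config::builder()
//         .max_history_size(1000)
//         .history_ignore_dups(true)
//         .completion_type(CompletionType::List)
//         .edit_mode(EditMode::Vi)
//         .build();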
impl Configurer for Builder {
fn config_mut(&mut self) -> &mut Config {
&mut self.p
}
}
/// Trait for component that holds a `Config`.
pub trait Configurer {
/// `Config` accessor.
fn config_mut(&mut self) -> &mut Config;
/// Set the maximum length for the history.
fn set_max_history_size(&mut self, max_size: usize) {
self.config_mut().set_max_history_size(max_size);
}
/// Tell if lines which match the previous history entry are saved or not
/// in the history list.
///
/// By default, they are ignored.
fn set_history_ignore_dups(&mut self, yes: bool) {
self.config_mut().set_history_ignore_dups(yes);
}
/// Tell if lines which begin with a space character are saved or not in
/// the history list.
///
/// By default, they are saved.
fn set_history_ignore_space(&mut self, yes: bool) {
self.config_mut().set_history_ignore_space(yes);
}
/// Set `completion_type`.
fn set_completion_type(&mut self, completion_type: CompletionType) {
self.config_mut().completion_type = completion_type;
}
/// The number of possible completions that determines when the user is
/// asked whether the list of possibilities should be displayed.
fn set_completion_prompt_limit(&mut self, completion_prompt_limit: usize) {
self.config_mut().completion_prompt_limit = completion_prompt_limit;
}
/// Timeout for ambiguous key sequences in milliseconds.
fn set_keyseq_timeout(&mut self, keyseq_timeout_ms: i32) {
self.config_mut().keyseq_timeout = keyseq_timeout_ms;
}
/// Choose between Emacs or Vi mode.
fn set_edit_mode(&mut self, edit_mode: EditMode) {
self.config_mut().edit_mode = edit_mode;
match edit_mode {
EditMode::Emacs => self.set_keyseq_timeout(-1), // no timeout
EditMode::Vi => self.set_keyseq_timeout(500),
}
}
/// Tell if lines are automatically added to the history.
///
/// By default, they are not.
fn set_auto_add_history(&mut self, yes: bool) {
self.config_mut().auto_add_history = yes;
}
/// Set bell style: beep, flash or nothing.
fn set_bell_style(&mut self, bell_style: BellStyle) {
self.config_mut().bell_style = bell_style;
}
/// Forces colorization on or off.
///
/// By default, colorization is on except if stdout is not a TTY.
fn set_color_mode(&mut self, color_mode: ColorMode) {
self.config_mut().set_color_mode(color_mode);
}
/// Whether to use stdio or not
///
/// By default, stdio is used.
fn set_behavior(&mut self, behavior: Behavior) {
self.config_mut().set_behavior(behavior);
}
/// Horizontal space taken by a tab.
///
/// By default, `8`
fn set_tab_stop(&mut self, tab_stop: usize) {
self.config_mut().set_tab_stop(tab_stop);
}
/// Check if cursor position is at leftmost before displaying prompt.
///
/// By default, we don't check.
fn set_check_cursor_position(&mut self, yes: bool) {
self.config_mut().check_cursor_position = yes;
}
/// Indentation size for indent/dedent commands
///
/// By default, `2`
fn | (&mut self, size: usize) {
self.config_mut().set_indent_size(size);
}
/// Enable or disable bracketed paste on unix platform
///
/// By default, it's enabled.
fn enable_bracketed_paste(&mut self, enabled: bool) {
self.config_mut().enable_bracketed_paste = enabled;
}
}
| set_indent_size |
1560917264.js | // This file was generated by purescript-docs-search | window.DocsSearchTypeIndex["1560917264"] = [{"values":[{"sourceSpan":{"start":[200,1],"name":".spago/either/v4.1.1/src/Data/Either/Nested.purs","end":[200,162]},"score":27,"packageInfo":{"values":["either"],"tag":"Package"},"name":"either8","moduleName":"Data.Either.Nested","info":{"values":[{"type":{"tag":"ForAll","contents":["h",{"tag":"ForAll","contents":["g",{"tag":"ForAll","contents":["f",{"tag":"ForAll","contents":["e",{"tag":"ForAll","contents":["d",{"tag":"ForAll","contents":["c",{"tag":"ForAll","contents":["b",{"tag":"ForAll","contents":["a",{"tag":"ForAll","contents":["r",{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeConstructor","contents":[["Prim"],"Function"]},{"tag":"ParensInType","contents":{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeConstructor","contents":[["Prim"],"Function"]},{"tag":"TypeVar","contents":"a"}]},{"tag":"TypeVar","contents":"r"}]}}]},{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeConstructor","contents":[["Prim"],"Function"]},{"tag":"ParensInType","contents":{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeConstructor","contents":[["Prim"],"Function"]},{"tag":"TypeVar","contents":"b"}]},{"tag":"TypeVar","contents":"r"}]}}]},{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeConstructor","contents":[["Prim"],"Function"]},{"tag":"ParensInType","contents":{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeConstructor","contents":[["Prim"],"Function"]},{"tag":"TypeVar","contents":"c"}]},{"tag":"TypeVar","contents":"r"}]}}]},{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeConstructor","contents":[["Prim"],"Function"]},{"tag":"ParensInType","contents":{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeConstructor","contents":[["Prim"],"Function"]},{"tag":"TypeVar","contents":"d"}]},{"tag":"TypeVar","contents":"r"}]}}]},{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeConstructor","contents":[["Prim"],"Function"]},{"tag":"ParensInType","contents":{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeConstructor","contents":[["Prim"],"Function"]},{"tag":"TypeVar","contents":"e"}]},{"tag":"TypeVar","contents":"r"}]}}]},{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeConstructor","contents":[["Prim"],"Function"]},{"tag":"ParensInType","contents":{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeConstructor","contents":[["Prim"],"Function"]},{"tag":"TypeVar","contents":"f"}]},{"tag":"TypeVar","contents":"r"}]}}]},{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeConstructor","contents":[["Prim"],"Function"]},{"tag":"ParensInType","contents":{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeConstructor","contents":[["Prim"],"Function"]},{"tag":"TypeVar","contents":"g"}]},{"tag":"TypeVar","contents":"r"}]}}]},{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeConstructor","contents":[["Prim"],"Function"]},{"tag":"ParensInType","contents":{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeConstructor","contents":[["Prim"],"Function"]},{"tag":"TypeVar","contents":"h"}]},{"tag":"TypeVar","contents":"r"}]}}]},{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeConstructor","contents":[["Prim"],"Function"]},{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[
{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeApp","contents":[{"tag":"TypeConstructor","contents":[["Data","Either","Nested"],"Either8"]},{"tag":"TypeVar","contents":"a"}]},{"tag":"TypeVar","contents":"b"}]},{"tag":"TypeVar","contents":"c"}]},{"tag":"TypeVar","contents":"d"}]},{"tag":"TypeVar","contents":"e"}]},{"tag":"TypeVar","contents":"f"}]},{"tag":"TypeVar","contents":"g"}]},{"tag":"TypeVar","contents":"h"}]}]},{"tag":"TypeVar","contents":"r"}]}]}]}]}]}]}]}]}]},null]},null]},null]},null]},null]},null]},null]},null]},null]}}],"tag":"ValueResult"},"hashAnchor":"v","comments":null}],"tag":"SearchResult"}] |
|
indent.rs | use std::str::FromStr;
use xml::attribute::OwnedAttribute;
use crate::types::*;
use super::super::errors::*;
pub type ReadIndentResult = Result<
(
Option<i32>,
Option<i32>,
Option<SpecialIndentType>,
Option<i32>,
),
ReaderError,
>;
pub fn | (attrs: &[OwnedAttribute]) -> ReadIndentResult {
let mut start: Option<i32> = None;
let mut start_chars: Option<i32> = None;
let mut end: Option<i32> = None;
let mut special: Option<SpecialIndentType> = None;
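// OOXML defines two spellings for these attributes, transitional
// ("left"/"right") and strict ("start"/"end"), so both are accepted below.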
for a in attrs {
let local_name = &a.name.local_name;
if local_name == "left" || local_name == "start" {
let v = super::value_to_dax(&a.value)?;
start = Some(v);
} else if local_name == "leftChars" || local_name == "startChars" {
start_chars = Some(i32::from_str(&a.value)?);
} else if local_name == "end" || local_name == "right" {
let v = super::value_to_dax(&a.value)?;
end = Some(v);
} else if local_name == "hanging" {
let v = super::value_to_dax(&a.value)?;
special = Some(SpecialIndentType::Hanging(v))
} else if local_name == "firstLine" {
let v = super::value_to_dax(&a.value)?;
special = Some(SpecialIndentType::FirstLine(v))
}
}
Ok((start, end, special, start_chars))
}
| read_indent |
_data.ts | dayjs.extend(customParseFormat);
export const fatalities = fatalitiesData
.map(({ age, dateOfIncident, ...data }) => ({
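// `+age || 9999` coerces the CSV string to a number and falls back to
// 9999 as an "age unknown" sentinel for records missing that field.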
age: +age || 9999,
dateOfIncident: dayjs(dateOfIncident || new Date(), 'DD-MMM-YY'),
...data
}))
.sort((a, z) =>
a.dateOfIncident.format('YYMMDD').localeCompare(z.dateOfIncident.format('YYMMDD'))
)
.map((data, index) => ({
id: index,
...data
}));
export type Person = typeof fatalities[0]; | import dayjs from 'dayjs';
import customParseFormat from 'dayjs/plugin/customParseFormat.js';
import fatalitiesData from '../../data/myanmar-coup/recent-fatality.csv';
|
|
slide-toggle.component.ts | // -- copyright
// OpenProject is an open source project management software.
// Copyright (C) 2012-2021 the OpenProject GmbH
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License version 3.
//
// OpenProject is a fork of ChiliProject, which is a fork of Redmine. The copyright follows:
// Copyright (C) 2006-2013 Jean-Philippe Lang
// Copyright (C) 2010-2013 the ChiliProject Team
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
//
// See docs/COPYRIGHT.rdoc for more details.
//++
import {
ChangeDetectionStrategy,
ChangeDetectorRef,
Component,
ElementRef,
EventEmitter,
Input,
OnChanges,
OnInit,
Output,
SimpleChanges,
} from '@angular/core';
export const slideToggleSelector = 'slide-toggle';
@Component({
templateUrl: './slide-toggle.component.html',
selector: slideToggleSelector,
styleUrls: ['./slide-toggle.component.sass'],
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class | implements OnInit, OnChanges {
@Input() containerId:string;
@Input() containerClasses:string;
@Input() inputId:string;
@Input() inputName:string;
@Input() active:boolean;
@Output() valueChanged = new EventEmitter();
constructor(private elementRef:ElementRef,
private cdRef:ChangeDetectorRef) {
}
ngOnChanges(changes:SimpleChanges) {
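// Debug aid: traces every change to the component's bound inputs.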
console.warn(JSON.stringify(changes));
}
ngOnInit() {
const { dataset } = this.elementRef.nativeElement;
// Allow taking over values from dataset (Rails)
if (dataset.inputName) {
this.containerId = dataset.containerId;
this.containerClasses = dataset.containerClasses;
this.inputId = dataset.inputId;
this.inputName = dataset.inputName;
this.active = dataset.active.toString() === 'true';
}
}
public onValueChanged(val:any) {
this.active = val;
this.valueChanged.emit(val);
this.cdRef.detectChanges();
}
}
| SlideToggleComponent |
extendable.js | define([
], function (
) {
'use strict'; |
// this looks odd, but we'll be adding other properties in future - validators etc
return [ 'data', 'computed' ];
}); | |
GetListByID.py | # -*- coding: utf-8 -*-
###############################################################################
#
# GetListByID
# Retrieves a list of NPR categories from a specified list type ID.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetListByID(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetListByID Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetListByID, self).__init__(temboo_session, '/Library/NPR/StoryFinder/GetListByID')
def new_input_set(self):
return GetListByIDInputSet()
def _make_result_set(self, result, path):
return GetListByIDResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetListByIDChoreographyExecution(session, exec_id, path)
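# Usage sketch (assumes a valid TembooSession `session`; execute_with_results
# is the usual Temboo SDK entry point):
#   choreo = GetListByID(session)
#   inputs = choreo.new_input_set()
#   inputs.set_Id("3218")  # e.g. the Music Genres list type
#   inputs.set_ResponseFormat("json")
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())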
class GetListByIDInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetListByID
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_ChildrenOf(self, value):
|
def set_HideChildren(self, value):
"""
Set the value of the HideChildren input for this Choreo. ((optional, boolean) If set to "1", returns only topics which are not subtopics of another topic.)
"""
super(GetListByIDInputSet, self)._set_input('HideChildren', value)
def set_Id(self, value):
"""
Set the value of the Id input for this Choreo. ((required, integer) The id of the list type you want to retrieve. For example, the list type id for Music Genres is 3218).)
"""
super(GetListByIDInputSet, self)._set_input('Id', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are xml (the default), and json.)
"""
super(GetListByIDInputSet, self)._set_input('ResponseFormat', value)
def set_StoryCountAll(self, value):
"""
Set the value of the StoryCountAll input for this Choreo. ((optional, integer) Returns only items with at least this number of associated stories.)
"""
super(GetListByIDInputSet, self)._set_input('StoryCountAll', value)
def set_StoryCountMonth(self, value):
"""
Set the value of the StoryCountMonth input for this Choreo. ((optional, integer) Returns only items with at least this number of associated stories published in the last month.)
"""
super(GetListByIDInputSet, self)._set_input('StoryCountMonth', value)
def set_StoryCountToday(self, value):
"""
Set the value of the StoryCountToday input for this Choreo. ((optional, integer) Returns only items with at least this number of associated stories published today.)
"""
super(GetListByIDInputSet, self)._set_input('StoryCountToday', value)
class GetListByIDResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetListByID Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from NPR.)
"""
return self._output.get('Response', None)
class GetListByIDChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetListByIDResultSet(response, path)
| """
Set the value of the ChildrenOf input for this Choreo. ((optional, integer) Returns only items which are assigned to the given topic ID. For example, if Id=3006 and ChildrenOf=1008 only recent series which are assigned to "Arts & Life" are returned.)
"""
super(GetListByIDInputSet, self)._set_input('ChildrenOf', value) |
resource.rs | use crate::utility::pixel::Pixel;
| pub file_num: u32,
pub index: u32,
pub offset: u32,
pub len: u32,
pub offset_x: i32,
pub offset_y: i32,
pub width: i32,
pub height: i32,
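// The unknown_* fields below mirror raw header values whose purpose has
// not been identified yet (as the field names suggest).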
pub unknown_1: u32,
pub unknown_2: u32,
pub unknown_3: u32,
pub unknown_4: u32,
// pub image: Vec<Pixel>,
pub image_raw: Vec<u8>,
}
impl Resource {
pub fn new() -> Resource {
Resource {
file_num: 0,
index: 0,
offset: 0,
len: 0,
offset_x: 0,
offset_y: 0,
width: 0,
height: 0,
unknown_1: 0,
unknown_2: 0,
unknown_3: 0,
unknown_4: 0,
image_raw: Vec::new(),
}
}
} | #[derive(Debug)]
pub struct Resource {
|
utils_suite_test.go | package syncer_test
import (
"testing"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/reporters"
. "github.com/onsi/gomega"
)
func TestSyncer(t *testing.T) | {
RegisterFailHandler(Fail)
junitReporter := reporters.NewJUnitReporter("junit.xml")
RunSpecsWithDefaultAndCustomReporters(t, "Syncer Utils Suite", []Reporter{junitReporter})
} |
|
swap-case.py | #!/bin/python3
# Swaps case of all chars in provided string
def | (s):
formattedStr = "".join(map(swapChar, s))
return formattedStr
def swapChar(char):
if char.islower():
return char.upper()
else:
return char.lower()
n = input()
if len(n) == 1:
print(swapChar(n))
else:
print(swap_case(n))
| swap_case |
serializers.py | from rest_framework import serializers
import markdown2
from .models import Content
from omaralbeik import server_variables as sv
class ContentSerializer(serializers.ModelSerializer):
tags = serializers.SerializerMethodField()
html_text = serializers.SerializerMethodField()
website_url = serializers.SerializerMethodField()
meta = serializers.SerializerMethodField()
class Meta:
model = Content
fields = ( | "title",
"slug",
"image_url",
"summary",
"text",
"html_text",
"website_url",
"tags",
"meta",
)
# return content's web URL.
def get_website_url(self, content):
return "{}/{}".format(sv.CLIENT_PROD_URL, content.slug)
# return content's text as HTML
def get_html_text(self, content):
return markdown2.markdown(
content.text, extras=["target-blank-links", "fenced-code-blocks"]
)
# return content's tags.
def get_tags(self, content):
return content.tags.all().values("name", "slug")
# return content's meta fields.
def get_meta(self, content):
return {
"title": content.title,
"description": content.summary,
"keywords": ", ".join([tag.name for tag in content.tags.all()]),
"canonical": self.get_website_url(content),
} | "id", |
app.py | #!/usr/bin/env python
import os
import web
import requests
import json
from config import CONFIG_FILE, DEBUG, SENSU_API_URI, SENSU_API_USER, SENSU_API_PASS, load_config, validate_api_key
# API keys are SHA-2 (SHA-256) hex digests: 64 hex characters, as matched by the route below
urls = (
'/', 'Index',
'/results/([A-Fa-f0-9]{64})', 'CheckCollector'
)
api_config = load_config(CONFIG_FILE)
class Index(object):
def GET(self):
|
class CheckCollector(object):
def GET(self, api_key):
return web.nomethod()
def POST(self, api_key):
try:
data = json.loads(web.data())
if DEBUG: print(json.dumps(data))
except ValueError as e:
raise web.badrequest('Invalid JSON request data')
if not validate_api_key(api_key, api_config, data):
raise web.forbidden('Invalid API Key')
try:
headers = {'Content-type': 'application/json'}
if SENSU_API_USER and SENSU_API_PASS:
if DEBUG: print("AUTH: SENSU_API_USER, XXX")
auth = (SENSU_API_USER, SENSU_API_PASS)
else:
auth = None
r = requests.post(SENSU_API_URI, json=data, headers=headers, auth=auth)
r.raise_for_status()
return web.accepted()
except requests.exceptions.RequestException as e:
print(e)
raise web.internalerror('RequestException calling Sensu')
if __name__ == "__main__":
app = web.application(urls, globals())
web.config.debug = DEBUG
app.run()
| return 'Welcome to the Sensu Check Collector!' |
file.go | // Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
// Package file provides helper functions for using Google Cloud Storage.
package file
import (
"fmt"
"github.com/sunnogo/net/context"
"github.com/sunnogo/appengine/internal"
aipb "github.com/sunnogo/appengine/internal/app_identity"
)
// DefaultBucketName returns the name of this application's
// default Google Cloud Storage bucket.
func DefaultBucketName(c context.Context) (string, error) | {
req := &aipb.GetDefaultGcsBucketNameRequest{}
res := &aipb.GetDefaultGcsBucketNameResponse{}
err := internal.Call(c, "app_identity_service", "GetDefaultGcsBucketName", req, res)
if err != nil {
return "", fmt.Errorf("file: no default bucket name returned in RPC response: %v; call error: %v", res, err)
}
return res.GetDefaultGcsBucketName(), nil
} |
|
overing.go | package swp
import (
"fmt"
"io"
"time"
)
// EventRingBuf:
//
// a fixed-size circular ring buffer of time.Time values
//
type EventRingBuf struct {
A []time.Time
N int // MaxView, the total size of A, whether or not in use.
Window time.Duration
Beg int // start of in-use data in A
Readable int // number of pointers available in A (in use)
}
func (r *EventRingBuf) String() string {
c1, c2 := r.TwoContig(false)
s := ""
for i := range c1 {
s += fmt.Sprintf("%v, ", c1[i])
}
for i := range c2 {
s += fmt.Sprintf("%v, ", c2[i])
}
return s
}
// AddEventCheckOverflow adds event e, overwriting the oldest stored
// event once the ring is full.
// Returns true if the ring is full after the addition of e and the
// oldest retained event lies within Window of e.
// Hence a true response indicates we have seen N events
// within Window of time.
func (b *EventRingBuf) AddEventCheckOverflow(e time.Time) bool {
writeStart := (b.Beg + b.Readable) % b.N
youngest := writeStart
space := b.N - b.Readable
//p("youngest = %v", youngest)
b.A[youngest] = e
if space == 0 {
b.Beg = (b.Beg + 1) % b.N
} else {
b.Readable++
}
oldest := b.Beg
left := b.N - b.Readable
if left > 0 {
// haven't seen N events yet,
// so can't overflow
return false
}
// at capacity, check time distance from newest to oldest
if b.A[youngest].Sub(b.A[oldest]) <= b.Window {
//p("b.A[youngest]=%v b.A[oldest]=%v b.Window = %v, sub=%v", b.A[youngest], b.A[oldest], b.Window, b.A[youngest].Sub(b.A[oldest]))
return true
}
return false
}
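// Usage sketch: treat overflow as "N events within Window", e.g. as a
// simple rate trip-wire:
//
//	ring := NewEventRingBuf(100, time.Second)
//	if ring.AddEventCheckOverflow(time.Now()) {
//		// 100 events arrived within one second - back off.
//	}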
// Constructor. NewEventRingBuf allocates an internal slice sized to
// hold maxEventsStored time.Time values.
func NewEventRingBuf(maxEventsStored int, maxTimeWindow time.Duration) *EventRingBuf {
n := maxEventsStored
r := &EventRingBuf{
N: n,
Beg: 0,
Readable: 0,
Window: maxTimeWindow,
}
r.A = make([]time.Time, n, n+1)
return r
}
// TwoContig returns all readable pointers, but in two separate slices,
// to avoid copying. The two slices are from the same buffer, but
// are not contiguous. Either or both may be empty slices.
func (b *EventRingBuf) TwoContig(makeCopy bool) (first []time.Time, second []time.Time) {
extent := b.Beg + b.Readable
if extent <= b.N {
// we fit contiguously in this buffer without wrapping to the other.
// Let second stay an empty slice.
return b.A[b.Beg:(b.Beg + b.Readable)], second
}
one := b.A[b.Beg:b.N]
two := b.A[0:(extent % b.N)]
return one, two
}
// ReadPtrs():
//
// modeled on bytes.Buffer.Read(): ReadPtrs reads the next len(p) time.Time
// values from the buffer or until the buffer is drained. The return
// value n is the number of values read. If the buffer has no data
// to return, err is io.EOF (unless len(p) is zero); otherwise it is nil.
func (b *EventRingBuf) ReadPtrs(p []time.Time) (n int, err error) {
return b.readAndMaybeAdvance(p, true)
}
// ReadWithoutAdvance(): if you want to Read the data and leave
// it in the buffer, so as to peek ahead for example.
func (b *EventRingBuf) ReadWithoutAdvance(p []time.Time) (n int, err error) {
return b.readAndMaybeAdvance(p, false)
}
func (b *EventRingBuf) readAndMaybeAdvance(p []time.Time, doAdvance bool) (n int, err error) {
if len(p) == 0 {
return 0, nil
}
if b.Readable == 0 {
return 0, io.EOF
}
extent := b.Beg + b.Readable
if extent <= b.N {
n += copy(p, b.A[b.Beg:extent])
} else {
n += copy(p, b.A[b.Beg:b.N])
if n < len(p) {
n += copy(p[n:], b.A[0:(extent%b.N)]) | }
return
}
//
// WritePtrs writes len(p) time.Time values from p to
// the underlying ring, b.A.
// It returns the number of values written from p (0 <= n <= len(p))
// and any error encountered that caused the write to stop early.
// Write must return a non-nil error if it returns n < len(p).
//
func (b *EventRingBuf) WritePtrs(p []time.Time) (n int, err error) {
for {
if len(p) == 0 {
// nothing (left) to copy in; notice we shorten our
// local copy p (below) as we read from it.
return
}
writeCapacity := b.N - b.Readable
if writeCapacity <= 0 {
// we are all full up already.
return n, io.ErrShortWrite
}
if len(p) > writeCapacity {
err = io.ErrShortWrite
// leave err set and
// keep going, write what we can.
}
writeStart := (b.Beg + b.Readable) % b.N
upperLim := intMin(writeStart+writeCapacity, b.N)
k := copy(b.A[writeStart:upperLim], p)
n += k
b.Readable += k
p = p[k:]
// we can fill from b.A[0:something] from
// p's remainder, so loop
}
}
// Reset quickly forgets any data stored in the ring buffer. The
// data is still there, but the ring buffer will ignore it and
// overwrite those buffers as new data comes in.
func (b *EventRingBuf) Reset() {
b.Beg = 0
b.Readable = 0
}
// Advance(): non-standard, but better than Next(),
// because we don't have to unwrap our buffer and pay the cpu time
// for the copy that unwrapping may need.
// Useful in conjunction with (or after) ReadWithoutAdvance() above.
func (b *EventRingBuf) Advance(n int) {
if n <= 0 {
return
}
if n > b.Readable {
n = b.Readable
}
b.Readable -= n
b.Beg = (b.Beg + n) % b.N
}
// Adopt(): non-standard.
//
// For efficiency's sake, (possibly) take ownership of
// already allocated slice offered in me.
//
// If me is large we will adopt it, and we will potentially then
// write to the me buffer.
// If we already have a bigger buffer, copy me into the existing
// buffer instead.
func (b *EventRingBuf) Adopt(me []time.Time) {
n := len(me)
if n > b.N {
b.A = me
b.N = n
b.Beg = 0
b.Readable = n
} else {
// we already have a larger buffer, reuse it.
copy(b.A, me)
b.Beg = 0
b.Readable = n
}
}
func intMax(a, b int) int {
if a > b {
return a
} else {
return b
}
}
func intMin(a, b int) int {
if a < b {
return a
} else {
return b
}
} | }
}
if doAdvance {
b.Advance(n) |
index.tsx | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
import React from "react";
import type { Props } from "@theme/IconLanguage";
export default function | ({
width = 20,
height = 20,
...props
}: Props): JSX.Element {
return (
<svg
xmlns="http://www.w3.org/2000/svg"
width={width}
height={height}
viewBox="0 0 24 24"
fill="#000000"
aria-hidden
{...props}>
<path d="M0 0h24v24H0V0z" fill="none" />
<path
fill="currentColor"
d="M12.87 15.07l-2.54-2.51.03-.03c1.74-1.94 2.98-4.17 3.71-6.53H17V4h-7V2H8v2H1v1.99h11.17C11.5 7.92 10.44 9.75 9 11.35 8.07 10.32 7.3 9.19 6.69 8h-2c.73 1.63 1.73 3.17 2.98 4.56l-5.09 5.02L4 19l5-5 3.11 3.11.76-2.04zM18.5 10h-2L12 22h2l1.12-3h4.75L21 22h2l-4.5-12zm-2.62 7l1.62-4.33L19.12 17h-3.24z"
/>
</svg>
);
}
| IconLanguage |
restore.rs | // Copyright 2018 The Grin Developers
// Modifications Copyright 2019 The Gotts Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! tests for wallet restore
#[macro_use]
extern crate log;
extern crate gotts_wallet_controller as wallet;
extern crate gotts_wallet_impls as impls;
extern crate gotts_wallet_libwallet as libwallet;
use gotts_wallet_util::gotts_core as core;
use gotts_wallet_util::gotts_keychain as keychain;
use gotts_wallet_util::gotts_util as util;
use self::core::global;
use self::core::global::ChainTypes;
use self::keychain::{ExtKeychain, Identifier, Keychain};
use self::libwallet::{AcctPathMapping, InitTxArgs, Slate};
use impls::test_framework::{self, LocalWalletClient, WalletProxy};
use std::fs;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::Duration;
fn clean_output_dir(test_dir: &str) {
let _ = fs::remove_dir_all(test_dir);
}
fn | (test_dir: &str) {
util::init_test_logger();
clean_output_dir(test_dir);
global::set_mining_mode(ChainTypes::AutomatedTesting);
}
fn restore_wallet(base_dir: &str, wallet_dir: &str) -> Result<(), libwallet::Error> {
println!("restoring wallet: {}", wallet_dir);
let source_seed = format!("{}/{}/wallet.seed", base_dir, wallet_dir);
let dest_dir = format!("{}/{}_restore", base_dir, wallet_dir);
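// Only wallet.seed is carried over; the restored wallet rebuilds its
// outputs and transaction history from the chain via api.restore().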
fs::create_dir_all(dest_dir.clone())?;
let dest_seed = format!("{}/wallet.seed", dest_dir);
println!("Source: {}, Dest: {}", source_seed, dest_seed);
fs::copy(source_seed, dest_seed)?;
let mut wallet_proxy: WalletProxy<LocalWalletClient, ExtKeychain> = WalletProxy::new(base_dir);
let client = LocalWalletClient::new(wallet_dir, wallet_proxy.tx.clone());
let wallet = test_framework::create_wallet(&dest_dir, client.clone(), None);
wallet_proxy.add_wallet(wallet_dir, client.get_send_instance(), wallet.clone());
// Set the wallet proxy listener running
let wp_running = wallet_proxy.running.clone();
thread::spawn(move || {
if let Err(e) = wallet_proxy.run() {
error!("Wallet Proxy error: {}", e);
}
});
// perform the restore and update wallet info
wallet::controller::owner_single_use(wallet.clone(), |api| {
println!("restoring...");
let _ = api.restore()?;
let _ = api.retrieve_summary_info(true, 1)?;
println!("checking account1 ...");
if api.set_active_account("account1").is_err() {
api.create_account("account1")?;
api.set_active_account("account1")?;
}
api.check_repair(true, 0, None)?;
println!("checking account2 ...");
if api.set_active_account("account2").is_err() {
api.create_account("account2")?;
api.set_active_account("account2")?;
}
api.check_repair(true, 0, None)?;
Ok(())
})?;
wp_running.store(false, Ordering::Relaxed);
//thread::sleep(Duration::from_millis(1000));
Ok(())
}
fn compare_wallet_restore(
base_dir: &str,
wallet_dir: &str,
account_path: &Identifier,
) -> Result<(), libwallet::Error> {
let restore_name = format!("{}_restore", wallet_dir);
let source_dir = format!("{}/{}", base_dir, wallet_dir);
let dest_dir = format!("{}/{}", base_dir, restore_name);
let mut wallet_proxy: WalletProxy<LocalWalletClient, ExtKeychain> = WalletProxy::new(base_dir);
let client = LocalWalletClient::new(wallet_dir, wallet_proxy.tx.clone());
let wallet_source = test_framework::create_wallet(&source_dir, client.clone(), None);
wallet_proxy.add_wallet(
&wallet_dir,
client.get_send_instance(),
wallet_source.clone(),
);
let client = LocalWalletClient::new(&restore_name, wallet_proxy.tx.clone());
let wallet_dest = test_framework::create_wallet(&dest_dir, client.clone(), None);
wallet_proxy.add_wallet(
&restore_name,
client.get_send_instance(),
wallet_dest.clone(),
);
{
let mut w = wallet_source.lock();
w.set_parent_key_id(account_path.clone());
}
{
let mut w = wallet_dest.lock();
w.set_parent_key_id(account_path.clone());
}
// Set the wallet proxy listener running
let wp_running = wallet_proxy.running.clone();
thread::spawn(move || {
if let Err(e) = wallet_proxy.run() {
error!("Wallet Proxy error: {}", e);
}
});
let mut src_info: Option<libwallet::WalletInfo> = None;
let mut dest_info: Option<libwallet::WalletInfo> = None;
let mut src_txs: Option<Vec<libwallet::TxLogEntry>> = None;
let mut dest_txs: Option<Vec<libwallet::TxLogEntry>> = None;
let mut src_accts: Option<Vec<AcctPathMapping>> = None;
let mut dest_accts: Option<Vec<AcctPathMapping>> = None;
// Overall wallet info should be the same
wallet::controller::owner_single_use(wallet_source.clone(), |api| {
src_info = Some(api.retrieve_summary_info(true, 1)?.1);
src_txs = Some(api.retrieve_txs(true, None, None)?.1);
src_accts = Some(api.accounts()?);
Ok(())
})?;
wallet::controller::owner_single_use(wallet_dest.clone(), |api| {
dest_info = Some(api.retrieve_summary_info(true, 1)?.1);
dest_txs = Some(api.retrieve_txs(true, None, None)?.1);
dest_accts = Some(api.accounts()?);
Ok(())
})?;
// Info should all be the same
assert_eq!(src_info, dest_info);
// Net differences in TX logs should be the same
let src_sum: i64 = src_txs
.clone()
.unwrap()
.iter()
.map(|t| t.amount_credited as i64 - t.amount_debited as i64)
.sum();
let dest_sum: i64 = dest_txs
.clone()
.unwrap()
.iter()
.map(|t| t.amount_credited as i64 - t.amount_debited as i64)
.sum();
assert_eq!(src_sum, dest_sum);
// Number of created accounts should be the same
assert_eq!(
src_accts.as_ref().unwrap().len(),
dest_accts.as_ref().unwrap().len()
);
wp_running.store(false, Ordering::Relaxed);
//thread::sleep(Duration::from_millis(1000));
Ok(())
}
/// Build up a few wallets (three here), perform a few transactions on them
/// Then attempt to restore them in separate directories and check contents are the same
fn setup_restore(test_dir: &str) -> Result<(), libwallet::Error> {
setup(test_dir);
// Create a new proxy to simulate server and wallet responses
let mut wallet_proxy: WalletProxy<LocalWalletClient, ExtKeychain> = WalletProxy::new(test_dir);
let chain = wallet_proxy.chain.clone();
// Create a new wallet test client, and set its queues to communicate with the
// proxy
let client1 = LocalWalletClient::new("wallet1", wallet_proxy.tx.clone());
let wallet1 =
test_framework::create_wallet(&format!("{}/wallet1", test_dir), client1.clone(), None);
wallet_proxy.add_wallet("wallet1", client1.get_send_instance(), wallet1.clone());
    // wallet 1 creates 2 more accounts, but they are not used.
wallet::controller::owner_single_use(wallet1.clone(), |api| {
api.create_account("account1")?;
api.create_account("account2")?;
Ok(())
})?;
// define recipient wallet, add to proxy
let client2 = LocalWalletClient::new("wallet2", wallet_proxy.tx.clone());
let wallet2 =
test_framework::create_wallet(&format!("{}/wallet2", test_dir), client2.clone(), None);
wallet_proxy.add_wallet("wallet2", client2.get_send_instance(), wallet2.clone());
// wallet 2 will use another account
wallet::controller::owner_single_use(wallet2.clone(), |api| {
api.create_account("account1")?;
api.create_account("account2")?;
Ok(())
})?;
// Default wallet 2 to listen on that account
{
let mut w = wallet2.lock();
w.set_parent_key_id_by_name("account1")?;
}
// Another wallet
let client3 = LocalWalletClient::new("wallet3", wallet_proxy.tx.clone());
let wallet3 =
test_framework::create_wallet(&format!("{}/wallet3", test_dir), client3.clone(), None);
wallet_proxy.add_wallet("wallet3", client3.get_send_instance(), wallet3.clone());
    // wallet 3 creates 2 more accounts, but they are not used.
wallet::controller::owner_single_use(wallet3.clone(), |api| {
api.create_account("account1")?;
api.create_account("account2")?;
Ok(())
})?;
// Set the wallet proxy listener running
let wp_running = wallet_proxy.running.clone();
thread::spawn(move || {
if let Err(e) = wallet_proxy.run() {
error!("Wallet Proxy error: {}", e);
}
});
// mine a few blocks
let _ = test_framework::award_blocks_to_wallet(&chain, wallet1.clone(), 10, false);
    // assert wallet contents and create a single-use api for a send command
let amount = 60_000_000_000;
let mut slate = Slate::blank(1);
wallet::controller::owner_single_use(wallet1.clone(), |sender_api| {
// note this will increment the block count as part of the transaction "Posting"
let args = InitTxArgs {
src_acct_name: None,
amount: amount,
minimum_confirmations: 2,
max_outputs: 500,
num_change_outputs: 1,
selection_strategy: "all".to_owned(),
..Default::default()
};
let slate_i = sender_api.init_send_tx(args)?;
slate = client1.send_tx_slate_direct("wallet2", &slate_i)?;
sender_api.tx_lock_outputs(&slate, 0)?;
slate = sender_api.finalize_tx(&slate)?;
sender_api.post_tx(Some(slate.id), &slate.tx, false)?;
Ok(())
})?;
// mine a few more blocks
let _ = test_framework::award_blocks_to_wallet(&chain, wallet1.clone(), 3, false);
// Send some to wallet 3
wallet::controller::owner_single_use(wallet1.clone(), |sender_api| {
// note this will increment the block count as part of the transaction "Posting"
let args = InitTxArgs {
src_acct_name: None,
amount: amount * 2,
minimum_confirmations: 2,
max_outputs: 500,
num_change_outputs: 1,
selection_strategy: "all".to_owned(),
..Default::default()
};
let slate_i = sender_api.init_send_tx(args)?;
slate = client1.send_tx_slate_direct("wallet3", &slate_i)?;
sender_api.tx_lock_outputs(&slate, 0)?;
slate = sender_api.finalize_tx(&slate)?;
sender_api.post_tx(Some(slate.id), &slate.tx, false)?;
Ok(())
})?;
// mine a few more blocks
let _ = test_framework::award_blocks_to_wallet(&chain, wallet3.clone(), 10, false);
// Wallet3 to wallet 2
wallet::controller::owner_single_use(wallet3.clone(), |sender_api| {
// note this will increment the block count as part of the transaction "Posting"
let args = InitTxArgs {
src_acct_name: None,
amount: amount * 3,
minimum_confirmations: 2,
max_outputs: 500,
num_change_outputs: 1,
selection_strategy: "all".to_owned(),
..Default::default()
};
let slate_i = sender_api.init_send_tx(args)?;
slate = client3.send_tx_slate_direct("wallet2", &slate_i)?;
sender_api.tx_lock_outputs(&slate, 0)?;
slate = sender_api.finalize_tx(&slate)?;
sender_api.post_tx(Some(slate.id), &slate.tx, false)?;
Ok(())
})?;
// Another listener account on wallet 2
{
let mut w = wallet2.lock();
w.set_parent_key_id_by_name("account2")?;
}
// mine a few more blocks
let _ = test_framework::award_blocks_to_wallet(&chain, wallet1.clone(), 2, false);
// Wallet3 to wallet 2 again (to another account)
wallet::controller::owner_single_use(wallet3.clone(), |sender_api| {
// note this will increment the block count as part of the transaction "Posting"
let args = InitTxArgs {
src_acct_name: None,
amount: amount * 3,
minimum_confirmations: 2,
max_outputs: 500,
num_change_outputs: 1,
selection_strategy: "all".to_owned(),
..Default::default()
};
let slate_i = sender_api.init_send_tx(args)?;
slate = client3.send_tx_slate_direct("wallet2", &slate_i)?;
sender_api.tx_lock_outputs(&slate, 0)?;
slate = sender_api.finalize_tx(&slate)?;
sender_api.post_tx(Some(slate.id), &slate.tx, false)?;
Ok(())
})?;
// mine a few more blocks
let _ = test_framework::award_blocks_to_wallet(&chain, wallet1.clone(), 5, false);
// update everyone
wallet::controller::owner_single_use(wallet1.clone(), |api| {
let _ = api.retrieve_summary_info(true, 1)?;
Ok(())
})?;
wallet::controller::owner_single_use(wallet2.clone(), |api| {
let _ = api.retrieve_summary_info(true, 1)?;
Ok(())
})?;
wallet::controller::owner_single_use(wallet3.clone(), |api| {
let _ = api.retrieve_summary_info(true, 1)?;
Ok(())
})?;
wp_running.store(false, Ordering::Relaxed);
Ok(())
}
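/// Restore each wallet into a fresh directory and compare the result against
/// the original, once per account path used in `setup_restore`.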
fn perform_restore(test_dir: &str) -> Result<(), libwallet::Error> {
restore_wallet(test_dir, "wallet1")?;
compare_wallet_restore(
test_dir,
"wallet1",
&ExtKeychain::derive_key_id(2, 0, 0, 0, 0),
)?;
restore_wallet(test_dir, "wallet2")?;
compare_wallet_restore(
test_dir,
"wallet2",
&ExtKeychain::derive_key_id(2, 0, 0, 0, 0),
)?;
compare_wallet_restore(
test_dir,
"wallet2",
&ExtKeychain::derive_key_id(2, 1, 0, 0, 0),
)?;
compare_wallet_restore(
test_dir,
"wallet2",
&ExtKeychain::derive_key_id(2, 2, 0, 0, 0),
)?;
restore_wallet(test_dir, "wallet3")?;
compare_wallet_restore(
test_dir,
"wallet3",
&ExtKeychain::derive_key_id(2, 0, 0, 0, 0),
)?;
Ok(())
}
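/// End-to-end check: build and transact with the wallets, then restore each
/// one and verify the restored data matches the original.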
#[test]
fn wallet_restore() {
let test_dir = "test_output/wallet_restore";
if let Err(e) = setup_restore(test_dir) {
panic!("Libwallet Error: {} - {}", e, e.backtrace().unwrap());
}
if let Err(e) = perform_restore(test_dir) {
panic!("Libwallet Error: {} - {}", e, e.backtrace().unwrap());
}
// let logging finish
thread::sleep(Duration::from_millis(200));
}
| setup |
urls.go | package flavors
import (
"github.com/apcera/libretto/Godeps/_workspace/src/github.com/rackspace/gophercloud"
)
func getURL(client *gophercloud.ServiceClient, id string) string | {
return client.ServiceURL("flavors", id)
} |
|
main.py | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import sys
import os
import argparse
import pkg_resources
# Adding the necessary path to PYTHONPATH
path = os.path.dirname(os.path.dirname(os.getcwd()))
sys.path.append(path)
from sawtooth_sdk.processor.core import TransactionProcessor
from sawtooth_sdk.processor.log import init_console_logging
from sawtooth_sdk.processor.log import log_configuration
from sawtooth_sdk.processor.config import get_log_config
from sawtooth_sdk.processor.config import get_log_dir
from sawtooth_sdk.processor.config import get_config_dir
from sawtooth_identity.processor.handler import IdentityTransactionHandler
from sawtooth_identity.processor.config.identity import IdentityConfig
from sawtooth_identity.processor.config.identity import \
load_default_identity_config
from sawtooth_identity.processor.config.identity import \
load_toml_identity_config
from sawtooth_identity.processor.config.identity import \
merge_identity_config
DISTRIBUTION_NAME = 'sawtooth-identity'
def parse_args(args):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'-C', '--connect',
help='Endpoint for the validator connection')
parser.add_argument('-v', '--verbose',
action='count',
default=0,
help='Increase output sent to stderr')
try:
version = pkg_resources.get_distribution(DISTRIBUTION_NAME).version
except pkg_resources.DistributionNotFound:
version = 'UNKNOWN'
parser.add_argument(
'-V', '--version',
action='version',
version=(DISTRIBUTION_NAME + ' (Hyperledger Sawtooth) version {}')
.format(version),
help='print version information')
return parser.parse_args(args)
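# Merge the CLI-supplied config with identity.toml (if present) and the
# built-in defaults.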
def load_identity_config(first_config):
default_identity_config = \
load_default_identity_config()
conf_file = os.path.join(get_config_dir(), 'identity.toml')
toml_config = load_toml_identity_config(conf_file)
return merge_identity_config(
configs=[first_config, toml_config, default_identity_config])
def create_identity_config(args):
return IdentityConfig(connect=args.connect)
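# Entry point: build the config, configure logging, register the identity
# transaction handler and run the processor until interrupted.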
def | (args=None):
if args is None:
args = sys.argv[1:]
opts = parse_args(args)
processor = None
try:
print("here 1")
arg_config = create_identity_config(opts)
identity_config = load_identity_config(arg_config)
processor = TransactionProcessor(url=identity_config.connect)
log_config = get_log_config(filename="identity_log_config.toml")
print("here 2")
        # If no TOML log config was found, try the YAML variant
if log_config is None:
log_config = get_log_config(filename="identity_log_config.yaml")
if log_config is not None:
log_configuration(log_config=log_config)
else:
log_dir = get_log_dir()
# use the transaction processor zmq identity for filename
log_configuration(
log_dir=log_dir,
name="identity-" + str(processor.zmq_id)[2:-1])
print('here 3')
init_console_logging(verbose_level=opts.verbose)
print('here 4')
handler = IdentityTransactionHandler()
print('here 5')
processor.add_handler(handler)
print('here 6')
processor.start()
print('here 7')
except KeyboardInterrupt:
pass
except Exception as e: # pylint: disable=broad-except
print("Error: {}".format(e))
finally:
if processor is not None:
processor.stop()
if __name__ == "__main__":
main()
| main |
test_session.py | ######################################################################
#
# File: test_session.py
#
# Copyright 2018 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from b2.exception import InvalidAuthToken, Unauthorized
from b2.raw_api import ALL_CAPABILITIES
from b2.session import B2Session
from .test_base import TestBase
try:
import unittest.mock as mock
except ImportError:
import mock
class TestB2Session(TestBase):
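    """Exercise B2Session's retry on InvalidAuthToken and the decoration of
    Unauthorized errors with the application key's restrictions."""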
def setUp(self):
self.account_info = mock.MagicMock()
self.account_info.get_account_auth_token.return_value = 'auth_token'
self.api = mock.MagicMock()
self.api.account_info = self.account_info
self.raw_api = mock.MagicMock()
self.raw_api.do_it.__name__ = 'do_it'
self.raw_api.do_it.side_effect = ['ok']
self.session = B2Session(self.api, self.raw_api)
def test_works_first_time(self):
self.assertEqual('ok', self.session.do_it())
def test_works_second_time(self):
self.raw_api.do_it.side_effect = [
InvalidAuthToken('message', 'code'),
'ok',
]
self.assertEqual('ok', self.session.do_it())
def test_fails_second_time(self):
self.raw_api.do_it.side_effect = [
InvalidAuthToken('message', 'code'),
InvalidAuthToken('message', 'code'),
]
with self.assertRaises(InvalidAuthToken):
self.session.do_it() | bucketId=None,
bucketName=None,
capabilities=ALL_CAPABILITIES,
namePrefix=None,
)
self.raw_api.do_it.side_effect = Unauthorized('no_go', 'code')
with self.assertRaisesRegexp(
Unauthorized, r'no_go for application key with no restrictions \(code\)'
):
self.session.do_it()
def test_app_key_info_no_info_no_message(self):
self.account_info.get_allowed.return_value = dict(
bucketId=None,
bucketName=None,
capabilities=ALL_CAPABILITIES,
namePrefix=None,
)
self.raw_api.do_it.side_effect = Unauthorized('', 'code')
with self.assertRaisesRegexp(
Unauthorized, r'unauthorized for application key with no restrictions \(code\)'
):
self.session.do_it()
def test_app_key_info_all_info(self):
self.account_info.get_allowed.return_value = dict(
bucketId='123456',
bucketName='my-bucket',
capabilities=['readFiles'],
namePrefix='prefix/',
)
self.raw_api.do_it.side_effect = Unauthorized('no_go', 'code')
with self.assertRaisesRegexp(
Unauthorized,
r"no_go for application key with capabilities 'readFiles', restricted to bucket 'my-bucket', restricted to files that start with 'prefix/' \(code\)"
):
self.session.do_it() |
def test_app_key_info_no_info(self):
self.account_info.get_allowed.return_value = dict( |
data_connector.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = ['DataConnector']
class DataConnector(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
data_connector_id: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[Union[str, 'DataConnectorKind']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
|
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DataConnector':
"""
Get an existing DataConnector resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return DataConnector(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
The data connector kind
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Azure resource type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| """
Data connector.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] data_connector_id: Connector ID
:param pulumi.Input[str] etag: Etag of the azure resource
:param pulumi.Input[Union[str, 'DataConnectorKind']] kind: The data connector kind
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
:param pulumi.Input[str] workspace_name: The name of the workspace.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['data_connector_id'] = data_connector_id
__props__['etag'] = etag
if kind is None and not opts.urn:
raise TypeError("Missing required property 'kind'")
__props__['kind'] = kind
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__['workspace_name'] = workspace_name
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:securityinsights:DataConnector"), pulumi.Alias(type_="azure-nextgen:securityinsights/latest:DataConnector")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DataConnector, __self__).__init__(
'azure-nextgen:securityinsights/v20200101:DataConnector',
resource_name,
__props__,
opts) |
index.tsx | import React, { useCallback, useState } from 'react';
import { NextPage } from 'next';
import Link from 'next/link';
import { Row, Col, Card, List, Alert } from 'antd';
import { useSetting } from '@/hooks/useSetting';
import { AdminLayout } from '@/layout/AdminLayout';
import { ArticleProvider } from '@/providers/article';
import { CommentProvider } from '@/providers/comment';
import { CommentArticle } from '@/components/comment/CommentArticle';
import { CommentStatus } from '@/components/comment/CommentStatus';
import { CommentAction } from '@/components/comment/CommentAction';
import { CommentContent } from '@/components/comment/CommentContent';
import style from './index.module.scss';
interface IHomeProps {
articles: IArticle[];
comments: IComment[];
}
const actions = [
{
name: '文章管理',
url: '/article',
},
{
name: '评论管理',
url: '/comment',
},
{
name: '文件管理',
url: '/file',
},
{
name: '用户管理',
url: '/user',
},
{
name: '访问管理',
url: '/view',
},
{
name: '系统设置',
url: '/setting',
},
]; | const setting = useSetting();
const [comments, setComments] = useState<IComment[]>(defaultComments);
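  // Re-fetch the first page of comments, sync them into local state, and
  // return the response for chaining.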
const getComments = useCallback(() => {
return CommentProvider.getComments({ page: 1, pageSize }).then((res) => {
setComments(res[0]);
return res;
});
}, []);
return (
<AdminLayout>
{!setting || !setting.systemUrl ? (
<div style={{ marginBottom: 24 }}>
<Alert
message={
<span>
系统检测到<strong>系统配置</strong>未完善,
<Link href="/setting?type=系统设置">
<a>点我立即完善</a>
</Link>
</span>
}
type="warning"
/>
</div>
) : null}
<Card title="快速导航" bordered={false} bodyStyle={{ padding: 0 }}>
<Row>
{actions.map((action) => {
return (
<Col
key={action.url}
span={4}
style={{
padding: '2rem 1rem',
textAlign: 'center',
}}
>
<Link href={action.url}>
<a target="_blank" className={style.recentArticleItem}>
<span>{action.name}</span>
</a>
</Link>
</Col>
);
})}
</Row>
</Card>
<Card
title="最新文章"
bordered={false}
style={{ marginTop: 24 }}
bodyStyle={{ padding: 0 }}
extra={
<Link href="/article">
<a>
<span>全部文章</span>
</a>
</Link>
}
>
{articles.map((article) => {
return (
<Card.Grid
key={article.id}
style={{
width: '33.3%',
textAlign: 'center',
}}
hoverable={true}
>
<Link href={`/article/editor/[id]`} as={`/article/editor/` + article.id}>
<a target="_blank" className={style.recentArticleItem}>
<img width={120} alt="文章封面" src={article.cover} />
<p className={style.title}>{article.title}</p>
</a>
</Link>
</Card.Grid>
);
})}
</Card>
<Card
title="最新评论"
style={{ marginTop: 24 }}
bordered={false}
extra={
<Link href="/comment">
<a>
<span>全部评论</span>
</a>
</Link>
}
>
<List
dataSource={comments}
renderItem={(comment) => (
<List.Item
key={comment.id}
actions={[<CommentAction comment={comment} refresh={getComments} />]}
>
<span>{comment.name}</span> 在 <CommentArticle comment={comment} /> 评论{' '}
<CommentContent comment={comment} />
<CommentStatus comment={comment} />
</List.Item>
)}
/>
</Card>
</AdminLayout>
);
};
Home.getInitialProps = async () => {
const [articles, comments] = await Promise.all([
ArticleProvider.getArticles({ page: 1, pageSize }),
CommentProvider.getComments({ page: 1, pageSize }),
]);
return {
articles: articles[0],
comments: comments[0],
};
};
export default Home; | const pageSize = 6;
const Home: NextPage<IHomeProps> = ({ articles = [], comments: defaultComments = [] }) => { |
docker_test.go | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"encoding/json"
"fmt"
"hash/adler32"
"reflect"
"sort"
"strconv"
"strings"
"testing"
"github.com/docker/docker/pkg/jsonmessage"
docker "github.com/fsouza/go-dockerclient"
cadvisorApi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/network"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
)
func verifyCalls(t *testing.T, fakeDocker *FakeDockerClient, calls []string) {
fakeDocker.Lock()
defer fakeDocker.Unlock()
verifyStringArrayEquals(t, fakeDocker.called, calls)
}
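// Assert that two string slices are equal in length and element order,
// reporting both values on mismatch.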
func | (t *testing.T, actual, expected []string) {
invalid := len(actual) != len(expected)
if !invalid {
for ix, value := range actual {
if expected[ix] != value {
invalid = true
}
}
}
if invalid {
t.Errorf("Expected: %#v, Actual: %#v", expected, actual)
}
}
func TestGetContainerID(t *testing.T) {
fakeDocker := &FakeDockerClient{}
fakeDocker.ContainerList = []docker.APIContainers{
{
ID: "foobar",
Names: []string{"/k8s_foo_qux_ns_1234_42"},
},
{
ID: "barbar",
Names: []string{"/k8s_bar_qux_ns_2565_42"},
},
}
fakeDocker.Container = &docker.Container{
ID: "foobar",
}
dockerContainers, err := GetKubeletDockerContainers(fakeDocker, false)
if err != nil {
t.Errorf("Expected no error, Got %#v", err)
}
if len(dockerContainers) != 2 {
t.Errorf("Expected %#v, Got %#v", fakeDocker.ContainerList, dockerContainers)
}
verifyCalls(t, fakeDocker, []string{"list"})
dockerContainer, found, _ := dockerContainers.FindPodContainer("qux_ns", "", "foo")
if dockerContainer == nil || !found {
t.Errorf("Failed to find container %#v", dockerContainer)
}
fakeDocker.ClearCalls()
dockerContainer, found, _ = dockerContainers.FindPodContainer("foobar", "", "foo")
verifyCalls(t, fakeDocker, []string{})
if dockerContainer != nil || found {
t.Errorf("Should not have found container %#v", dockerContainer)
}
}
func verifyPackUnpack(t *testing.T, podNamespace, podUID, podName, containerName string) {
container := &api.Container{Name: containerName}
hasher := adler32.New()
util.DeepHashObject(hasher, *container)
computedHash := uint64(hasher.Sum32())
podFullName := fmt.Sprintf("%s_%s", podName, podNamespace)
_, name := BuildDockerName(KubeletContainerName{podFullName, types.UID(podUID), container.Name}, container)
returned, hash, err := ParseDockerName(name)
if err != nil {
t.Errorf("Failed to parse Docker container name %q: %v", name, err)
}
if podFullName != returned.PodFullName || podUID != string(returned.PodUID) || containerName != returned.ContainerName || computedHash != hash {
t.Errorf("For (%s, %s, %s, %d), unpacked (%s, %s, %s, %d)", podFullName, podUID, containerName, computedHash, returned.PodFullName, returned.PodUID, returned.ContainerName, hash)
}
}
func TestContainerNaming(t *testing.T) {
podUID := "12345678"
verifyPackUnpack(t, "file", podUID, "name", "container")
verifyPackUnpack(t, "file", podUID, "name-with-dashes", "container")
// UID is same as pod name
verifyPackUnpack(t, "file", podUID, podUID, "container")
// No Container name
verifyPackUnpack(t, "other", podUID, "name", "")
container := &api.Container{Name: "container"}
podName := "foo"
podNamespace := "test"
name := fmt.Sprintf("k8s_%s_%s_%s_%s_42", container.Name, podName, podNamespace, podUID)
podFullName := fmt.Sprintf("%s_%s", podName, podNamespace)
returned, hash, err := ParseDockerName(name)
if err != nil {
t.Errorf("Failed to parse Docker container name %q: %v", name, err)
}
if returned.PodFullName != podFullName || string(returned.PodUID) != podUID || returned.ContainerName != container.Name || hash != 0 {
t.Errorf("unexpected parse: %s %s %s %d", returned.PodFullName, returned.PodUID, returned.ContainerName, hash)
}
}
func TestVersion(t *testing.T) {
fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Version=1.1.3", "ApiVersion=1.15"}}
manager := &DockerManager{client: fakeDocker}
version, err := manager.Version()
if err != nil {
t.Errorf("got error while getting docker server version - %s", err)
}
expectedVersion, _ := docker.NewAPIVersion("1.15")
if e, a := expectedVersion.String(), version.String(); e != a {
t.Errorf("invalid docker server version. expected: %v, got: %v", e, a)
}
}
func TestExecSupportExists(t *testing.T) {
fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Version=1.3.0", "ApiVersion=1.15"}}
runner := &DockerManager{client: fakeDocker}
useNativeExec, err := runner.nativeExecSupportExists()
if err != nil {
t.Errorf("got error while checking for exec support - %s", err)
}
if !useNativeExec {
t.Errorf("invalid exec support check output. Expected true")
}
}
func TestExecSupportNotExists(t *testing.T) {
fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Version=1.1.2", "ApiVersion=1.14"}}
runner := &DockerManager{client: fakeDocker}
useNativeExec, _ := runner.nativeExecSupportExists()
if useNativeExec {
t.Errorf("invalid exec support check output.")
}
}
func TestDockerContainerCommand(t *testing.T) {
runner := &DockerManager{}
containerID := "1234"
command := []string{"ls"}
cmd, _ := runner.getRunInContainerCommand(containerID, command)
if cmd.Dir != "/var/lib/docker/execdriver/native/"+containerID {
t.Errorf("unexpected command CWD: %s", cmd.Dir)
}
if !reflect.DeepEqual(cmd.Args, []string{"/usr/sbin/nsinit", "exec", "ls"}) {
t.Errorf("unexpected command args: %s", cmd.Args)
}
}
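// parseImageName is expected to split an image reference into name and tag,
// keeping a registry host:port prefix as part of the name (see cases below).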
func TestParseImageName(t *testing.T) {
tests := []struct {
imageName string
name string
tag string
}{
{"ubuntu", "ubuntu", ""},
{"ubuntu:2342", "ubuntu", "2342"},
{"ubuntu:latest", "ubuntu", "latest"},
{"foo/bar:445566", "foo/bar", "445566"},
{"registry.example.com:5000/foobar", "registry.example.com:5000/foobar", ""},
{"registry.example.com:5000/foobar:5342", "registry.example.com:5000/foobar", "5342"},
{"registry.example.com:5000/foobar:latest", "registry.example.com:5000/foobar", "latest"},
}
for _, test := range tests {
name, tag := parseImageName(test.imageName)
if name != test.name || tag != test.tag {
t.Errorf("Expected name/tag: %s/%s, got %s/%s", test.name, test.tag, name, tag)
}
}
}
func TestPullWithNoSecrets(t *testing.T) {
tests := []struct {
imageName string
expectedImage string
}{
{"ubuntu", "ubuntu:latest using {}"},
{"ubuntu:2342", "ubuntu:2342 using {}"},
{"ubuntu:latest", "ubuntu:latest using {}"},
{"foo/bar:445566", "foo/bar:445566 using {}"},
{"registry.example.com:5000/foobar", "registry.example.com:5000/foobar:latest using {}"},
{"registry.example.com:5000/foobar:5342", "registry.example.com:5000/foobar:5342 using {}"},
{"registry.example.com:5000/foobar:latest", "registry.example.com:5000/foobar:latest using {}"},
}
for _, test := range tests {
fakeKeyring := &credentialprovider.FakeKeyring{}
fakeClient := &FakeDockerClient{}
dp := dockerPuller{
client: fakeClient,
keyring: fakeKeyring,
}
err := dp.Pull(test.imageName, []api.Secret{})
if err != nil {
t.Errorf("unexpected non-nil err: %s", err)
continue
}
if e, a := 1, len(fakeClient.pulled); e != a {
t.Errorf("%s: expected 1 pulled image, got %d: %v", test.imageName, a, fakeClient.pulled)
continue
}
if e, a := test.expectedImage, fakeClient.pulled[0]; e != a {
t.Errorf("%s: expected pull of %q, but got %q", test.imageName, e, a)
}
}
}
func TestPullWithJSONError(t *testing.T) {
tests := map[string]struct {
imageName string
err error
expectedError string
}{
"Json error": {
"ubuntu",
&jsonmessage.JSONError{Code: 50, Message: "Json error"},
"Json error",
},
"Bad gateway": {
"ubuntu",
&jsonmessage.JSONError{Code: 502, Message: "<!doctype html>\n<html class=\"no-js\" lang=\"\">\n <head>\n </head>\n <body>\n <h1>Oops, there was an error!</h1>\n <p>We have been contacted of this error, feel free to check out <a href=\"http://status.docker.com/\">status.docker.com</a>\n to see if there is a bigger issue.</p>\n\n </body>\n</html>"},
"because the registry is temporarily unavailable",
},
}
for i, test := range tests {
fakeKeyring := &credentialprovider.FakeKeyring{}
fakeClient := &FakeDockerClient{
Errors: map[string]error{"pull": test.err},
}
puller := &dockerPuller{
client: fakeClient,
keyring: fakeKeyring,
}
err := puller.Pull(test.imageName, []api.Secret{})
if err == nil || !strings.Contains(err.Error(), test.expectedError) {
t.Errorf("%s: expect error %s, got : %s", i, test.expectedError, err)
continue
}
}
}
func TestPullWithSecrets(t *testing.T) {
// auth value is equivalent to: "username":"passed-user","password":"passed-password"
dockerCfg := map[string]map[string]string{"index.docker.io/v1/": {"email": "passed-email", "auth": "cGFzc2VkLXVzZXI6cGFzc2VkLXBhc3N3b3Jk"}}
dockercfgContent, err := json.Marshal(dockerCfg)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
tests := map[string]struct {
imageName string
passedSecrets []api.Secret
builtInDockerConfig credentialprovider.DockerConfig
expectedPulls []string
}{
"no matching secrets": {
"ubuntu",
[]api.Secret{},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{}),
[]string{"ubuntu:latest using {}"},
},
"default keyring secrets": {
"ubuntu",
[]api.Secret{},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"index.docker.io/v1/": {"built-in", "password", "email"}}),
[]string{`ubuntu:latest using {"username":"built-in","password":"password","email":"email"}`},
},
"default keyring secrets unused": {
"ubuntu",
[]api.Secret{},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"extraneous": {"built-in", "password", "email"}}),
[]string{`ubuntu:latest using {}`},
},
"builtin keyring secrets, but use passed": {
"ubuntu",
[]api.Secret{{Type: api.SecretTypeDockercfg, Data: map[string][]byte{api.DockerConfigKey: dockercfgContent}}},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"index.docker.io/v1/": {"built-in", "password", "email"}}),
[]string{`ubuntu:latest using {"username":"passed-user","password":"passed-password","email":"passed-email"}`},
},
}
for _, test := range tests {
builtInKeyRing := &credentialprovider.BasicDockerKeyring{}
builtInKeyRing.Add(test.builtInDockerConfig)
fakeClient := &FakeDockerClient{}
dp := dockerPuller{
client: fakeClient,
keyring: builtInKeyRing,
}
err := dp.Pull(test.imageName, test.passedSecrets)
if err != nil {
t.Errorf("unexpected non-nil err: %s", err)
continue
}
if e, a := 1, len(fakeClient.pulled); e != a {
t.Errorf("%s: expected 1 pulled image, got %d: %v", test.imageName, a, fakeClient.pulled)
continue
}
if e, a := test.expectedPulls, fakeClient.pulled; !reflect.DeepEqual(e, a) {
t.Errorf("%s: expected pull of %v, but got %v", test.imageName, e, a)
}
}
}
func TestDockerKeyringLookupFails(t *testing.T) {
fakeKeyring := &credentialprovider.FakeKeyring{}
fakeClient := &FakeDockerClient{
Errors: map[string]error{"pull": fmt.Errorf("test error")},
}
dp := dockerPuller{
client: fakeClient,
keyring: fakeKeyring,
}
err := dp.Pull("host/repository/image:version", []api.Secret{})
if err == nil {
t.Errorf("unexpected non-error")
}
msg := "image pull failed for host/repository/image:version, this may be because there are no credentials on this request. details: (test error)"
if err.Error() != msg {
t.Errorf("expected: %s, saw: %s", msg, err.Error())
}
}
func TestDockerKeyringLookup(t *testing.T) {
ada := docker.AuthConfiguration{
Username: "ada",
Password: "smash",
Email: "[email protected]",
}
grace := docker.AuthConfiguration{
Username: "grace",
Password: "squash",
Email: "[email protected]",
}
dk := &credentialprovider.BasicDockerKeyring{}
dk.Add(credentialprovider.DockerConfig{
"bar.example.com/pong": credentialprovider.DockerConfigEntry{
Username: grace.Username,
Password: grace.Password,
Email: grace.Email,
},
"bar.example.com": credentialprovider.DockerConfigEntry{
Username: ada.Username,
Password: ada.Password,
Email: ada.Email,
},
})
tests := []struct {
image string
match []docker.AuthConfiguration
ok bool
}{
// direct match
{"bar.example.com", []docker.AuthConfiguration{ada}, true},
// direct match deeper than other possible matches
{"bar.example.com/pong", []docker.AuthConfiguration{grace, ada}, true},
// no direct match, deeper path ignored
{"bar.example.com/ping", []docker.AuthConfiguration{ada}, true},
// match first part of path token
{"bar.example.com/pongz", []docker.AuthConfiguration{grace, ada}, true},
// match regardless of sub-path
{"bar.example.com/pong/pang", []docker.AuthConfiguration{grace, ada}, true},
// no host match
{"example.com", []docker.AuthConfiguration{}, false},
{"foo.example.com", []docker.AuthConfiguration{}, false},
}
for i, tt := range tests {
match, ok := dk.Lookup(tt.image)
if tt.ok != ok {
t.Errorf("case %d: expected ok=%t, got %t", i, tt.ok, ok)
}
if !reflect.DeepEqual(tt.match, match) {
t.Errorf("case %d: expected match=%#v, got %#v", i, tt.match, match)
}
}
}
// This validates that dockercfg entries with a scheme and url path are properly matched
// by images that only match the hostname.
// NOTE: the above covers the case of a more specific match trumping just hostname.
func TestIssue3797(t *testing.T) {
rex := docker.AuthConfiguration{
Username: "rex",
Password: "tiny arms",
Email: "[email protected]",
}
dk := &credentialprovider.BasicDockerKeyring{}
dk.Add(credentialprovider.DockerConfig{
"https://quay.io/v1/": credentialprovider.DockerConfigEntry{
Username: rex.Username,
Password: rex.Password,
Email: rex.Email,
},
})
tests := []struct {
image string
match []docker.AuthConfiguration
ok bool
}{
// direct match
{"quay.io", []docker.AuthConfiguration{rex}, true},
// partial matches
{"quay.io/foo", []docker.AuthConfiguration{rex}, true},
{"quay.io/foo/bar", []docker.AuthConfiguration{rex}, true},
}
for i, tt := range tests {
match, ok := dk.Lookup(tt.image)
if tt.ok != ok {
t.Errorf("case %d: expected ok=%t, got %t", i, tt.ok, ok)
}
if !reflect.DeepEqual(tt.match, match) {
t.Errorf("case %d: expected match=%#v, got %#v", i, tt.match, match)
}
}
}
type imageTrackingDockerClient struct {
*FakeDockerClient
imageName string
}
func (f *imageTrackingDockerClient) InspectImage(name string) (image *docker.Image, err error) {
image, err = f.FakeDockerClient.InspectImage(name)
f.imageName = name
return
}
func TestIsImagePresent(t *testing.T) {
cl := &imageTrackingDockerClient{&FakeDockerClient{}, ""}
puller := &dockerPuller{
client: cl,
}
_, _ = puller.IsImagePresent("abc:123")
if cl.imageName != "abc:123" {
t.Errorf("expected inspection of image abc:123, instead inspected image %v", cl.imageName)
}
}
type podsByID []*kubecontainer.Pod
func (b podsByID) Len() int { return len(b) }
func (b podsByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b podsByID) Less(i, j int) bool { return b[i].ID < b[j].ID }
type containersByID []*kubecontainer.Container
func (b containersByID) Len() int { return len(b) }
func (b containersByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b containersByID) Less(i, j int) bool { return b[i].ID < b[j].ID }
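// GetPods should group containers into pods keyed by the UID embedded in
// /k8s_<container>.<hash>_<pod>_<ns>_<uid>_<attempt> names; the boolean flag
// controls whether exited containers are included.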
func TestFindContainersByPod(t *testing.T) {
tests := []struct {
containerList []docker.APIContainers
exitedContainerList []docker.APIContainers
all bool
expectedPods []*kubecontainer.Pod
}{
{
[]docker.APIContainers{
{
ID: "foobar",
Names: []string{"/k8s_foobar.1234_qux_ns_1234_42"},
},
{
ID: "barbar",
Names: []string{"/k8s_barbar.1234_qux_ns_2343_42"},
},
{
ID: "baz",
Names: []string{"/k8s_baz.1234_qux_ns_1234_42"},
},
},
[]docker.APIContainers{
{
ID: "barfoo",
Names: []string{"/k8s_barfoo.1234_qux_ns_1234_42"},
},
{
ID: "bazbaz",
Names: []string{"/k8s_bazbaz.1234_qux_ns_5678_42"},
},
},
false,
[]*kubecontainer.Pod{
{
ID: "1234",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: "foobar",
Name: "foobar",
Hash: 0x1234,
},
{
ID: "baz",
Name: "baz",
Hash: 0x1234,
},
},
},
{
ID: "2343",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: "barbar",
Name: "barbar",
Hash: 0x1234,
},
},
},
},
},
{
[]docker.APIContainers{
{
ID: "foobar",
Names: []string{"/k8s_foobar.1234_qux_ns_1234_42"},
},
{
ID: "barbar",
Names: []string{"/k8s_barbar.1234_qux_ns_2343_42"},
},
{
ID: "baz",
Names: []string{"/k8s_baz.1234_qux_ns_1234_42"},
},
},
[]docker.APIContainers{
{
ID: "barfoo",
Names: []string{"/k8s_barfoo.1234_qux_ns_1234_42"},
},
{
ID: "bazbaz",
Names: []string{"/k8s_bazbaz.1234_qux_ns_5678_42"},
},
},
true,
[]*kubecontainer.Pod{
{
ID: "1234",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: "foobar",
Name: "foobar",
Hash: 0x1234,
},
{
ID: "barfoo",
Name: "barfoo",
Hash: 0x1234,
},
{
ID: "baz",
Name: "baz",
Hash: 0x1234,
},
},
},
{
ID: "2343",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: "barbar",
Name: "barbar",
Hash: 0x1234,
},
},
},
{
ID: "5678",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: "bazbaz",
Name: "bazbaz",
Hash: 0x1234,
},
},
},
},
},
{
[]docker.APIContainers{},
[]docker.APIContainers{},
true,
nil,
},
}
fakeClient := &FakeDockerClient{}
np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
containerManager := NewFakeDockerManager(fakeClient, &record.FakeRecorder{}, nil, nil, &cadvisorApi.MachineInfo{}, PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil)
for i, test := range tests {
fakeClient.ContainerList = test.containerList
fakeClient.ExitedContainerList = test.exitedContainerList
result, _ := containerManager.GetPods(test.all)
for i := range result {
sort.Sort(containersByID(result[i].Containers))
}
for i := range test.expectedPods {
sort.Sort(containersByID(test.expectedPods[i].Containers))
}
sort.Sort(podsByID(result))
sort.Sort(podsByID(test.expectedPods))
if !reflect.DeepEqual(test.expectedPods, result) {
t.Errorf("%d: expected: %#v, saw: %#v", i, test.expectedPods, result)
}
}
}
func TestMakePortsAndBindings(t *testing.T) {
ports := []kubecontainer.PortMapping{
{
ContainerPort: 80,
HostPort: 8080,
HostIP: "127.0.0.1",
},
{
ContainerPort: 443,
HostPort: 443,
Protocol: "tcp",
},
{
ContainerPort: 444,
HostPort: 444,
Protocol: "udp",
},
{
ContainerPort: 445,
HostPort: 445,
Protocol: "foobar",
},
{
ContainerPort: 443,
HostPort: 446,
Protocol: "tcp",
},
{
ContainerPort: 443,
HostPort: 446,
Protocol: "udp",
},
}
exposedPorts, bindings := makePortsAndBindings(ports)
// Count the expected exposed ports and bindings
expectedExposedPorts := map[string]struct{}{}
for _, binding := range ports {
dockerKey := strconv.Itoa(binding.ContainerPort) + "/" + string(binding.Protocol)
expectedExposedPorts[dockerKey] = struct{}{}
}
// Should expose right ports in docker
if len(expectedExposedPorts) != len(exposedPorts) {
t.Errorf("Unexpected ports and bindings, %#v %#v %#v", ports, exposedPorts, bindings)
}
// Construct expected bindings
expectPortBindings := map[string][]docker.PortBinding{
"80/tcp": {
docker.PortBinding{
HostPort: "8080",
HostIP: "127.0.0.1",
},
},
"443/tcp": {
docker.PortBinding{
HostPort: "443",
HostIP: "",
},
docker.PortBinding{
HostPort: "446",
HostIP: "",
},
},
"443/udp": {
docker.PortBinding{
HostPort: "446",
HostIP: "",
},
},
"444/udp": {
docker.PortBinding{
HostPort: "444",
HostIP: "",
},
},
"445/tcp": {
docker.PortBinding{
HostPort: "445",
HostIP: "",
},
},
}
	// iterate over the bindings by dockerPort and check their port bindings
for dockerPort, portBindings := range bindings {
switch dockerPort {
case "80/tcp", "443/tcp", "443/udp", "444/udp", "445/tcp":
if !reflect.DeepEqual(expectPortBindings[string(dockerPort)], portBindings) {
t.Errorf("Unexpected portbindings for %#v, expected: %#v, but got: %#v",
dockerPort, expectPortBindings[string(dockerPort)], portBindings)
}
default:
t.Errorf("Unexpected docker port: %#v with portbindings: %#v", dockerPort, portBindings)
}
}
}
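// milliCPUToQuota is expected to map milliCPU to a CFS (quota, period) pair,
// presumably quota = milliCPU * period / 1000 with a fixed 100000us period,
// and (0, 0) for a zero input, per the cases below.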
func TestMilliCPUToQuota(t *testing.T) {
testCases := []struct {
input int64
quota int64
period int64
}{
{
input: int64(0),
quota: int64(0),
period: int64(0),
},
{
input: int64(200),
quota: int64(20000),
period: int64(100000),
},
{
input: int64(500),
quota: int64(50000),
period: int64(100000),
},
{
input: int64(1000),
quota: int64(100000),
period: int64(100000),
},
{
input: int64(1500),
quota: int64(150000),
period: int64(100000),
},
}
for _, testCase := range testCases {
quota, period := milliCPUToQuota(testCase.input)
if quota != testCase.quota || period != testCase.period {
t.Errorf("Input %v, expected quota %v period %v, but got quota %v period %v", testCase.input, testCase.quota, testCase.period, quota, period)
}
}
}
| verifyStringArrayEquals |
vterm.rs | use libc::{c_int, size_t};
use std::io::prelude::*;
use std::ptr::NonNull;
use std::sync::mpsc;
use super::*;
pub struct VTerm {
pub ptr: NonNull<ffi::VTerm>,
pub screen_callbacks: Option<ffi::VTermScreenCallbacks>,
pub screen_event_rx: Option<mpsc::Receiver<ScreenEvent>>,
pub screen_event_tx: Option<mpsc::Sender<ScreenEvent>>,
pub screen_ptr: NonNull<ffi::VTermScreen>,
pub state_callbacks: Option<ffi::VTermStateCallbacks>,
pub state_event_rx: Option<mpsc::Receiver<StateEvent>>,
pub state_event_tx: Option<mpsc::Sender<StateEvent>>,
pub state_ptr: NonNull<ffi::VTermState>,
}
impl VTerm {
/// Attempt to create a new VTerm of the given size.
pub fn new(size: &Size) -> Option<VTerm> {
let mut vterm_ptr =
unsafe { NonNull::new(ffi::vterm_new(size.height as c_int, size.width as c_int))? };
let screen_ptr = unsafe { NonNull::new(ffi::vterm_obtain_screen(vterm_ptr.as_mut()))? };
let state_ptr = unsafe { NonNull::new(ffi::vterm_obtain_state(vterm_ptr.as_mut()))? };
let mut vterm = VTerm {
ptr: vterm_ptr,
screen_callbacks: None,
screen_event_rx: None,
screen_event_tx: None,
            screen_ptr,
state_callbacks: None,
state_event_rx: None,
state_event_tx: None,
            state_ptr,
};
vterm.screen_reset(true);
Some(vterm)
}
pub fn get_size(&self) -> Size {
let mut cols: c_int = 0;
let mut rows: c_int = 0;
unsafe {
ffi::vterm_get_size(self.ptr.as_ref(), &mut rows, &mut cols);
}
Size {
height: rows as usize,
width: cols as usize,
}
}
pub fn set_size(&mut self, size: &Size) {
unsafe {
ffi::vterm_set_size(self.ptr.as_mut(), size.height as c_int, size.width as c_int);
}
}
pub fn get_utf8(&self) -> bool {
unsafe { super::int_to_bool(ffi::vterm_get_utf8(self.ptr.as_ref())) }
}
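    /// Enable or disable UTF-8 mode on the underlying terminal.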
pub fn | (&mut self, is_utf8: bool) {
unsafe { ffi::vterm_set_utf8(self.ptr.as_mut(), super::bool_to_int(is_utf8)) }
}
}
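// `Write` feeds raw bytes into the terminal parser via vterm_input_write;
// `flush` pushes any pending damage through screen_flush_damage.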
impl Write for VTerm {
fn write(&mut self, buf: &[u8]) -> ::std::io::Result<usize> {
let size = unsafe {
ffi::vterm_input_write(self.ptr.as_mut(), buf.as_ptr(), buf.len() as size_t) as usize
};
Ok(size)
}
fn flush(&mut self) -> ::std::io::Result<()> {
self.screen_flush_damage();
Ok(())
}
}
impl Drop for VTerm {
fn drop(&mut self) {
unsafe { ffi::vterm_free(self.ptr.as_mut()) }
}
}
mod tests {
#![allow(unused_imports)]
use super::super::*;
use std::io::prelude::*;
#[test]
fn vterm_can_create_and_destroy() {
let vterm: VTerm = VTerm::new(&Size {
height: 2,
width: 2,
})
.unwrap();
drop(vterm);
}
#[test]
fn vterm_can_get_size() {
let vterm: VTerm = VTerm::new(&Size {
height: 2,
width: 3,
})
.unwrap();
let size = vterm.get_size();
assert_eq!((2, 3), (size.height, size.width));
}
#[test]
fn vterm_can_set_size() {
let mut vterm: VTerm = VTerm::new(&Size {
height: 2,
width: 3,
})
.unwrap();
vterm.set_size(&Size {
height: 1,
width: 2,
});
let size = vterm.get_size();
assert_eq!((1, 2), (size.height, size.width));
}
#[test]
fn vterm_can_get_and_set_utf8() {
let mut vterm: VTerm = VTerm::new(&Size {
height: 2,
width: 2,
})
.unwrap();
vterm.set_utf8(true);
assert_eq!(true, vterm.get_utf8());
vterm.set_utf8(false);
assert_eq!(false, vterm.get_utf8());
}
#[test]
fn vterm_can_write() {
let mut vterm: VTerm = VTerm::new(&Size {
height: 2,
width: 2,
})
.unwrap();
let input: &[u8] = "abcd".as_bytes();
let result = vterm.write(input);
assert!(result.is_ok());
assert_eq!(4, result.unwrap());
}
}
| set_utf8 |
run.py | from fh_webhook import create_app
app = create_app()
if __name__ == "__main__": | app.run() | |
tl_photo_gen.go | // Code generated by gotdgen, DO NOT EDIT.
package tg
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"go.uber.org/multierr"
"github.com/gotd/td/bin"
"github.com/gotd/td/tdp"
"github.com/gotd/td/tgerr"
)
// No-op definition for keeping imports.
var (
_ = bin.Buffer{}
_ = context.Background()
_ = fmt.Stringer(nil)
_ = strings.Builder{}
_ = errors.Is
_ = multierr.AppendInto
_ = sort.Ints
_ = tdp.Format
_ = tgerr.Error{}
)
// PhotoEmpty represents TL type `photoEmpty#2331b22d`.
// Empty constructor, non-existent photo
//
// See https://core.telegram.org/constructor/photoEmpty for reference.
type PhotoEmpty struct {
// Photo identifier
ID int64
}
// PhotoEmptyTypeID is TL type id of PhotoEmpty.
const PhotoEmptyTypeID = 0x2331b22d
// construct implements constructor of PhotoClass.
func (p PhotoEmpty) construct() PhotoClass { return &p }
// Ensuring interfaces in compile-time for PhotoEmpty.
var (
_ bin.Encoder = &PhotoEmpty{}
_ bin.Decoder = &PhotoEmpty{}
_ bin.BareEncoder = &PhotoEmpty{}
_ bin.BareDecoder = &PhotoEmpty{}
_ PhotoClass = &PhotoEmpty{}
)
func (p *PhotoEmpty) Zero() bool {
if p == nil {
return true
}
if !(p.ID == 0) {
return false
}
return true
}
// String implements fmt.Stringer.
func (p *PhotoEmpty) String() string {
if p == nil {
return "PhotoEmpty(nil)"
}
type Alias PhotoEmpty
return fmt.Sprintf("PhotoEmpty%+v", Alias(*p))
}
// FillFrom fills PhotoEmpty from given interface.
func (p *PhotoEmpty) FillFrom(from interface {
GetID() (value int64)
}) {
p.ID = from.GetID()
}
// TypeID returns type id in TL schema.
//
// See https://core.telegram.org/mtproto/TL-tl#remarks.
func (*PhotoEmpty) TypeID() uint32 {
return PhotoEmptyTypeID
}
// TypeName returns name of type in TL schema.
func (*PhotoEmpty) TypeName() string {
return "photoEmpty"
}
// TypeInfo returns info about TL type.
func (p *PhotoEmpty) TypeInfo() tdp.Type {
typ := tdp.Type{
Name: "photoEmpty",
ID: PhotoEmptyTypeID,
}
if p == nil {
typ.Null = true
return typ
}
typ.Fields = []tdp.Field{
{
Name: "ID",
SchemaName: "id",
},
}
return typ
}
// Encode implements bin.Encoder.
func (p *PhotoEmpty) Encode(b *bin.Buffer) error {
if p == nil {
return fmt.Errorf("can't encode photoEmpty#2331b22d as nil")
}
b.PutID(PhotoEmptyTypeID)
return p.EncodeBare(b)
}
// EncodeBare implements bin.BareEncoder.
func (p *PhotoEmpty) EncodeBare(b *bin.Buffer) error {
if p == nil {
return fmt.Errorf("can't encode photoEmpty#2331b22d as nil")
}
b.PutLong(p.ID)
return nil
}
// Decode implements bin.Decoder.
func (p *PhotoEmpty) Decode(b *bin.Buffer) error {
if p == nil {
return fmt.Errorf("can't decode photoEmpty#2331b22d to nil")
}
if err := b.ConsumeID(PhotoEmptyTypeID); err != nil {
return fmt.Errorf("unable to decode photoEmpty#2331b22d: %w", err)
}
return p.DecodeBare(b)
}
// DecodeBare implements bin.BareDecoder.
func (p *PhotoEmpty) DecodeBare(b *bin.Buffer) error {
if p == nil {
return fmt.Errorf("can't decode photoEmpty#2331b22d to nil")
}
{
value, err := b.Long()
if err != nil {
return fmt.Errorf("unable to decode photoEmpty#2331b22d: field id: %w", err)
}
p.ID = value
}
return nil
}
// GetID returns value of ID field.
func (p *PhotoEmpty) GetID() (value int64) {
return p.ID
}
// Photo represents TL type `photo#fb197a65`.
// Photo
//
// See https://core.telegram.org/constructor/photo for reference.
type Photo struct {
// Flags, see TL conditional fields¹
//
// Links:
// 1) https://core.telegram.org/mtproto/TL-combinators#conditional-fields
Flags bin.Fields
// Whether the photo has mask stickers attached to it
HasStickers bool
// ID
ID int64
// Access hash
AccessHash int64
// file reference¹
//
// Links:
// 1) https://core.telegram.org/api/file_reference
FileReference []byte
// Date of upload
Date int
// Available sizes for download
Sizes []PhotoSizeClass
// For animated profiles¹, the MPEG4 videos
//
// Links:
// 1) https://core.telegram.org/api/files#animated-profile-pictures
//
// Use SetVideoSizes and GetVideoSizes helpers.
VideoSizes []VideoSize
// DC ID to use for download
DCID int
}
// PhotoTypeID is TL type id of Photo.
const PhotoTypeID = 0xfb197a65
// construct implements constructor of PhotoClass.
func (p Photo) construct() PhotoClass { return &p }
// Ensuring interfaces in compile-time for Photo.
var (
_ bin.Encoder = &Photo{}
_ bin.Decoder = &Photo{}
_ bin.BareEncoder = &Photo{}
_ bin.BareDecoder = &Photo{}
_ PhotoClass = &Photo{}
)
func (p *Photo) Zero() bool {
if p == nil {
return true
}
if !(p.Flags.Zero()) {
return false
}
if !(p.HasStickers == false) {
return false
}
if !(p.ID == 0) {
return false
}
if !(p.AccessHash == 0) {
return false
}
if !(p.FileReference == nil) {
return false
}
if !(p.Date == 0) {
return false
}
if !(p.Sizes == nil) {
return false
}
if !(p.VideoSizes == nil) {
return false
}
if !(p.DCID == 0) {
return false
}
return true
}
// String implements fmt.Stringer.
func (p *Photo) String() string {
if p == nil {
return "Photo(nil)"
}
type Alias Photo
return fmt.Sprintf("Photo%+v", Alias(*p))
}
// FillFrom fills Photo from given interface.
func (p *Photo) FillFrom(from interface {
GetHasStickers() (value bool)
GetID() (value int64)
GetAccessHash() (value int64)
GetFileReference() (value []byte)
GetDate() (value int)
GetSizes() (value []PhotoSizeClass)
GetVideoSizes() (value []VideoSize, ok bool)
GetDCID() (value int)
}) {
p.HasStickers = from.GetHasStickers()
p.ID = from.GetID()
p.AccessHash = from.GetAccessHash()
p.FileReference = from.GetFileReference()
p.Date = from.GetDate()
p.Sizes = from.GetSizes()
if val, ok := from.GetVideoSizes(); ok {
p.VideoSizes = val
}
p.DCID = from.GetDCID()
}
// TypeID returns type id in TL schema.
//
// See https://core.telegram.org/mtproto/TL-tl#remarks.
func (*Photo) TypeID() uint32 {
return PhotoTypeID
}
// TypeName returns name of type in TL schema.
func (*Photo) TypeName() string {
return "photo"
}
// TypeInfo returns info about TL type.
func (p *Photo) TypeInfo() tdp.Type {
typ := tdp.Type{
Name: "photo",
ID: PhotoTypeID,
}
if p == nil {
typ.Null = true
return typ
}
typ.Fields = []tdp.Field{
{
Name: "HasStickers",
SchemaName: "has_stickers",
Null: !p.Flags.Has(0),
},
{
Name: "ID",
SchemaName: "id",
},
{
Name: "AccessHash",
SchemaName: "access_hash",
},
{
Name: "FileReference",
SchemaName: "file_reference",
},
{
Name: "Date",
SchemaName: "date",
},
{
Name: "Sizes",
SchemaName: "sizes",
},
{
Name: "VideoSizes",
SchemaName: "video_sizes",
Null: !p.Flags.Has(1),
},
{
Name: "DCID",
SchemaName: "dc_id",
},
}
return typ
}
// Encode implements bin.Encoder.
func (p *Photo) Encode(b *bin.Buffer) error {
if p == nil {
return fmt.Errorf("can't encode photo#fb197a65 as nil")
}
b.PutID(PhotoTypeID)
return p.EncodeBare(b)
}
// EncodeBare implements bin.BareEncoder.
func (p *Photo) EncodeBare(b *bin.Buffer) error {
if p == nil {
return fmt.Errorf("can't encode photo#fb197a65 as nil")
}
if !(p.HasStickers == false) {
p.Flags.Set(0)
}
if !(p.VideoSizes == nil) {
p.Flags.Set(1)
}
if err := p.Flags.Encode(b); err != nil {
return fmt.Errorf("unable to encode photo#fb197a65: field flags: %w", err)
}
b.PutLong(p.ID)
b.PutLong(p.AccessHash)
b.PutBytes(p.FileReference)
b.PutInt(p.Date)
b.PutVectorHeader(len(p.Sizes))
for idx, v := range p.Sizes {
if v == nil {
return fmt.Errorf("unable to encode photo#fb197a65: field sizes element with index %d is nil", idx)
}
if err := v.Encode(b); err != nil {
return fmt.Errorf("unable to encode photo#fb197a65: field sizes element with index %d: %w", idx, err)
}
}
if p.Flags.Has(1) {
b.PutVectorHeader(len(p.VideoSizes))
for idx, v := range p.VideoSizes {
if err := v.Encode(b); err != nil {
return fmt.Errorf("unable to encode photo#fb197a65: field video_sizes element with index %d: %w", idx, err)
}
}
}
b.PutInt(p.DCID)
return nil
}
// Decode implements bin.Decoder.
func (p *Photo) Decode(b *bin.Buffer) error {
if p == nil {
return fmt.Errorf("can't decode photo#fb197a65 to nil")
}
if err := b.ConsumeID(PhotoTypeID); err != nil {
return fmt.Errorf("unable to decode photo#fb197a65: %w", err)
}
return p.DecodeBare(b)
}
// DecodeBare implements bin.BareDecoder.
func (p *Photo) DecodeBare(b *bin.Buffer) error {
if p == nil {
return fmt.Errorf("can't decode photo#fb197a65 to nil")
}
{
if err := p.Flags.Decode(b); err != nil {
return fmt.Errorf("unable to decode photo#fb197a65: field flags: %w", err)
}
}
p.HasStickers = p.Flags.Has(0)
{
value, err := b.Long()
if err != nil {
return fmt.Errorf("unable to decode photo#fb197a65: field id: %w", err)
}
p.ID = value
}
{
value, err := b.Long()
if err != nil {
return fmt.Errorf("unable to decode photo#fb197a65: field access_hash: %w", err)
}
p.AccessHash = value
}
{
value, err := b.Bytes()
if err != nil {
return fmt.Errorf("unable to decode photo#fb197a65: field file_reference: %w", err)
}
p.FileReference = value
}
{
value, err := b.Int()
if err != nil {
return fmt.Errorf("unable to decode photo#fb197a65: field date: %w", err)
}
p.Date = value
}
{
headerLen, err := b.VectorHeader()
if err != nil {
return fmt.Errorf("unable to decode photo#fb197a65: field sizes: %w", err)
}
if headerLen > 0 {
p.Sizes = make([]PhotoSizeClass, 0, headerLen%bin.PreallocateLimit)
}
for idx := 0; idx < headerLen; idx++ {
value, err := DecodePhotoSize(b)
if err != nil {
return fmt.Errorf("unable to decode photo#fb197a65: field sizes: %w", err)
}
p.Sizes = append(p.Sizes, value)
}
}
if p.Flags.Has(1) {
headerLen, err := b.VectorHeader()
if err != nil {
return fmt.Errorf("unable to decode photo#fb197a65: field video_sizes: %w", err)
}
if headerLen > 0 {
p.VideoSizes = make([]VideoSize, 0, headerLen%bin.PreallocateLimit)
}
for idx := 0; idx < headerLen; idx++ {
var value VideoSize
if err := value.Decode(b); err != nil {
return fmt.Errorf("unable to decode photo#fb197a65: field video_sizes: %w", err)
}
p.VideoSizes = append(p.VideoSizes, value)
}
}
{
value, err := b.Int()
if err != nil {
return fmt.Errorf("unable to decode photo#fb197a65: field dc_id: %w", err)
}
p.DCID = value
}
return nil
}
// SetHasStickers sets value of HasStickers conditional field.
func (p *Photo) SetHasStickers(value bool) {
if value {
p.Flags.Set(0)
p.HasStickers = true
} else {
p.Flags.Unset(0)
p.HasStickers = false
}
}
// GetHasStickers returns value of HasStickers conditional field.
func (p *Photo) GetHasStickers() (value bool) {
return p.Flags.Has(0)
}
// GetID returns value of ID field.
func (p *Photo) GetID() (value int64) {
return p.ID
}
// GetAccessHash returns value of AccessHash field.
func (p *Photo) GetAccessHash() (value int64) {
return p.AccessHash
}
// GetFileReference returns value of FileReference field.
func (p *Photo) GetFileReference() (value []byte) {
return p.FileReference
}
// GetDate returns value of Date field.
func (p *Photo) GetDate() (value int) {
return p.Date
}
// GetSizes returns value of Sizes field.
func (p *Photo) GetSizes() (value []PhotoSizeClass) {
return p.Sizes
}
// SetVideoSizes sets value of VideoSizes conditional field.
func (p *Photo) SetVideoSizes(value []VideoSize) {
p.Flags.Set(1)
p.VideoSizes = value
}
// GetVideoSizes returns value of VideoSizes conditional field and
// boolean which is true if field was set.
func (p *Photo) GetVideoSizes() (value []VideoSize, ok bool) {
if !p.Flags.Has(1) {
return value, false
}
return p.VideoSizes, true
}
// GetDCID returns value of DCID field.
func (p *Photo) GetDCID() (value int) {
return p.DCID
}
// MapSizes returns field Sizes wrapped in PhotoSizeClassArray helper.
func (p *Photo) MapSizes() (value PhotoSizeClassArray) {
return PhotoSizeClassArray(p.Sizes)
}
// PhotoClass represents Photo generic type.
//
// See https://core.telegram.org/type/Photo for reference.
//
// Example:
// g, err := tg.DecodePhoto(buf)
// if err != nil {
// panic(err)
// }
// switch v := g.(type) {
// case *tg.PhotoEmpty: // photoEmpty#2331b22d
// case *tg.Photo: // photo#fb197a65
// default: panic(v)
// }
type PhotoClass interface {
bin.Encoder
bin.Decoder
bin.BareEncoder
bin.BareDecoder
construct() PhotoClass
// TypeID returns type id in TL schema.
//
// See https://core.telegram.org/mtproto/TL-tl#remarks.
TypeID() uint32
// TypeName returns name of type in TL schema.
TypeName() string
// String implements fmt.Stringer.
String() string
// Zero returns true if current object has a zero value.
Zero() bool
// Photo identifier
GetID() (value int64)
// AsNotEmpty tries to map PhotoClass to Photo.
AsNotEmpty() (*Photo, bool)
}
// AsInput tries to map Photo to InputPhoto.
func (p *Photo) AsInput() *InputPhoto {
value := new(InputPhoto)
value.ID = p.GetID()
value.AccessHash = p.GetAccessHash()
value.FileReference = p.GetFileReference()
return value
}
// AsNotEmpty tries to map PhotoEmpty to Photo.
func (p *PhotoEmpty) AsNotEmpty() (*Photo, bool) {
return nil, false
}
// AsNotEmpty tries to map Photo to Photo.
func (p *Photo) AsNotEmpty() (*Photo, bool) {
return p, true
}
// DecodePhoto implements binary de-serialization for PhotoClass.
func DecodePhoto(buf *bin.Buffer) (PhotoClass, error) {
id, err := buf.PeekID()
if err != nil {
return nil, err
}
switch id {
case PhotoEmptyTypeID:
// Decoding photoEmpty#2331b22d.
v := PhotoEmpty{}
if err := v.Decode(buf); err != nil {
return nil, fmt.Errorf("unable to decode PhotoClass: %w", err)
}
return &v, nil
case PhotoTypeID:
// Decoding photo#fb197a65.
v := Photo{}
if err := v.Decode(buf); err != nil {
return nil, fmt.Errorf("unable to decode PhotoClass: %w", err)
}
return &v, nil
default:
return nil, fmt.Errorf("unable to decode PhotoClass: %w", bin.NewUnexpectedID(id))
}
}
// Photo boxes the PhotoClass providing a helper.
type PhotoBox struct {
Photo PhotoClass
}
// Decode implements bin.Decoder for PhotoBox.
func (b *PhotoBox) Decode(buf *bin.Buffer) error {
if b == nil {
return fmt.Errorf("unable to decode PhotoBox to nil")
}
v, err := DecodePhoto(buf)
if err != nil {
return fmt.Errorf("unable to decode boxed value: %w", err)
}
b.Photo = v
return nil
}
// Encode implements bin.Encoder for PhotoBox.
func (b *PhotoBox) Encode(buf *bin.Buffer) error {
if b == nil || b.Photo == nil {
return fmt.Errorf("unable to encode PhotoClass as nil")
}
return b.Photo.Encode(buf)
}
transaction.rs | use crate::{
debug_print, ConnectionTrait, DbBackend, DbErr, ExecResult, InnerConnection, QueryResult,
Statement, TransactionStream,
};
#[cfg(feature = "sqlx-dep")]
use crate::{sqlx_error_to_exec_err, sqlx_error_to_query_err};
use futures::lock::Mutex;
#[cfg(feature = "sqlx-dep")]
use sqlx::{pool::PoolConnection, TransactionManager};
use tracing::instrument;
use std::{future::Future, pin::Pin, sync::Arc};
// A Transaction is just sugar for a connection on which START TRANSACTION has been executed.
/// Defines a database transaction, tracking whether the transaction is open and
/// which database backend it runs against
pub struct DatabaseTransaction {
conn: Arc<Mutex<InnerConnection>>,
backend: DbBackend,
open: bool,
metric_callback: Option<crate::metric::Callback>,
}
impl std::fmt::Debug for DatabaseTransaction {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "DatabaseTransaction")
}
}
impl DatabaseTransaction {
#[cfg(feature = "sqlx-mysql")]
pub(crate) async fn new_mysql(
inner: PoolConnection<sqlx::MySql>,
metric_callback: Option<crate::metric::Callback>,
) -> Result<DatabaseTransaction, DbErr> {
Self::begin(
Arc::new(Mutex::new(InnerConnection::MySql(inner))),
DbBackend::MySql,
metric_callback,
)
.await
}
#[cfg(feature = "sqlx-postgres")]
pub(crate) async fn new_postgres(
inner: PoolConnection<sqlx::Postgres>,
metric_callback: Option<crate::metric::Callback>,
) -> Result<DatabaseTransaction, DbErr> {
Self::begin(
Arc::new(Mutex::new(InnerConnection::Postgres(inner))),
DbBackend::Postgres,
metric_callback,
)
.await
}
#[cfg(feature = "sqlx-sqlite")]
pub(crate) async fn new_sqlite(
inner: PoolConnection<sqlx::Sqlite>,
metric_callback: Option<crate::metric::Callback>,
) -> Result<DatabaseTransaction, DbErr> {
Self::begin(
Arc::new(Mutex::new(InnerConnection::Sqlite(inner))),
DbBackend::Sqlite,
metric_callback,
)
.await
}
#[cfg(feature = "mock")]
pub(crate) async fn new_mock(
inner: Arc<crate::MockDatabaseConnection>,
metric_callback: Option<crate::metric::Callback>,
) -> Result<DatabaseTransaction, DbErr> {
let backend = inner.get_database_backend();
Self::begin(
Arc::new(Mutex::new(InnerConnection::Mock(inner))),
backend,
metric_callback,
).await
}
#[instrument(level = "trace", skip(metric_callback))]
async fn begin(
conn: Arc<Mutex<InnerConnection>>,
backend: DbBackend,
metric_callback: Option<crate::metric::Callback>,
) -> Result<DatabaseTransaction, DbErr> {
let res = DatabaseTransaction {
conn,
backend,
open: true,
metric_callback,
};
match *res.conn.lock().await {
#[cfg(feature = "sqlx-mysql")]
InnerConnection::MySql(ref mut c) => {
<sqlx::MySql as sqlx::Database>::TransactionManager::begin(c)
.await
.map_err(sqlx_error_to_query_err)?
}
#[cfg(feature = "sqlx-postgres")]
InnerConnection::Postgres(ref mut c) => {
<sqlx::Postgres as sqlx::Database>::TransactionManager::begin(c)
.await
.map_err(sqlx_error_to_query_err)?
}
#[cfg(feature = "sqlx-sqlite")]
InnerConnection::Sqlite(ref mut c) => {
<sqlx::Sqlite as sqlx::Database>::TransactionManager::begin(c)
.await
.map_err(sqlx_error_to_query_err)?
}
#[cfg(feature = "mock")]
InnerConnection::Mock(ref mut c) => {
c.begin();
}
}
Ok(res)
}
    /// Runs a transaction to completion, returning the callback's result and rolling
    /// back the transaction if an error is encountered
#[instrument(level = "trace", skip(callback))]
pub(crate) async fn run<F, T, E>(self, callback: F) -> Result<T, TransactionError<E>>
where
F: for<'b> FnOnce(
&'b DatabaseTransaction,
) -> Pin<Box<dyn Future<Output = Result<T, E>> + Send + 'b>>
+ Send,
T: Send,
E: std::error::Error + Send,
{
let res = callback(&self).await.map_err(TransactionError::Transaction);
if res.is_ok() {
self.commit().await.map_err(TransactionError::Connection)?;
} else {
self.rollback()
.await
.map_err(TransactionError::Connection)?;
}
res
}
/// Commit a transaction atomically
#[instrument(level = "trace")]
pub async fn commit(mut self) -> Result<(), DbErr> {
self.open = false;
match *self.conn.lock().await {
#[cfg(feature = "sqlx-mysql")]
InnerConnection::MySql(ref mut c) => {
<sqlx::MySql as sqlx::Database>::TransactionManager::commit(c)
.await
.map_err(sqlx_error_to_query_err)?
}
#[cfg(feature = "sqlx-postgres")]
InnerConnection::Postgres(ref mut c) => {
<sqlx::Postgres as sqlx::Database>::TransactionManager::commit(c)
.await
.map_err(sqlx_error_to_query_err)?
}
#[cfg(feature = "sqlx-sqlite")]
InnerConnection::Sqlite(ref mut c) => {
<sqlx::Sqlite as sqlx::Database>::TransactionManager::commit(c)
.await
.map_err(sqlx_error_to_query_err)?
}
#[cfg(feature = "mock")]
InnerConnection::Mock(ref mut c) => {
c.commit();
}
}
Ok(())
}
    /// Rolls back a transaction in case errors are encountered during the operation
#[instrument(level = "trace")]
pub async fn rollback(mut self) -> Result<(), DbErr> {
self.open = false;
match *self.conn.lock().await {
#[cfg(feature = "sqlx-mysql")]
InnerConnection::MySql(ref mut c) => {
<sqlx::MySql as sqlx::Database>::TransactionManager::rollback(c)
.await
.map_err(sqlx_error_to_query_err)?
}
#[cfg(feature = "sqlx-postgres")]
InnerConnection::Postgres(ref mut c) => {
<sqlx::Postgres as sqlx::Database>::TransactionManager::rollback(c)
.await
.map_err(sqlx_error_to_query_err)?
}
#[cfg(feature = "sqlx-sqlite")]
InnerConnection::Sqlite(ref mut c) => {
<sqlx::Sqlite as sqlx::Database>::TransactionManager::rollback(c)
.await
.map_err(sqlx_error_to_query_err)?
}
#[cfg(feature = "mock")]
InnerConnection::Mock(ref mut c) => {
c.rollback();
}
}
Ok(())
}
// the rollback is queued and will be performed on next async operation, like returning the connection to the pool
#[instrument(level = "trace")]
fn start_rollback(&mut self) {
if self.open {
if let Some(mut conn) = self.conn.try_lock() {
match &mut *conn {
#[cfg(feature = "sqlx-mysql")]
InnerConnection::MySql(c) => {
<sqlx::MySql as sqlx::Database>::TransactionManager::start_rollback(c);
}
#[cfg(feature = "sqlx-postgres")]
InnerConnection::Postgres(c) => {
<sqlx::Postgres as sqlx::Database>::TransactionManager::start_rollback(c);
}
#[cfg(feature = "sqlx-sqlite")]
InnerConnection::Sqlite(c) => {
<sqlx::Sqlite as sqlx::Database>::TransactionManager::start_rollback(c);
}
#[cfg(feature = "mock")]
InnerConnection::Mock(c) => {
c.rollback();
}
}
} else {
                // This should never happen.
panic!("Dropping a locked Transaction");
}
}
}
}
impl Drop for DatabaseTransaction {
fn drop(&mut self) {
self.start_rollback();
}
}
#[async_trait::async_trait]
impl<'a> ConnectionTrait<'a> for DatabaseTransaction {
type Stream = TransactionStream<'a>;
fn get_database_backend(&self) -> DbBackend {
// this way we don't need to lock
self.backend
}
#[instrument(level = "trace")]
    async fn execute(&self, stmt: Statement) -> Result<ExecResult, DbErr> {
        debug_print!("{}", stmt);
        let _res = match &mut *self.conn.lock().await {
            #[cfg(feature = "sqlx-mysql")]
            InnerConnection::MySql(conn) => {
                let query = crate::driver::sqlx_mysql::sqlx_query(&stmt);
                crate::metric::metric!(self.metric_callback, &stmt, {
                    query.execute(conn).await.map(Into::into)
                })
            }
            #[cfg(feature = "sqlx-postgres")]
            InnerConnection::Postgres(conn) => {
                let query = crate::driver::sqlx_postgres::sqlx_query(&stmt);
                crate::metric::metric!(self.metric_callback, &stmt, {
                    query.execute(conn).await.map(Into::into)
                })
            }
            #[cfg(feature = "sqlx-sqlite")]
            InnerConnection::Sqlite(conn) => {
                let query = crate::driver::sqlx_sqlite::sqlx_query(&stmt);
                crate::metric::metric!(self.metric_callback, &stmt, {
                    query.execute(conn).await.map(Into::into)
                })
            }
            #[cfg(feature = "mock")]
            InnerConnection::Mock(conn) => return conn.execute(stmt),
        };
        #[cfg(feature = "sqlx-dep")]
        _res.map_err(sqlx_error_to_exec_err)
    }
#[instrument(level = "trace")]
async fn query_one(&self, stmt: Statement) -> Result<Option<QueryResult>, DbErr> {
debug_print!("{}", stmt);
let _res = match &mut *self.conn.lock().await {
#[cfg(feature = "sqlx-mysql")]
InnerConnection::MySql(conn) => {
let query = crate::driver::sqlx_mysql::sqlx_query(&stmt);
query.fetch_one(conn).await.map(|row| Some(row.into()))
}
#[cfg(feature = "sqlx-postgres")]
InnerConnection::Postgres(conn) => {
let query = crate::driver::sqlx_postgres::sqlx_query(&stmt);
query.fetch_one(conn).await.map(|row| Some(row.into()))
}
#[cfg(feature = "sqlx-sqlite")]
InnerConnection::Sqlite(conn) => {
let query = crate::driver::sqlx_sqlite::sqlx_query(&stmt);
query.fetch_one(conn).await.map(|row| Some(row.into()))
}
#[cfg(feature = "mock")]
InnerConnection::Mock(conn) => return conn.query_one(stmt),
};
#[cfg(feature = "sqlx-dep")]
if let Err(sqlx::Error::RowNotFound) = _res {
Ok(None)
} else {
_res.map_err(sqlx_error_to_query_err)
}
}
#[instrument(level = "trace")]
async fn query_all(&self, stmt: Statement) -> Result<Vec<QueryResult>, DbErr> {
debug_print!("{}", stmt);
let _res = match &mut *self.conn.lock().await {
#[cfg(feature = "sqlx-mysql")]
InnerConnection::MySql(conn) => {
let query = crate::driver::sqlx_mysql::sqlx_query(&stmt);
query
.fetch_all(conn)
.await
.map(|rows| rows.into_iter().map(|r| r.into()).collect())
}
#[cfg(feature = "sqlx-postgres")]
InnerConnection::Postgres(conn) => {
let query = crate::driver::sqlx_postgres::sqlx_query(&stmt);
query
.fetch_all(conn)
.await
.map(|rows| rows.into_iter().map(|r| r.into()).collect())
}
#[cfg(feature = "sqlx-sqlite")]
InnerConnection::Sqlite(conn) => {
let query = crate::driver::sqlx_sqlite::sqlx_query(&stmt);
query
.fetch_all(conn)
.await
.map(|rows| rows.into_iter().map(|r| r.into()).collect())
}
#[cfg(feature = "mock")]
InnerConnection::Mock(conn) => return conn.query_all(stmt),
};
#[cfg(feature = "sqlx-dep")]
_res.map_err(sqlx_error_to_query_err)
}
#[instrument(level = "trace")]
fn stream(
&'a self,
stmt: Statement,
) -> Pin<Box<dyn Future<Output = Result<Self::Stream, DbErr>> + 'a>> {
Box::pin(
async move { Ok(crate::TransactionStream::build(self.conn.lock().await, stmt, self.metric_callback.clone()).await) },
)
}
#[instrument(level = "trace")]
async fn begin(&self) -> Result<DatabaseTransaction, DbErr> {
DatabaseTransaction::begin(
Arc::clone(&self.conn),
self.backend,
self.metric_callback.clone()
).await
}
/// Execute the function inside a transaction.
/// If the function returns an error, the transaction will be rolled back. If it does not return an error, the transaction will be committed.
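    /// A minimal usage sketch (hedged: the closure body and error type are
    /// illustrative, not taken from this crate's tests):
    ///
    /// ```ignore
    /// let value = conn
    ///     .transaction::<_, i32, DbErr>(|txn| {
    ///         Box::pin(async move {
    ///             // run queries against `txn` here
    ///             Ok(42)
    ///         })
    ///     })
    ///     .await;
    /// ```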
#[instrument(level = "trace", skip(_callback))]
async fn transaction<F, T, E>(&self, _callback: F) -> Result<T, TransactionError<E>>
where
F: for<'c> FnOnce(
&'c DatabaseTransaction,
) -> Pin<Box<dyn Future<Output = Result<T, E>> + Send + 'c>>
+ Send,
T: Send,
E: std::error::Error + Send,
{
let transaction = self.begin().await.map_err(TransactionError::Connection)?;
transaction.run(_callback).await
}
}
/// Defines errors for handling transaction failures
#[derive(Debug)]
pub enum TransactionError<E>
where
E: std::error::Error,
{
/// A Database connection error
Connection(DbErr),
/// An error occurring when doing database transactions
Transaction(E),
}
impl<E> std::fmt::Display for TransactionError<E>
where
E: std::error::Error,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
TransactionError::Connection(e) => std::fmt::Display::fmt(e, f),
TransactionError::Transaction(e) => std::fmt::Display::fmt(e, f),
}
}
}
impl<E> std::error::Error for TransactionError<E> where E: std::error::Error {}
PostgresServerCreateStep.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import * as SingleModels from "@azure/arm-postgresql";
import * as FlexibleModels from "@azure/arm-postgresql-flexible";
import { LocationListStep } from "@microsoft/vscode-azext-azureutils";
import { AzureWizardExecuteStep, callWithMaskHandling } from '@microsoft/vscode-azext-utils';
import { AppResource } from "@microsoft/vscode-azext-utils/hostapi";
import { Progress } from 'vscode';
import { ext } from '../../../../extensionVariables';
import { createPostgreSQLClient, createPostgreSQLFlexibleClient } from "../../../../utils/azureClients";
import { localize } from '../../../../utils/localize';
import { nonNullProp } from '../../../../utils/nonNull';
import { AbstractServerCreate, PostgresServerType } from '../../../abstract/models';
import { IPostgresServerWizardContext } from '../IPostgresServerWizardContext';
export class PostgresServerCreateStep extends AzureWizardExecuteStep<IPostgresServerWizardContext> {
public priority: number = 150;
public async execute(context: IPostgresServerWizardContext, progress: Progress<{ message?: string; increment?: number }>): Promise<void> {
const locationName: string = (await LocationListStep.getLocation(context)).name;
const rgName: string = nonNullProp(nonNullProp(context, 'resourceGroup'), 'name');
const size: string = nonNullProp(nonNullProp(context, 'sku'), 'size');
const newServerName = nonNullProp(context, 'newServerName');
const password: string = nonNullProp(context, 'adminPassword');
return await callWithMaskHandling(
async () => {
const serverType = nonNullProp(context, 'serverType');
const createMessage: string = localize('creatingPostgresServer', 'Creating PostgreSQL Server "{0}"... It should be ready in several minutes.', context.newServerName);
ext.outputChannel.appendLog(createMessage);
progress.report({ message: createMessage });
const options: AbstractServerCreate = {
location: locationName,
sku: nonNullProp(context, 'sku'),
administratorLogin: nonNullProp(context, 'shortUserName'),
administratorLoginPassword: password,
size: parseInt(size)
};
switch (serverType) {
case PostgresServerType.Single:
const singleClient: SingleModels.PostgreSQLManagementClient = await createPostgreSQLClient(context);
context.server = await singleClient.servers.beginCreateAndWait(rgName, newServerName, this.asSingleParameters(options));
break;
case PostgresServerType.Flexible:
const flexiClient: FlexibleModels.PostgreSQLManagementClient = await createPostgreSQLFlexibleClient(context);
context.server = await flexiClient.servers.beginCreateAndWait(rgName, newServerName, this.asFlexibleParameters(options));
break;
}
context.server.serverType = serverType;
context.activityResult = context.server as AppResource;
},
password);
}
public shouldExecute(context: IPostgresServerWizardContext): boolean {
return !context.server;
}
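    // A minimal usage sketch (hypothetical `context` and `progress` values;
    // illustrative only, not part of the extension's tests):
    //
    //   const step = new PostgresServerCreateStep();
    //   if (step.shouldExecute(context)) {
    //       await step.execute(context, progress);
    //   }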
private asFlexibleParameters(parameters: AbstractServerCreate): FlexibleModels.Server {
return {
location: parameters.location,
            version: "12",
            administratorLogin: parameters.administratorLogin,
            administratorLoginPassword: parameters.administratorLoginPassword,
            storage: {
storageSizeGB: parameters.size
},
sku: {
name: parameters.sku.name,
tier: parameters.sku.tier
},
}
}
private asSingleParameters(parameters: AbstractServerCreate): SingleModels.ServerForCreate {
return {
location: parameters.location,
sku: {
name: parameters.sku.name,
capacity: parameters.sku.capacity,
size: parameters.sku.size,
family: parameters.sku.family,
tier: parameters.sku.tier as SingleModels.SkuTier
},
properties: {
administratorLogin: parameters.administratorLogin,
administratorLoginPassword: parameters.administratorLoginPassword,
sslEnforcement: "Enabled",
createMode: "Default",
version: "11",
storageProfile: {
storageMB: parameters.size
}
}
}
}
}
schedules.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Models for scheduled execution of jobs"""
import enum
from typing import Optional, Type
from flask_appbuilder import Model
from sqlalchemy import Boolean, Column, Enum, ForeignKey, Integer, String, Text
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from superset import security_manager
from superset.models.helpers import AuditMixinNullable, ImportMixin
metadata = Model.metadata # pylint: disable=no-member
class ScheduleType(str, enum.Enum):
slice = "slice"
dashboard = "dashboard"
class EmailDeliveryType(str, enum.Enum):
    attachment = "Attachment"
    inline = "Inline"
class SliceEmailReportFormat(str, enum.Enum):
visualization = "Visualization"
data = "Raw data"
class EmailSchedule:
"""Schedules for emailing slices / dashboards"""
__tablename__ = "email_schedules"
id = Column(Integer, primary_key=True)
active = Column(Boolean, default=True, index=True)
crontab = Column(String(50))
@declared_attr
def user_id(self):
return Column(Integer, ForeignKey("ab_user.id"))
@declared_attr
def user(self):
return relationship(
security_manager.user_model,
backref=self.__tablename__,
foreign_keys=[self.user_id],
)
recipients = Column(Text)
deliver_as_group = Column(Boolean, default=False)
delivery_type = Column(Enum(EmailDeliveryType))
class DashboardEmailSchedule(Model, AuditMixinNullable, ImportMixin, EmailSchedule):
__tablename__ = "dashboard_email_schedules"
dashboard_id = Column(Integer, ForeignKey("dashboards.id"))
dashboard = relationship(
"Dashboard", backref="email_schedules", foreign_keys=[dashboard_id]
)
class SliceEmailSchedule(Model, AuditMixinNullable, ImportMixin, EmailSchedule):
__tablename__ = "slice_email_schedules"
slice_id = Column(Integer, ForeignKey("slices.id"))
slice = relationship("Slice", backref="email_schedules", foreign_keys=[slice_id])
email_format = Column(Enum(SliceEmailReportFormat))
def get_scheduler_model(report_type: ScheduleType) -> Optional[Type[EmailSchedule]]:
if report_type == ScheduleType.dashboard:
return DashboardEmailSchedule
elif report_type == ScheduleType.slice:
return SliceEmailSchedule
    return None
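# A minimal usage sketch (illustrative only):
#
#   model = get_scheduler_model(ScheduleType.dashboard)
#   assert model is DashboardEmailSchedule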
tempfile.py | """Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. All of the interfaces
provided by this module can be used without fear of race conditions
except for 'mktemp'. 'mktemp' is subject to race conditions and
should not be used; it is provided for backward compatibility only.
The default path names are returned as str. If you supply bytes as
input, all return values will be in bytes. Ex:
>>> tempfile.mkstemp()
(4, '/tmp/tmptpu9nin8')
>>> tempfile.mkdtemp(suffix=b'')
b'/tmp/tmppbi8f0hy'
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
__all__ = [
"NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
"SpooledTemporaryFile", "TemporaryDirectory",
"mkstemp", "mkdtemp", # low level safe interfaces
"mktemp", # deprecated unsafe interface
"TMP_MAX", "gettempprefix", # constants
"tempdir", "gettempdir",
"gettempprefixb", "gettempdirb",
]
# Imports.
import functools as _functools
import warnings as _warnings
import io as _io
import os as _os
try:
import shutil as _shutil
_rmtree = _shutil.rmtree
except ImportError:
import sys as _sys
import stat as _stat
# version vulnerable to race conditions
def _rmtree_unsafe(path, onerror):
try:
if _os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(_os.path.islink, path, _sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = _os.listdir(path)
except OSError:
onerror(_os.listdir, path, _sys.exc_info())
for name in names:
fullname = _os.path.join(path, name)
try:
mode = _os.lstat(fullname).st_mode
except OSError:
mode = 0
if _stat.S_ISDIR(mode):
_rmtree_unsafe(fullname, onerror)
else:
try:
_os.unlink(fullname)
except OSError:
onerror(_os.unlink, fullname, _sys.exc_info())
try:
_os.rmdir(path)
except OSError:
onerror(_os.rmdir, path, _sys.exc_info())
# Version using fd-based APIs to protect against races
def _rmtree_safe_fd(topfd, path, onerror):
names = []
try:
names = _os.listdir(topfd)
except OSError as err:
err.filename = path
onerror(_os.listdir, path, _sys.exc_info())
for name in names:
fullname = _os.path.join(path, name)
try:
orig_st = _os.stat(name, dir_fd=topfd, follow_symlinks=False)
mode = orig_st.st_mode
except OSError:
mode = 0
if _stat.S_ISDIR(mode):
try:
dirfd = _os.open(name, _os.O_RDONLY, dir_fd=topfd)
except OSError:
onerror(_os.open, fullname, _sys.exc_info())
else:
try:
if _os.path.samestat(orig_st, _os.fstat(dirfd)):
_rmtree_safe_fd(dirfd, fullname, onerror)
try:
_os.rmdir(name, dir_fd=topfd)
except OSError:
onerror(_os.rmdir, fullname, _sys.exc_info())
else:
try:
# This can only happen if someone replaces
# a directory with a symlink after the call to
# stat.S_ISDIR above.
raise OSError("Cannot call rmtree on a symbolic "
"link")
except OSError:
onerror(_os.path.islink, fullname, _sys.exc_info())
finally:
_os.close(dirfd)
else:
try:
_os.unlink(name, dir_fd=topfd)
except OSError:
onerror(_os.unlink, fullname, _sys.exc_info())
_use_fd_functions = ({_os.open, _os.stat, _os.unlink, _os.rmdir} <=
_os.supports_dir_fd and
_os.listdir in _os.supports_fd and
_os.stat in _os.supports_follow_symlinks)
def _rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is platform and implementation dependent;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
if _use_fd_functions:
# While the unsafe rmtree works fine on bytes, the fd based does not.
if isinstance(path, bytes):
path = _os.fsdecode(path)
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
try:
orig_st = _os.lstat(path)
except Exception:
onerror(_os.lstat, path, _sys.exc_info())
return
try:
fd = _os.open(path, _os.O_RDONLY)
except Exception:
onerror(_os.lstat, path, _sys.exc_info())
return
try:
if _os.path.samestat(orig_st, _os.fstat(fd)):
_rmtree_safe_fd(fd, path, onerror)
try:
_os.rmdir(path)
except OSError:
onerror(_os.rmdir, path, _sys.exc_info())
else:
try:
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(_os.path.islink, path, _sys.exc_info())
finally:
_os.close(fd)
else:
return _rmtree_unsafe(path, onerror)
import errno as _errno
from random import Random as _Random
import weakref as _weakref
try:
import _thread
except ImportError:
import _dummy_thread as _thread
_allocate_lock = _thread.allocate_lock
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOFOLLOW'):
_text_openflags |= _os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
_bin_openflags |= _os.O_BINARY
if hasattr(_os, 'TMP_MAX'):
TMP_MAX = _os.TMP_MAX
else:
TMP_MAX = 10000
# This variable _was_ unused for legacy reasons, see issue 10354.
# But as of 3.5 we actually use it at runtime so changing it would
# have a possibly desirable side effect... But we do not want to support
# that as an API. It is undocumented on purpose. Do not depend on this.
template = "tmp"
# Internal routines.
_once_lock = _allocate_lock()
if hasattr(_os, "lstat"):
_stat = _os.lstat
elif hasattr(_os, "stat"):
_stat = _os.stat
else:
# Fallback. All we need is something that raises OSError if the
# file doesn't exist.
def _stat(fn):
fd = _os.open(fn, _os.O_RDONLY)
_os.close(fd)
def _exists(fn):
try:
_stat(fn)
except OSError:
return False
else:
return True
def _infer_return_type(*args):
"""Look at the type of all args and divine their implied return type."""
return_type = None
for arg in args:
if arg is None:
continue
if isinstance(arg, bytes):
if return_type is str:
raise TypeError("Can't mix bytes and non-bytes in "
"path components.")
return_type = bytes
else:
if return_type is bytes:
raise TypeError("Can't mix bytes and non-bytes in "
"path components.")
return_type = str
if return_type is None:
return str # tempfile APIs return a str by default.
return return_type
def _sanitize_params(prefix, suffix, dir):
"""Common parameter processing for most APIs in this module."""
output_type = _infer_return_type(prefix, suffix, dir)
if suffix is None:
suffix = output_type()
if prefix is None:
if output_type is str:
prefix = template
else:
prefix = _os.fsencode(template)
if dir is None:
if output_type is str:
dir = gettempdir()
else:
dir = gettempdirb()
return prefix, suffix, dir, output_type
class _RandomNameSequence:
"""An instance of _RandomNameSequence generates an endless
sequence of unpredictable strings which can safely be incorporated
into file names. Each string is eight characters long. Multiple
threads can safely use the same instance at the same time.
_RandomNameSequence is an iterator."""
characters = "abcdefghijklmnopqrstuvwxyz0123456789_"
@property
def rng(self):
cur_pid = _os.getpid()
if cur_pid != getattr(self, '_rng_pid', None):
self._rng = _Random()
self._rng_pid = cur_pid
return self._rng
def __iter__(self):
return self
def __next__(self):
c = self.characters
choose = self.rng.choice
letters = [choose(c) for dummy in range(8)]
return ''.join(letters)
def _candidate_tempdir_list():
"""Generate a list of candidate temporary directories which
_get_default_tempdir will try."""
dirlist = []
# First, try the environment.
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = _os.getenv(envname)
if dirname: dirlist.append(dirname)
# Failing that, try OS-specific locations.
if _os.name == 'nt':
dirlist.extend([ _os.path.expanduser(r'~\AppData\Local\Temp'),
_os.path.expandvars(r'%SYSTEMROOT%\Temp'),
r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
else:
dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
# As a last resort, the current directory.
try:
dirlist.append(_os.getcwd())
except (AttributeError, OSError):
dirlist.append(_os.curdir)
return dirlist
def _get_default_tempdir():
"""Calculate the default directory to use for temporary files.
This routine should be called exactly once.
We determine whether or not a candidate temp dir is usable by
trying to create and write to a file in that directory. If this
is successful, the test file is deleted. To prevent denial of
service, the name of the test file must be randomized."""
namer = _RandomNameSequence()
dirlist = _candidate_tempdir_list()
for dir in dirlist:
if dir != _os.curdir:
dir = _os.path.abspath(dir)
# Try only a few names per directory.
for seq in range(100):
name = next(namer)
filename = _os.path.join(dir, name)
try:
fd = _os.open(filename, _bin_openflags, 0o600)
try:
try:
with _io.open(fd, 'wb', closefd=False) as fp:
fp.write(b'blat')
finally:
_os.close(fd)
finally:
_os.unlink(filename)
return dir
except FileExistsError:
pass
except PermissionError:
# This exception is thrown when a directory with the chosen name
# already exists on windows.
if (_os.name == 'nt' and _os.path.isdir(dir) and
_os.access(dir, _os.W_OK)):
continue
break # no point trying more names in this directory
except OSError:
break # no point trying more names in this directory
raise FileNotFoundError(_errno.ENOENT,
"No usable temporary directory found in %s" %
dirlist)
_name_sequence = None
def _get_candidate_names():
"""Common setup sequence for all user-callable interfaces."""
global _name_sequence
if _name_sequence is None:
_once_lock.acquire()
try:
if _name_sequence is None:
_name_sequence = _RandomNameSequence()
finally:
_once_lock.release()
return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags, output_type):
"""Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""
names = _get_candidate_names()
if output_type is bytes:
names = map(_os.fsencode, names)
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, pre + name + suf)
try:
fd = _os.open(file, flags, 0o600)
except FileExistsError:
continue # try again
except PermissionError:
# This exception is thrown when a directory with the chosen name
# already exists on windows.
if (_os.name == 'nt' and _os.path.isdir(dir) and
_os.access(dir, _os.W_OK)):
continue
else:
raise
return (fd, _os.path.abspath(file))
raise FileExistsError(_errno.EEXIST,
"No usable temporary file name found")
# User visible interfaces.
def gettempprefix():
"""The default prefix for temporary directories."""
return template
def gettempprefixb():
"""The default prefix for temporary directories as bytes."""
return _os.fsencode(gettempprefix())
tempdir = None
def gettempdir():
"""Accessor for tempfile.tempdir."""
global tempdir
if tempdir is None:
_once_lock.acquire()
try:
if tempdir is None:
tempdir = _get_default_tempdir()
finally:
_once_lock.release()
return tempdir
def gettempdirb():
"""A bytes version of tempfile.gettempdir()."""
return _os.fsencode(gettempdir())
def mkstemp(suffix=None, prefix=None, dir=None, text=False):
"""User-callable function to create and return a unique temporary
file. The return value is a pair (fd, name) where fd is the
file descriptor returned by os.open, and name is the filename.
If 'suffix' is not None, the file name will end with that suffix,
otherwise there will be no suffix.
If 'prefix' is not None, the file name will begin with that prefix,
otherwise a default prefix is used.
If 'dir' is not None, the file will be created in that directory,
otherwise a default directory is used.
If 'text' is specified and true, the file is opened in text
mode. Else (the default) the file is opened in binary mode. On
some operating systems, this makes no difference.
If any of 'suffix', 'prefix' and 'dir' are not None, they must be the
same type. If they are bytes, the returned name will be bytes; str
otherwise.
The file is readable and writable only by the creating user ID.
If the operating system uses permission bits to indicate whether a
file is executable, the file is executable by no one. The file
descriptor is not inherited by children of this process.
Caller is responsible for deleting the file when done with it.
"""
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
if text:
flags = _text_openflags
else:
flags = _bin_openflags
return _mkstemp_inner(dir, prefix, suffix, flags, output_type)
def mkdtemp(suffix=None, prefix=None, dir=None):
"""User-callable function to create and return a unique temporary
directory. The return value is the pathname of the directory.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
The directory is readable, writable, and searchable only by the
creating user.
Caller is responsible for deleting the directory when done with it.
"""
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
names = _get_candidate_names()
if output_type is bytes:
names = map(_os.fsencode, names)
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
try:
_os.mkdir(file, 0o700)
except FileExistsError:
continue # try again
except PermissionError:
# This exception is thrown when a directory with the chosen name
# already exists on windows.
if (_os.name == 'nt' and _os.path.isdir(dir) and
_os.access(dir, _os.W_OK)):
continue
else:
raise
return file
raise FileExistsError(_errno.EEXIST,
"No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
"""User-callable function to return a unique temporary file name. The
file is not created.
Arguments are similar to mkstemp, except that the 'text' argument is
not accepted, and suffix=None, prefix=None and bytes file names are not
supported.
THIS FUNCTION IS UNSAFE AND SHOULD NOT BE USED. The file name may
refer to a file that did not exist at some point, but by the time
you get around to creating it, someone else may have beaten you to
the punch.
"""
## from warnings import warn as _warn
## _warn("mktemp is a potential security risk to your program",
## RuntimeWarning, stacklevel=2)
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
if not _exists(file):
return file
raise FileExistsError(_errno.EEXIST,
"No usable temporary filename found")
class _TemporaryFileCloser:
"""A separate object allowing proper closing of a temporary file's
underlying file object, without adding a __del__ method to the
temporary file."""
file = None # Set here since __del__ checks it
close_called = False
def __init__(self, file, name, delete=True):
self.file = file
self.name = name
self.delete = delete
# NT provides delete-on-close as a primitive, so we don't need
# the wrapper to do anything special. We still use it so that
# file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
if _os.name != 'nt':
# Cache the unlinker so we don't get spurious errors at
# shutdown when the module-level "os" is None'd out. Note
# that this must be referenced as self.unlink, because the
# name TemporaryFileWrapper may also get None'd out before
# __del__ is called.
def close(self, unlink=_os.unlink):
if not self.close_called and self.file is not None:
self.close_called = True
try:
self.file.close()
finally:
if self.delete:
unlink(self.name)
# Need to ensure the file is deleted on __del__
def __del__(self):
self.close()
else:
def close(self):
if not self.close_called:
self.close_called = True
self.file.close()
class _TemporaryFileWrapper:
"""Temporary file wrapper
This class provides a wrapper around files opened for
temporary use. In particular, it seeks to automatically
remove the file when it is no longer needed.
"""
def __init__(self, file, name, delete=True):
self.file = file
self.name = name
self.delete = delete
self._closer = _TemporaryFileCloser(file, name, delete)
def __getattr__(self, name):
# Attribute lookups are delegated to the underlying file
# and cached for non-numeric results
# (i.e. methods are cached, closed and friends are not)
file = self.__dict__['file']
a = getattr(file, name)
if hasattr(a, '__call__'):
func = a
@_functools.wraps(func)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
# Avoid closing the file as long as the wrapper is alive,
# see issue #18879.
func_wrapper._closer = self._closer
a = func_wrapper
if not isinstance(a, int):
setattr(self, name, a)
return a
# The underlying __enter__ method returns the wrong object
# (self.file) so override it to return the wrapper
def __enter__(self):
self.file.__enter__()
return self
# Need to trap __exit__ as well to ensure the file gets
# deleted when used in a with statement
def __exit__(self, exc, value, tb):
result = self.file.__exit__(exc, value, tb)
self.close()
return result
def close(self):
"""
Close the temporary file, possibly deleting it.
"""
self._closer.close()
# iter() doesn't use __getattr__ to find the __iter__ method
def __iter__(self):
# Don't return iter(self.file), but yield from it to avoid closing
# file as long as it's being used as iterator (see issue #23700). We
# can't use 'yield from' here because iter(file) returns the file
# object itself, which has a close method, and thus the file would get
# closed when the generator is finalized, due to PEP380 semantics.
for line in self.file:
yield line
def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None,
newline=None, suffix=None, prefix=None,
dir=None, delete=True):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to io.open (default "w+b").
'buffering' -- the buffer size argument to io.open (default -1).
'encoding' -- the encoding argument to io.open (default None)
'newline' -- the newline argument to io.open (default None)
'delete' -- whether the file is deleted on close (default True).
The file is created as mkstemp() would do it.
Returns an object with a file-like interface; the name of the file
is accessible as its 'name' attribute. The file will be automatically
deleted when it is closed unless the 'delete' argument is set to False.
"""
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
flags = _bin_openflags
# Setting O_TEMPORARY in the flags causes the OS to delete
# the file when it is closed. This is only supported by Windows.
if _os.name == 'nt' and delete:
flags |= _os.O_TEMPORARY
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
try:
file = _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
return _TemporaryFileWrapper(file, name, delete)
except BaseException:
_os.unlink(name)
_os.close(fd)
raise
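# A minimal usage sketch of NamedTemporaryFile (illustrative only, not part of
# the stdlib source):
#
#   with NamedTemporaryFile(mode="w+", suffix=".txt") as f:
#       f.write("scratch data")
#       f.seek(0)
#       assert f.read() == "scratch data"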
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
# On non-POSIX and Cygwin systems, assume that we cannot unlink a file
# while it is open.
TemporaryFile = NamedTemporaryFile
else:
# Is the O_TMPFILE flag available and does it work?
# The flag is set to False if os.open(dir, os.O_TMPFILE) raises an
# IsADirectoryError exception
_O_TMPFILE_WORKS = hasattr(_os, 'O_TMPFILE')
def TemporaryFile(mode='w+b', buffering=-1, encoding=None,
newline=None, suffix=None, prefix=None,
dir=None):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to io.open (default "w+b").
'buffering' -- the buffer size argument to io.open (default -1).
'encoding' -- the encoding argument to io.open (default None)
'newline' -- the newline argument to io.open (default None)
The file is created as mkstemp() would do it.
Returns an object with a file-like interface. The file has no
name, and will cease to exist when it is closed.
"""
global _O_TMPFILE_WORKS
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
flags = _bin_openflags
if _O_TMPFILE_WORKS:
try:
flags2 = (flags | _os.O_TMPFILE) & ~_os.O_CREAT
fd = _os.open(dir, flags2, 0o600)
except IsADirectoryError:
# Linux kernel older than 3.11 ignores the O_TMPFILE flag:
# O_TMPFILE is read as O_DIRECTORY. Trying to open a directory
# with O_RDWR|O_DIRECTORY fails with IsADirectoryError, a
# directory cannot be open to write. Set flag to False to not
# try again.
_O_TMPFILE_WORKS = False
except OSError:
# The filesystem of the directory does not support O_TMPFILE.
# For example, OSError(95, 'Operation not supported').
#
# On Linux kernel older than 3.11, trying to open a regular
# file (or a symbolic link to a regular file) with O_TMPFILE
# fails with NotADirectoryError, because O_TMPFILE is read as
# O_DIRECTORY.
pass
else:
try:
return _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
except:
_os.close(fd)
raise
# Fallback to _mkstemp_inner().
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
try:
_os.unlink(name)
return _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
except:
_os.close(fd)
raise
class SpooledTemporaryFile:
"""Temporary file wrapper, specialized to switch from BytesIO
or StringIO to a real file when it exceeds a certain size or
when a fileno is needed.
"""
_rolled = False
def __init__(self, max_size=0, mode='w+b', buffering=-1,
encoding=None, newline=None,
suffix=None, prefix=None, dir=None):
if 'b' in mode:
self._file = _io.BytesIO()
else:
# Setting newline="\n" avoids newline translation;
# this is important because otherwise on Windows we'd
# get double newline translation upon rollover().
self._file = _io.StringIO(newline="\n")
self._max_size = max_size
self._rolled = False
self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering,
'suffix': suffix, 'prefix': prefix,
'encoding': encoding, 'newline': newline,
'dir': dir}
def _check(self, file):
if self._rolled: return
max_size = self._max_size
if max_size and file.tell() > max_size:
self.rollover()
def rollover(self):
if self._rolled: return
file = self._file
newfile = self._file = TemporaryFile(**self._TemporaryFileArgs)
del self._TemporaryFileArgs
newfile.write(file.getvalue())
newfile.seek(file.tell(), 0)
self._rolled = True
# The method caching trick from NamedTemporaryFile
# won't work here, because _file may change from a
# BytesIO/StringIO instance to a real file. So we list
# all the methods directly.
# Context management protocol
def __enter__(self):
if self._file.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc, value, tb):
self._file.close()
# file protocol
def __iter__(self):
return self._file.__iter__()
def close(self):
self._file.close()
@property
def closed(self):
return self._file.closed
@property
def encoding(self):
try:
return self._file.encoding
except AttributeError:
if 'b' in self._TemporaryFileArgs['mode']:
raise
return self._TemporaryFileArgs['encoding']
def fileno(self):
self.rollover()
return self._file.fileno()
def flush(self):
self._file.flush()
def isatty(self):
return self._file.isatty()
@property
def mode(self):
try:
return self._file.mode
except AttributeError:
return self._TemporaryFileArgs['mode']
@property
def name(self):
try:
return self._file.name
except AttributeError:
return None
@property
def newlines(self):
try:
return self._file.newlines
except AttributeError:
if 'b' in self._TemporaryFileArgs['mode']:
raise
return self._TemporaryFileArgs['newline']
def read(self, *args):
return self._file.read(*args)
    def readline(self, *args):
        return self._file.readline(*args)
def readlines(self, *args):
return self._file.readlines(*args)
def seek(self, *args):
self._file.seek(*args)
@property
def softspace(self):
return self._file.softspace
def tell(self):
return self._file.tell()
def truncate(self, size=None):
if size is None:
self._file.truncate()
else:
if size > self._max_size:
self.rollover()
self._file.truncate(size)
def write(self, s):
file = self._file
rv = file.write(s)
self._check(file)
return rv
def writelines(self, iterable):
file = self._file
rv = file.writelines(iterable)
self._check(file)
return rv
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix=None, prefix=None, dir=None):
self.name = mkdtemp(suffix, prefix, dir)
self._finalizer = _weakref.finalize(
self, self._cleanup, self.name,
warn_message="Implicitly cleaning up {!r}".format(self))
@classmethod
def _cleanup(cls, name, warn_message):
_rmtree(name)
_warnings.warn(warn_message, ResourceWarning)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self.name
def __exit__(self, exc, value, tb):
self.cleanup()
def cleanup(self):
if self._finalizer.detach():
            _rmtree(self.name)
mod.rs | use std::fmt;
use std::ops::Deref;
use async_trait::async_trait;
use futures::stream::{self, Stream, StreamExt, TryStreamExt};
use crate::auth::{Scope, SCOPE_READ, SCOPE_WRITE};
use crate::class::{Instance, State, TCType};
use crate::collection::class::CollectionInstance;
use crate::collection::Collection;
use crate::error;
use crate::general::Map;
use crate::handler::*;
use crate::request::Request;
use crate::scalar::{label, MethodType, PathSegment, Scalar, ScalarClass, Value};
use crate::transaction::{Transact, Txn, TxnId};
use crate::{Match, TCResult, TCTryStream, TryCastInto};
use super::schema::Column;
mod bounds;
mod class;
mod collator;
mod file;
mod slice;
pub use bounds::*;
pub use class::*;
pub use collator::*;
pub use file::*;
pub use slice::*;
pub type Key = Vec<Value>;
fn format_schema(schema: &[Column]) -> String {
let schema: Vec<String> = schema.iter().map(|c| c.to_string()).collect();
format!("[{}]", schema.join(", "))
}
fn validate_key(key: Key, schema: &[Column]) -> TCResult<Key> {
if key.len() != schema.len() {
return Err(error::bad_request(
&format!(
"Invalid key {} for schema",
Value::Tuple(key.to_vec().into())
),
format_schema(schema),
));
}
validate_prefix(key, schema)
}
fn validate_prefix(prefix: Key, schema: &[Column]) -> TCResult<Key> {
if prefix.len() > schema.len() {
return Err(error::bad_request(
&format!(
"Invalid selector {} for schema",
Value::Tuple(prefix.to_vec().into())
),
format_schema(schema),
));
}
prefix
.into_iter()
.zip(schema)
.map(|(value, column)| {
let value = column.dtype().try_cast(value)?;
let key_size = bincode::serialized_size(&value)?;
if let Some(size) = column.max_len() {
if key_size as usize > *size {
return Err(error::bad_request(
"Column value exceeds the maximum length",
column.name(),
));
}
}
Ok(value)
})
.collect()
}
struct CountHandler<'a, T: BTreeInstance> {
btree: &'a T,
}
#[async_trait]
impl<'a, T: BTreeInstance> Handler for CountHandler<'a, T> {
fn subject(&self) -> TCType {
Instance::class(self.btree).into()
}
fn scope(&self) -> Option<Scope> {
Some(SCOPE_READ.into())
}
async fn handle_get(self: Box<Self>, txn: &Txn, range: Value) -> TCResult<State> {
let range = validate_range(range, self.btree.schema())?;
let count = self.btree.len(txn.id(), range).await?;
Ok(State::Scalar(Scalar::Value(Value::Number(count.into()))))
}
}
struct DeleteHandler<'a, T: BTreeInstance> {
btree: &'a T,
}
#[async_trait]
impl<'a, T: BTreeInstance> Handler for DeleteHandler<'a, T> {
fn subject(&self) -> TCType {
Instance::class(self.btree).into()
}
fn scope(&self) -> Option<Scope> {
Some(SCOPE_WRITE.into())
}
async fn handle_delete(self: Box<Self>, txn: &Txn, range: Value) -> TCResult<()> {
let range = validate_range(range, self.btree.schema())?;
BTreeInstance::delete(self.btree, txn.id(), range).await
}
}
struct SliceHandler<'a, T: BTreeInstance> {
btree: &'a T,
}
impl<'a, T: BTreeInstance> SliceHandler<'a, T>
where
Collection: From<T>,
BTree: From<T>,
{
async fn slice(&self, txn: &Txn, range: BTreeRange) -> TCResult<State> {
if range == BTreeRange::default() {
Ok(State::Collection(self.btree.clone().into()))
} else if range.is_key(self.btree.schema()) {
let mut rows = self.btree.stream(txn.id(), range, false).await?;
let row = rows.try_next().await?;
row.ok_or_else(|| error::not_found("(btree key)"))
.map(|key| State::Scalar(Scalar::Value(Value::Tuple(key.to_vec().into()))))
} else {
let slice = BTreeSlice::new(self.btree.clone().into(), range, false)?;
Ok(State::Collection(slice.into()))
}
}
}
#[async_trait]
impl<'a, T: BTreeInstance> Handler for SliceHandler<'a, T>
where
Collection: From<T>,
BTree: From<T>,
{
fn subject(&self) -> TCType {
Instance::class(self.btree).into()
}
fn scope(&self) -> Option<Scope> {
Some(SCOPE_READ.into())
}
async fn handle_get(self: Box<Self>, txn: &Txn, range: Value) -> TCResult<State> {
let range = validate_range(range, self.btree.schema())?;
self.slice(txn, range).await
}
async fn handle_post(
self: Box<Self>,
_request: &Request,
txn: &Txn,
mut params: Map<Scalar>,
) -> TCResult<State> {
let range = params
.remove(&label("where").into())
.unwrap_or_else(|| Scalar::from(()));
let range = validate_range(range, self.btree.schema())?;
self.slice(txn, range).await
}
}
struct ReverseHandler<'a, T: BTreeInstance> {
btree: &'a T,
}
impl<'a, T: BTreeInstance> ReverseHandler<'a, T>
where
BTree: From<T>,
{
fn reverse(&self, range: BTreeRange) -> TCResult<State> {
let slice = BTreeSlice::new(self.btree.clone().into(), range, true)?;
Ok(State::Collection(Collection::BTree(slice.into())))
}
}
#[async_trait]
impl<'a, T: BTreeInstance> Handler for ReverseHandler<'a, T>
where
BTree: From<T>,
{
fn subject(&self) -> TCType {
Instance::class(self.btree).into()
}
fn scope(&self) -> Option<Scope> {
Some(SCOPE_READ.into())
}
async fn handle_get(self: Box<Self>, _txn: &Txn, range: Value) -> TCResult<State> {
let range = validate_range(range, self.btree.schema())?;
self.reverse(range)
}
async fn handle_post(
self: Box<Self>,
_request: &Request,
_txn: &Txn,
mut params: Map<Scalar>,
) -> TCResult<State> {
let range = params
.remove(&label("where").into())
.unwrap_or_else(|| Scalar::from(()));
let range = validate_range(range, self.btree.schema())?;
self.reverse(range)
}
}
struct WriteHandler<'a, T: BTreeInstance> {
btree: &'a T,
}
#[async_trait]
impl<'a, T: BTreeInstance> Handler for WriteHandler<'a, T> {
fn subject(&self) -> TCType {
Instance::class(self.btree).into()
}
fn scope(&self) -> Option<Scope> {
Some(SCOPE_WRITE.into())
}
async fn handle_put(
self: Box<Self>,
_request: &Request,
txn: &Txn,
range: Value,
data: State,
) -> TCResult<()> {
let range = validate_range(range, self.btree.schema())?;
if range == BTreeRange::default() {
match data {
State::Collection(collection) => {
let keys = collection.to_stream(txn).await?;
let keys = keys.map(|key| {
key.and_then(|s| {
s.try_cast_into(|k| error::bad_request("Invalid BTree key", k))
})
});
self.btree.try_insert_from(txn.id(), keys).await?;
}
State::Scalar(scalar) if scalar.matches::<Vec<Key>>() => {
let keys: Vec<Key> = scalar.opt_cast_into().unwrap();
self.btree
.insert_from(txn.id(), stream::iter(keys.into_iter()))
.await?;
}
State::Scalar(scalar) if scalar.matches::<Key>() => {
let key: Key = scalar.opt_cast_into().unwrap();
let key = validate_key(key, self.btree.schema())?;
self.btree.insert(txn.id(), key).await?;
}
other => {
return Err(error::bad_request("Invalid key for BTree", other));
}
}
} else {
return Err(error::not_implemented("BTree::update"));
}
Ok(())
}
}
#[derive(Clone)]
pub struct BTreeImpl<T: BTreeInstance> {
inner: T,
}
impl<T: BTreeInstance> BTreeImpl<T> {
fn into_inner(self) -> T {
self.inner
}
}
impl<T: BTreeInstance> Instance for BTreeImpl<T> {
type Class = BTreeType;
fn class(&self) -> BTreeType {
self.inner.class()
}
}
#[async_trait]
impl<T: BTreeInstance> CollectionInstance for BTreeImpl<T> {
type Item = Key;
async fn is_empty(&self, txn: &Txn) -> TCResult<bool> {
self.inner.is_empty(txn).await
}
async fn to_stream<'a>(&'a self, txn: &'a Txn) -> TCResult<TCTryStream<'a, Scalar>> {
let stream = self.stream(txn.id(), BTreeRange::default(), false).await?;
Ok(Box::pin(stream.map_ok(Scalar::from)))
}
}
impl<T: BTreeInstance> Route for BTreeImpl<T>
where
Collection: From<T>,
BTree: From<T>,
{
fn route(&'_ self, method: MethodType, path: &[PathSegment]) -> Option<Box<dyn Handler + '_>> {
let btree = &self.inner;
if path.is_empty() {
match method {
MethodType::Get | MethodType::Post => Some(Box::new(SliceHandler { btree })),
MethodType::Put => Some(Box::new(WriteHandler { btree })),
MethodType::Delete => Some(Box::new(DeleteHandler { btree })),
}
} else if path.len() == 1 {
match path[0].as_str() {
"count" => Some(Box::new(CountHandler { btree })),
"reverse" => Some(Box::new(ReverseHandler { btree })),
_ => None,
}
} else {
None
}
}
}
impl<T: BTreeInstance> Deref for BTreeImpl<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<T: BTreeInstance> From<T> for BTreeImpl<T> {
fn from(inner: T) -> Self {
Self { inner }
}
}
#[derive(Clone)]
pub enum BTree {
Tree(BTreeImpl<BTreeFile>),
View(BTreeImpl<BTreeSlice>),
}
impl Instance for BTree {
type Class = BTreeType;
fn class(&self) -> Self::Class {
match self {
Self::Tree(tree) => tree.class(),
Self::View(view) => view.class(),
}
}
}
#[async_trait]
impl CollectionInstance for BTree {
type Item = Key;
async fn is_empty(&self, txn: &Txn) -> TCResult<bool> {
match self {
Self::Tree(tree) => tree.is_empty(txn).await,
Self::View(view) => view.is_empty(txn).await,
}
}
async fn to_stream<'a>(&'a self, txn: &'a Txn) -> TCResult<TCTryStream<'a, Scalar>> {
match self {
Self::Tree(tree) => tree.to_stream(txn).await,
Self::View(view) => view.to_stream(txn).await,
}
}
}
#[async_trait]
impl BTreeInstance for BTree {
async fn delete(&self, txn_id: &TxnId, range: BTreeRange) -> TCResult<()> {
match self {
Self::Tree(tree) => BTreeInstance::delete(tree.deref(), txn_id, range).await,
Self::View(view) => BTreeInstance::delete(view.deref(), txn_id, range).await,
}
}
async fn insert(&self, txn_id: &TxnId, key: Key) -> TCResult<()> {
match self {
Self::Tree(tree) => tree.insert(txn_id, key).await,
Self::View(view) => view.insert(txn_id, key).await,
}
}
async fn insert_from<S: Stream<Item = Key> + Send>(
&self,
txn_id: &TxnId,
source: S,
) -> TCResult<()> {
match self {
Self::Tree(tree) => tree.insert_from(txn_id, source).await,
Self::View(view) => view.insert_from(txn_id, source).await,
}
}
async fn try_insert_from<S: Stream<Item = TCResult<Key>> + Send>(
&self,
txn_id: &TxnId,
source: S,
) -> TCResult<()> {
match self {
Self::Tree(tree) => tree.try_insert_from(txn_id, source).await,
Self::View(view) => view.try_insert_from(txn_id, source).await,
}
}
async fn is_empty(&self, txn: &Txn) -> TCResult<bool> {
match self {
Self::Tree(tree) => BTreeInstance::is_empty(tree.deref(), txn).await,
Self::View(view) => BTreeInstance::is_empty(view.deref(), txn).await,
}
}
async fn len(&self, txn_id: &TxnId, range: BTreeRange) -> TCResult<u64> {
match self {
Self::Tree(tree) => tree.len(txn_id, range).await,
Self::View(view) => view.len(txn_id, range).await,
}
}
fn schema(&'_ self) -> &'_ [Column] {
match self {
Self::Tree(tree) => tree.schema(),
Self::View(view) => view.schema(),
}
}
async fn stream<'a>(
&'a self,
txn_id: &'a TxnId,
range: BTreeRange,
reverse: bool,
) -> TCResult<TCTryStream<'a, Key>> {
match self {
Self::Tree(tree) => tree.stream(txn_id, range, reverse).await,
Self::View(view) => view.stream(txn_id, range, reverse).await,
}
}
}
impl Route for BTree {
fn route(
&'_ self,
method: MethodType,
path: &'_ [PathSegment],
) -> Option<Box<dyn Handler + '_>> {
match self {
Self::Tree(tree) => tree.route(method, path),
Self::View(view) => view.route(method, path),
}
}
}
#[async_trait]
impl Transact for BTree {
    async fn commit(&self, txn_id: &TxnId) {
match self {
Self::Tree(tree) => tree.commit(txn_id).await,
Self::View(_) => (), // no-op
}
}
async fn rollback(&self, txn_id: &TxnId) {
match self {
Self::Tree(tree) => tree.rollback(txn_id).await,
Self::View(_) => (), // no-op
}
}
async fn finalize(&self, txn_id: &TxnId) {
match self {
Self::Tree(tree) => tree.finalize(txn_id).await,
Self::View(_) => (), // no-op
}
}
}
impl From<BTreeFile> for BTree {
fn from(btree: BTreeFile) -> BTree {
BTree::Tree(btree.into())
}
}
impl From<BTreeSlice> for BTree {
fn from(slice: BTreeSlice) -> BTree {
BTree::View(slice.into())
}
}
impl fmt::Display for BTree {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Self::Tree(_) => write!(f, "(b-tree)"),
Self::View(_) => write!(f, "(b-tree slice)"),
}
}
}
| commit |
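// Illustrative sketch (comment added for this excerpt, not from the original
// source): the From impls above let callers wrap either concrete type and
// dispatch through the enum, e.g.
//     let tree: BTree = btree_file.into();   // yields BTree::Tree(...)
//     let count = tree.len(&txn_id, range).await?;
// `btree_file`, `txn_id`, and `range` are assumed bindings for this example.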
package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMultcomp(RPackage):
| """Simultaneous tests and confidence intervals for general linear
hypotheses in parametric models, including linear, generalized linear,
linear mixed effects, and survival models. The package includes demos
reproducing analyses presented in the book "Multiple Comparisons Using R"
(Bretz, Hothorn, Westfall, 2010, CRC Press)."""
homepage = "http://multcomp.r-forge.r-project.org/"
url = "https://cloud.r-project.org/src/contrib/multcomp_1.4-6.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/multcomp"
version('1.4-10', sha256='29bcc635c0262e304551b139cd9ee655ab25a908d9693e1cacabfc2a936df5cf')
version('1.4-8', sha256='a20876619312310e9523d67e9090af501383ce49dc6113c6b4ca30f9c943a73a')
version('1.4-6', sha256='fe9efbe671416a49819cbdb9137cc218faebcd76e0f170fd1c8d3c84c42eeda2')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-codetools', type=('build', 'run')) |
|
core.py | # "Lorenz-95" (or 96) model. For a deeper introduction, see
# "DAPPER/tutorials/T4 - Dynamical systems, chaos, Lorenz.ipynb"
#
# Note: implementation is ndim-agnostic.
import numpy as np
from tools.math import rk4, integrate_TLM, is1d
Force = 8.0
# Note: the model is unstable (blows up) if there are large peaks
# (as may be occasioned by the analysis update, especially with partial obs).
# Example: integrate 4 steps with dt=0.05 from x0 = [0,-30,0,30].
# This is effectively a CFL condition... Can be addressed by:
# - lowering dt
# - using an implicit time stepping scheme instead of rk4
# - stupidly crop amplitudes, as is done here:
prevent_blow_up = False
Tplot = 10
x0 = lambda M: 2.3*np.ones(M)
def dxdt(x):
|
def step(x0, t, dt):
if prevent_blow_up:
clip = abs(x0)>30
x0[clip] *= 0.1
return rk4(lambda t,x: dxdt(x), x0, np.nan, dt)
################################################
# OPTIONAL (not necessary for EnKF or PartFilt):
################################################
def TLM(x):
"""Tangent linear model"""
assert is1d(x)
Nx = len(x)
TLM = np.zeros((Nx,Nx))
md = lambda i: np.mod(i,Nx)
for i in range(Nx):
TLM[i,i] = -1.0
TLM[i, i-2 ] = -x[i-1]
TLM[i,md(i+1)] = +x[i-1]
TLM[i, i-1 ] = x[md(i+1)]-x[i-2]
return TLM
def dfdx(x,t,dt):
"""Integral of TLM. Jacobian of step."""
# method='analytic' is a substantial upgrade for Lor95
return integrate_TLM(TLM(x),dt,method='analytic')
################################################
# Add some non-default liveplotters
################################################
import tools.liveplotting as LP
def LPs(jj=None): return [
(11, 1, LP.spatial1d(jj) ),
(12, 1, LP.correlations ),
(15, 0, LP.spectral_errors),
(13, 0, LP.phase3d(jj) ),
(11, 0, LP.sliding_marginals(jj)) ,
]
| a = x.ndim-1
s = lambda x,n: np.roll(x,-n,axis=a)
return (s(x,1)-s(x,-2))*s(x,-1) - x + Force |
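# Illustrative usage sketch (comment added for this excerpt, not part of the
# original module): advance a 40-variable Lorenz-96 state one model time unit
# with the rk4-based step() above.
#     x = x0(40)
#     for _ in range(20):
#         x = step(x, t=np.nan, dt=0.05)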
messages.ts | export const PAUSED_VM_MODAL_MESSAGE =
'This VM has been paused. If you wish to unpause it, please click the Unpause button below. For further details, please check with your system administrator.';
export const VIRTUAL_MACHINE_IS_NOT_RUNNING = 'Virtual Machine is not running';
export const NO_GUEST_AGENT_MESSAGE =
'A guest agent was not found for this VM. Either the guest agent was not installed or the VM has not finished booting. When a guest agent is not installed, some management features are unavailable and the metrics might be inaccurate.';
export const GUEST_AGENT_REQUIRED_MESSAGE = 'Guest agent required';
export const NOT_AVAILABLE_MESSAGE = 'Not available';
export const VM_NOT_RUNNING_MESSAGE = 'VM not running';
export const NO_LOGGED_IN_USERS_MSG = 'No users logged in';
export const GUEST_AGENT_FILE_SYSTEMS_DESCRIPTION =
'The following information regarding how the disks are partitioned is provided by the guest agent.';
export const VM_DISKS_DESCRIPTION =
'The following information is provided by the OpenShift Virtualization operator.';
export const V2V_IMPORT_CLOUD_INIT_NOT_AVAILABLE =
'This wizard shows a partial data set. A complete data set is available for viewing when you complete the import process.'; | ||
deps.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Check license of third-party deps by inspecting src/vendor
use std::collections::{BTreeSet, HashSet, HashMap};
use std::fs::File;
use std::io::Read;
use std::path::Path;
use std::process::Command;
use serde_json;
const LICENSES: &[&str] = &[
"MIT/Apache-2.0",
"MIT / Apache-2.0",
"Apache-2.0/MIT",
"Apache-2.0 / MIT",
"MIT OR Apache-2.0",
"MIT",
"Unlicense/MIT",
"Unlicense OR MIT",
];
/// These are exceptions to Rust's permissive licensing policy, and
/// should be considered bugs. Exceptions are only allowed in Rust
/// tooling. It is _crucial_ that no exception crates be dependencies
/// of the Rust runtime (std / test).
const EXCEPTIONS: &[&str] = &[
"mdbook", // MPL2, mdbook
"openssl", // BSD+advertising clause, cargo, mdbook
"pest", // MPL2, mdbook via handlebars
"thread-id", // Apache-2.0, mdbook
"toml-query", // MPL-2.0, mdbook
"is-match", // MPL-2.0, mdbook
"cssparser", // MPL-2.0, rustdoc
"smallvec", // MPL-2.0, rustdoc
"fuchsia-zircon-sys", // BSD-3-Clause, rustdoc, rustc, cargo
"fuchsia-zircon", // BSD-3-Clause, rustdoc, rustc, cargo (jobserver & tempdir)
"cssparser-macros", // MPL-2.0, rustdoc
"selectors", // MPL-2.0, rustdoc
"clippy_lints", // MPL-2.0, rls
"colored", // MPL-2.0, rustfmt
"ordslice", // Apache-2.0, rls
"cloudabi", // BSD-2-Clause, (rls -> crossbeam-channel 0.2 -> rand 0.5)
"ryu", // Apache-2.0, rls/cargo/... (b/c of serde)
];
/// Which crates to check against the whitelist?
const WHITELIST_CRATES: &[CrateVersion] = &[
CrateVersion("rustc", "0.0.0"),
CrateVersion("rustc_codegen_llvm", "0.0.0"),
];
/// Whitelist of crates rustc is allowed to depend on. Avoid adding to the list if possible.
const WHITELIST: &[Crate] = &[
Crate("aho-corasick"),
Crate("arrayvec"),
Crate("atty"),
Crate("backtrace"),
Crate("backtrace-sys"),
Crate("bitflags"),
Crate("byteorder"),
Crate("cc"),
Crate("cfg-if"),
Crate("chalk-engine"),
Crate("chalk-macros"),
Crate("cloudabi"),
Crate("cmake"),
Crate("crossbeam-deque"),
Crate("crossbeam-epoch"),
Crate("crossbeam-utils"),
Crate("datafrog"),
Crate("either"),
Crate("ena"),
Crate("env_logger"),
Crate("filetime"),
Crate("flate2"),
Crate("fuchsia-zircon"),
Crate("fuchsia-zircon-sys"),
Crate("getopts"),
Crate("humantime"),
Crate("jobserver"),
Crate("kernel32-sys"),
Crate("lazy_static"),
Crate("libc"),
Crate("lock_api"),
Crate("log"),
Crate("log_settings"),
Crate("memchr"),
Crate("memmap"),
Crate("memoffset"),
Crate("miniz-sys"),
Crate("nodrop"),
Crate("num_cpus"),
Crate("owning_ref"),
Crate("parking_lot"),
Crate("parking_lot_core"),
Crate("pkg-config"),
Crate("polonius-engine"),
Crate("quick-error"),
Crate("rand"),
Crate("rand_core"),
Crate("redox_syscall"),
Crate("redox_termios"),
Crate("regex"),
Crate("regex-syntax"),
Crate("remove_dir_all"),
Crate("rustc-demangle"),
Crate("rustc-hash"),
Crate("rustc-rayon"),
Crate("rustc-rayon-core"),
Crate("scoped-tls"),
Crate("scopeguard"),
Crate("smallvec"),
Crate("stable_deref_trait"),
Crate("tempfile"),
Crate("termcolor"),
Crate("terminon"),
Crate("termion"),
Crate("thread_local"),
Crate("ucd-util"),
Crate("unicode-width"),
Crate("unreachable"),
Crate("utf8-ranges"),
Crate("version_check"),
Crate("void"),
Crate("winapi"),
Crate("winapi-build"),
Crate("winapi-i686-pc-windows-gnu"),
Crate("winapi-util"),
Crate("winapi-x86_64-pc-windows-gnu"),
Crate("wincolor"),
];
// Some types for Serde to deserialize the output of `cargo metadata` to...
#[derive(Deserialize)]
struct Output {
resolve: Resolve,
}
#[derive(Deserialize)]
struct Resolve {
nodes: Vec<ResolveNode>,
}
#[derive(Deserialize)]
struct ResolveNode {
id: String,
dependencies: Vec<String>,
}
/// A unique identifier for a crate
#[derive(Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Debug, Hash)]
struct Crate<'a>(&'a str); // (name,)
#[derive(Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Debug, Hash)]
struct CrateVersion<'a>(&'a str, &'a str); // (name, version)
impl<'a> Crate<'a> {
pub fn id_str(&self) -> String {
format!("{} ", self.0)
}
}
impl<'a> CrateVersion<'a> {
/// Returns the struct and whether or not the dep is in-tree
pub fn from_str(s: &'a str) -> (Self, bool) {
let mut parts = s.split(' ');
let name = parts.next().unwrap();
let version = parts.next().unwrap();
let path = parts.next().unwrap();
let is_path_dep = path.starts_with("(path+");
(CrateVersion(name, version), is_path_dep)
}
pub fn id_str(&self) -> String {
format!("{} {}", self.0, self.1)
}
}
impl<'a> From<CrateVersion<'a>> for Crate<'a> {
fn from(cv: CrateVersion<'a>) -> Crate<'a> {
Crate(cv.0)
}
}
/// Checks the dependency at the given path. Changes `bad` to `true` if a check failed.
///
/// Specifically, this checks that the license is correct.
pub fn check(path: &Path, bad: &mut bool) {
// Check licences
let path = path.join("vendor");
assert!(path.exists(), "vendor directory missing");
let mut saw_dir = false;
for dir in t!(path.read_dir()) { | saw_dir = true;
let dir = t!(dir);
// skip our exceptions
let is_exception = EXCEPTIONS.iter().any(|exception| {
dir.path()
.to_str()
.unwrap()
.contains(&format!("src/vendor/{}", exception))
});
if is_exception {
continue;
}
let toml = dir.path().join("Cargo.toml");
*bad = *bad || !check_license(&toml);
}
assert!(saw_dir, "no vendored source");
}
/// Checks the dependency of WHITELIST_CRATES at the given path. Changes `bad` to `true` if a check
/// failed.
///
/// Specifically, this checks that the dependencies are on the WHITELIST.
pub fn check_whitelist(path: &Path, cargo: &Path, bad: &mut bool) {
// Get dependencies from cargo metadata
let resolve = get_deps(path, cargo);
// Get the whitelist into a convenient form
let whitelist: HashSet<_> = WHITELIST.iter().cloned().collect();
// Check dependencies
let mut visited = BTreeSet::new();
let mut unapproved = BTreeSet::new();
for &krate in WHITELIST_CRATES.iter() {
let mut bad = check_crate_whitelist(&whitelist, &resolve, &mut visited, krate, false);
unapproved.append(&mut bad);
}
if !unapproved.is_empty() {
println!("Dependencies not on the whitelist:");
for dep in unapproved {
println!("* {}", dep.id_str());
}
*bad = true;
}
check_crate_duplicate(&resolve, bad);
}
fn check_license(path: &Path) -> bool {
if !path.exists() {
panic!("{} does not exist", path.display());
}
let mut contents = String::new();
t!(t!(File::open(path)).read_to_string(&mut contents));
let mut found_license = false;
for line in contents.lines() {
if !line.starts_with("license") {
continue;
}
let license = extract_license(line);
if !LICENSES.contains(&&*license) {
println!("invalid license {} in {}", license, path.display());
return false;
}
found_license = true;
break;
}
if !found_license {
println!("no license in {}", path.display());
return false;
}
true
}
fn extract_license(line: &str) -> String {
let first_quote = line.find('"');
let last_quote = line.rfind('"');
if let (Some(f), Some(l)) = (first_quote, last_quote) {
let license = &line[f + 1..l];
license.into()
} else {
"bad-license-parse".into()
}
}
/// Get the dependencies of the crate at the given path using `cargo metadata`.
fn get_deps(path: &Path, cargo: &Path) -> Resolve {
// Run `cargo metadata` to get the set of dependencies
let output = Command::new(cargo)
.arg("metadata")
.arg("--format-version")
.arg("1")
.arg("--manifest-path")
.arg(path.join("Cargo.toml"))
.output()
.expect("Unable to run `cargo metadata`")
.stdout;
let output = String::from_utf8_lossy(&output);
let output: Output = serde_json::from_str(&output).unwrap();
output.resolve
}
/// Checks the dependencies of the given crate from the given cargo metadata to see if they are on
/// the whitelist. Returns a list of illegal dependencies.
fn check_crate_whitelist<'a, 'b>(
whitelist: &'a HashSet<Crate>,
resolve: &'a Resolve,
visited: &'b mut BTreeSet<CrateVersion<'a>>,
krate: CrateVersion<'a>,
must_be_on_whitelist: bool,
) -> BTreeSet<Crate<'a>> {
// Will contain bad deps
let mut unapproved = BTreeSet::new();
// Check if we have already visited this crate
if visited.contains(&krate) {
return unapproved;
}
visited.insert(krate);
// If this path is in-tree, we don't require it to be on the whitelist
if must_be_on_whitelist {
// If this dependency is not on the WHITELIST, add to bad set
if !whitelist.contains(&krate.into()) {
unapproved.insert(krate.into());
}
}
// Do a DFS in the crate graph (it's a DAG, so we know we have no cycles!)
let to_check = resolve
.nodes
.iter()
.find(|n| n.id.starts_with(&krate.id_str()))
.expect("crate does not exist");
for dep in to_check.dependencies.iter() {
let (krate, is_path_dep) = CrateVersion::from_str(dep);
let mut bad = check_crate_whitelist(whitelist, resolve, visited, krate, !is_path_dep);
unapproved.append(&mut bad);
}
unapproved
}
fn check_crate_duplicate(resolve: &Resolve, bad: &mut bool) {
const FORBIDDEN_TO_HAVE_DUPLICATES: &[&str] = &[
// These two crates take quite a long time to build, let's not let two
// versions of them accidentally sneak into our dependency graph to
// ensure we keep our CI times under control
// "cargo", // FIXME(#53005)
"rustc-ap-syntax",
];
let mut name_to_id: HashMap<_, Vec<_>> = HashMap::new();
for node in resolve.nodes.iter() {
name_to_id.entry(node.id.split_whitespace().next().unwrap())
.or_default()
.push(&node.id);
}
for name in FORBIDDEN_TO_HAVE_DUPLICATES {
if name_to_id[name].len() <= 1 {
continue
}
println!("crate `{}` is duplicated in `Cargo.lock`", name);
for id in name_to_id[name].iter() {
println!(" * {}", id);
}
*bad = true;
}
} | |
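// Illustrative wiring sketch (assumed, not from this file): a tidy driver
// would invoke these checks roughly as
//     let mut bad = false;
//     check(&src_path, &mut bad);
//     check_whitelist(&src_path, &cargo_path, &mut bad);
//     if bad { panic!("some tidy checks failed"); }
// `src_path` and `cargo_path` are assumed bindings for this example.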
models.py | import uuid
from django.contrib.auth import get_user_model
from django.db import models
# Create your models here.
from django.urls import reverse
class Book(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
title = models.CharField(max_length=250)
author = models.CharField(max_length=250)
price = models.DecimalField(max_digits=6, decimal_places=2)
cover = models.ImageField(upload_to='covers/', blank=True)
class Meta:
indexes = [
models.Index(fields=['id'], name='id_index'),
]
permissions = [
('special_status', 'Can read all books'),
]
def | (self):
return self.title
def get_absolute_url(self):
return reverse('book_detail', args=[str(self.id)])
class Review(models.Model):
book = models.ForeignKey(Book, on_delete=models.CASCADE, related_name='reviews', )
review = models.CharField(max_length=250)
author = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
def __str__(self):
return self.review
| __str__ |
options.go | /*
Copyright 2020 The OpenYurt Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"net"
"path/filepath"
"time"
"github.com/spf13/pflag"
"github.com/openyurtio/openyurt/pkg/projectinfo"
"github.com/openyurtio/openyurt/pkg/yurthub/storage/disk"
"github.com/openyurtio/openyurt/pkg/yurthub/util"
)
const (
DummyIfCIDR = "169.254.0.0/16"
ExclusiveCIDR = "169.254.31.0/24"
)
// YurtHubOptions is the main settings for the yurthub
type YurtHubOptions struct {
ServerAddr string
YurtHubHost string
YurtHubPort string
YurtHubProxyPort string
YurtHubProxySecurePort string
GCFrequency int
CertMgrMode string
KubeletRootCAFilePath string
KubeletPairFilePath string
NodeName string
NodePoolName string
LBMode string
HeartbeatFailedRetry int
HeartbeatHealthyThreshold int
HeartbeatTimeoutSeconds int
MaxRequestInFlight int
JoinToken string
RootDir string
Version bool
EnableProfiling bool
EnableDummyIf bool
EnableIptables bool
HubAgentDummyIfIP string
HubAgentDummyIfName string
DiskCachePath string
AccessServerThroughHub bool
EnableResourceFilter bool
DisabledResourceFilters []string
WorkingMode string
KubeletHealthGracePeriod time.Duration
}
// NewYurtHubOptions creates a new YurtHubOptions with a default config.
func | () *YurtHubOptions {
o := &YurtHubOptions{
YurtHubHost: "127.0.0.1",
YurtHubProxyPort: "10261",
YurtHubPort: "10267",
YurtHubProxySecurePort: "10268",
GCFrequency: 120,
CertMgrMode: util.YurtHubCertificateManagerName,
KubeletRootCAFilePath: util.DefaultKubeletRootCAFilePath,
KubeletPairFilePath: util.DefaultKubeletPairFilePath,
LBMode: "rr",
HeartbeatFailedRetry: 3,
HeartbeatHealthyThreshold: 2,
HeartbeatTimeoutSeconds: 2,
MaxRequestInFlight: 250,
RootDir: filepath.Join("/var/lib/", projectinfo.GetHubName()),
EnableProfiling: true,
EnableDummyIf: true,
EnableIptables: true,
HubAgentDummyIfIP: "169.254.2.1",
HubAgentDummyIfName: fmt.Sprintf("%s-dummy0", projectinfo.GetHubName()),
DiskCachePath: disk.CacheBaseDir,
AccessServerThroughHub: true,
EnableResourceFilter: true,
DisabledResourceFilters: make([]string, 0),
WorkingMode: string(util.WorkingModeEdge),
KubeletHealthGracePeriod: time.Second * 40,
}
return o
}
// ValidateOptions validates YurtHubOptions
func ValidateOptions(options *YurtHubOptions) error {
if len(options.NodeName) == 0 {
return fmt.Errorf("node name is empty")
}
if len(options.ServerAddr) == 0 {
return fmt.Errorf("server-address is empty")
}
if !util.IsSupportedLBMode(options.LBMode) {
return fmt.Errorf("lb mode(%s) is not supported", options.LBMode)
}
if !util.IsSupportedCertMode(options.CertMgrMode) {
return fmt.Errorf("cert manage mode %s is not supported", options.CertMgrMode)
}
if !util.IsSupportedWorkingMode(util.WorkingMode(options.WorkingMode)) {
return fmt.Errorf("working mode %s is not supported", options.WorkingMode)
}
if err := verifyDummyIP(options.HubAgentDummyIfIP); err != nil {
return fmt.Errorf("dummy ip %s is not invalid, %v", options.HubAgentDummyIfIP, err)
}
return nil
}
// AddFlags registers the yurthub flags on the given FlagSet
func (o *YurtHubOptions) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&o.YurtHubHost, "bind-address", o.YurtHubHost, "the IP address on which to listen for the --serve-port port.")
fs.StringVar(&o.YurtHubPort, "serve-port", o.YurtHubPort, "the port on which to serve HTTP requests(like profiling, metrics) for hub agent.")
fs.StringVar(&o.YurtHubProxyPort, "proxy-port", o.YurtHubProxyPort, "the port on which to proxy HTTP requests to kube-apiserver")
fs.StringVar(&o.YurtHubProxySecurePort, "proxy-secure-port", o.YurtHubProxySecurePort, "the port on which to proxy HTTPS requests to kube-apiserver")
fs.StringVar(&o.ServerAddr, "server-addr", o.ServerAddr, "the address of Kubernetes kube-apiserver,the format is: \"server1,server2,...\"")
fs.StringVar(&o.CertMgrMode, "cert-mgr-mode", o.CertMgrMode, "the cert manager mode, hubself: auto generate client cert for hub agent.")
fs.StringVar(&o.KubeletRootCAFilePath, "kubelet-ca-file", o.KubeletRootCAFilePath, "the ca file path used by kubelet.")
fs.StringVar(&o.KubeletPairFilePath, "kubelet-client-certificate", o.KubeletPairFilePath, "the path of kubelet client certificate file.")
fs.IntVar(&o.GCFrequency, "gc-frequency", o.GCFrequency, "the frequency to gc cache in storage(unit: minute).")
fs.StringVar(&o.NodeName, "node-name", o.NodeName, "the name of node that runs hub agent")
fs.StringVar(&o.LBMode, "lb-mode", o.LBMode, "the mode of load balancer to connect remote servers(rr, priority)")
fs.IntVar(&o.HeartbeatFailedRetry, "heartbeat-failed-retry", o.HeartbeatFailedRetry, "number of heartbeat request retry after having failed.")
fs.IntVar(&o.HeartbeatHealthyThreshold, "heartbeat-healthy-threshold", o.HeartbeatHealthyThreshold, "minimum consecutive successes for the heartbeat to be considered healthy after having failed.")
fs.IntVar(&o.HeartbeatTimeoutSeconds, "heartbeat-timeout-seconds", o.HeartbeatTimeoutSeconds, "number of seconds after which the heartbeat times out.")
fs.IntVar(&o.MaxRequestInFlight, "max-requests-in-flight", o.MaxRequestInFlight, "the maximum number of parallel requests.")
fs.StringVar(&o.JoinToken, "join-token", o.JoinToken, "the Join token for bootstrapping hub agent when --cert-mgr-mode=hubself.")
fs.StringVar(&o.RootDir, "root-dir", o.RootDir, "directory path for managing hub agent files(pki, cache etc).")
fs.BoolVar(&o.Version, "version", o.Version, "print the version information.")
fs.BoolVar(&o.EnableProfiling, "profiling", o.EnableProfiling, "enable profiling via web interface host:port/debug/pprof/")
fs.BoolVar(&o.EnableDummyIf, "enable-dummy-if", o.EnableDummyIf, "enable dummy interface or not")
fs.BoolVar(&o.EnableIptables, "enable-iptables", o.EnableIptables, "enable iptables manager to setup rules for accessing hub agent")
fs.StringVar(&o.HubAgentDummyIfIP, "dummy-if-ip", o.HubAgentDummyIfIP, "the ip address of dummy interface that used for container connect hub agent(exclusive ips: 169.254.31.0/24, 169.254.1.1/32)")
fs.StringVar(&o.HubAgentDummyIfName, "dummy-if-name", o.HubAgentDummyIfName, "the name of dummy interface that is used for hub agent")
fs.StringVar(&o.DiskCachePath, "disk-cache-path", o.DiskCachePath, "the path for kubernetes to storage metadata")
fs.BoolVar(&o.AccessServerThroughHub, "access-server-through-hub", o.AccessServerThroughHub, "enable pods access kube-apiserver through yurthub or not")
fs.BoolVar(&o.EnableResourceFilter, "enable-resource-filter", o.EnableResourceFilter, "enable to filter response that comes back from reverse proxy")
fs.StringSliceVar(&o.DisabledResourceFilters, "disabled-resource-filters", o.DisabledResourceFilters, "disable resource filters to handle response")
fs.StringVar(&o.NodePoolName, "nodepool-name", o.NodePoolName, "the name of node pool that runs hub agent")
fs.StringVar(&o.WorkingMode, "working-mode", o.WorkingMode, "the working mode of yurthub(edge, cloud).")
fs.DurationVar(&o.KubeletHealthGracePeriod, "kubelet-health-grace-period", o.KubeletHealthGracePeriod, "the amount of time which we allow kubelet to be unresponsive before stop renew node lease")
}
// verifyDummyIP verifies that the specified dummy IP is valid
func verifyDummyIP(dummyIP string) error {
//169.254.2.1/32
dip := net.ParseIP(dummyIP)
if dip == nil {
return fmt.Errorf("dummy ip %s is invalid", dummyIP)
}
_, dummyIfIPNet, err := net.ParseCIDR(DummyIfCIDR)
if err != nil {
return fmt.Errorf("cidr(%s) is invalid, %v", DummyIfCIDR, err)
}
if !dummyIfIPNet.Contains(dip) {
return fmt.Errorf("dummy ip %s is not in cidr(%s)", dummyIP, DummyIfCIDR)
}
_, exclusiveIPNet, err := net.ParseCIDR(ExclusiveCIDR)
if err != nil {
return fmt.Errorf("cidr(%s) is invalid, %v", ExclusiveCIDR, err)
}
if exclusiveIPNet.Contains(dip) {
return fmt.Errorf("dummy ip %s is in reserved cidr(%s)", dummyIP, ExclusiveCIDR)
}
if dummyIP == "169.254.1.1" {
return fmt.Errorf("dummy ip is a reserved ip(%s)", dummyIP)
}
return nil
}
| NewYurtHubOptions |
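// Illustrative expectations for verifyDummyIP (comment added for this
// excerpt, not in the original source):
//     verifyDummyIP("169.254.2.1")  // ok: in 169.254.0.0/16, outside 169.254.31.0/24
//     verifyDummyIP("169.254.31.5") // error: falls inside the exclusive CIDR
//     verifyDummyIP("10.0.0.1")     // error: outside the dummy-interface CIDR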
page.go | package test
import (
"fmt"
"net/url"
"github.com/google/uuid"
"github.com/murlokswarm/app"
"github.com/murlokswarm/app/internal/core"
"github.com/murlokswarm/app/internal/dom"
"github.com/pkg/errors"
)
// Page is a test page that implements the app.Page interface.
type Page struct {
core.Page
driver *Driver
dom *dom.DOM
history *core.History
id string
compo app.Compo
}
func newPage(d *Driver, c app.PageConfig) *Page {
p := &Page{
driver: d,
dom: dom.NewDOM(d.factory, dom.JsToGoHandler),
history: core.NewHistory(),
id: uuid.New().String(),
}
d.elems.Put(p)
if len(c.URL) != 0 {
p.Load(c.URL)
}
return p
}
// ID satisfies the app.Page interface.
func (p *Page) ID() string {
return p.id
}
// Load satisfies the app.Page interface.
func (p *Page) Load(urlFmt string, v ...interface{}) {
var err error
defer func() {
p.SetErr(err)
}()
u := fmt.Sprintf(urlFmt, v...)
n := core.CompoNameFromURLString(u)
var c app.Compo
if c, err = p.driver.factory.NewCompo(n); err != nil {
return
}
if p.compo != nil {
p.dom.Clean()
}
p.compo = c
if u != p.history.Current() {
p.history.NewEntry(u)
}
_, err = p.dom.New(c)
}
// Compo satisfies the app.Page interface.
func (p *Page) Compo() app.Compo {
return p.compo
}
// Contains satisfies the app.Page interface.
func (p *Page) Contains(c app.Compo) bool {
return p.dom.Contains(c)
}
// Render satisfies the app.Page interface.
func (p *Page) Render(c app.Compo) {
_, err := p.dom.Update(c)
p.SetErr(err)
}
// Reload satisfies the app.Page interface.
func (p *Page) Reload() {
u := p.history.Current()
if len(u) == 0 {
p.SetErr(errors.New("no component loaded"))
return
}
p.Load(u)
}
// CanPrevious satisfies the app.Page interface.
func (p *Page) CanPrevious() bool {
return p.history.CanPrevious()
}
// Previous satisfies the app.Page interface.
func (p *Page) Previous() {
u := p.history.Previous()
if len(u) == 0 {
p.SetErr(nil)
return
}
p.Load(u)
}
// CanNext satisfies the app.Page interface.
func (p *Page) CanNext() bool {
return p.history.CanNext()
}
// Next satisfies the app.Page interface.
func (p *Page) Next() {
u := p.history.Next() | return
}
p.Load(u)
}
// URL satisfies the app.Page interface.
func (p *Page) URL() *url.URL {
u, err := url.Parse(p.history.Current())
p.SetErr(err)
return u
}
// Referer satisfies the app.Page interface.
func (p *Page) Referer() *url.URL {
p.SetErr(nil)
return nil
}
// Close satisfies the app.Page interface.
func (p *Page) Close() {
p.driver.elems.Delete(p)
p.SetErr(nil)
} |
if len(u) == 0 {
p.SetErr(nil) |
ksfdtimeseries.py | """
MPI-aware read and write PETSc Vec to HDF5
The goal of this module is to save snapshots of a PETSc Vec to HDF5
files, and obviously to read them again later. The obvious way to do
this is parallel HDF5. Unfortunately, distributions of HDF5 and h5py
may be built without support for parallel operation. (In particular,
the conda-forge version doesn't have it.) This is accomplished through
the following kludge:
When a KSFD.TimeSeries is created with name tsname and argument mpiok
True, the runtime environment is checked to find out if parallel HDF5
is enabled (using h5py.getconfig().mpi). If so, the data are stored in
an HDF5 file named
'{name}MPI.h5'.format(name=tsname).
Note: there is a serious problem with parallel HDF5: variable length
records can't be written. If you try, you get this exception:
OSError: Can't write data (Parallel IO does not support writing VL
datatypes yet)
Since that makes parallel HDF5 a nonstarter for my purposes, mpiok
defaults to False. You won't get parallel MPI unless you specifically
ask for it, and then dealing with the lack of VL records is your
problem.
If not, each process stores the data it owns in a file named
'{name}s{size}r{rank}.h5'.format(name=tsname, size=comm.size, rank=comm.rank)
where comm is the MPI communicator. If run sequentially the data will
all be stored in a file called '{name}s1r0.h5'. It is intended that
the *MPI.h5 file created using parallele HDF5 and the *s1r0.h5 file
created when running sequentially and parallel HDF5 is not available
will be the same.
The same procedure is used for finding the filename when opening in
read/write mode ('r+' or 'a').
When opening a TimeSeries for read (mode 'r') TimeSeries checks (in
order) for the *s<size>r<rank>.h5 file, then the *MPI.h5 file, and
finally a *s1r0.h5 file, and opens the first it finds. In this case
the retrieve methods will only return the components of the vector
owned by the local process.
Finally, I will write a simple script to merge all the files of
a *s<size>r<rank>.h5 series into a single *MPI.h5 file. In this way an
MPI process group of any size will be able to retrieve data written by
a process group of any size.
"""
import h5py, os, re, gc, time
import traceback as tb
import numpy as np
import petsc4py
from mpi4py import MPI
#
# These imports are placed inside a try/except so that this script can
# be executed standalone to check for syntax errors.
#
try:
from .ksfddebug import log
from .ksfdgrid import Grid
except ImportError:
from ksfddebug import log
from ksfdgrid import Grid
def logSERIES(*args, **kwargs):
log(*args, system='SERIES', **kwargs)
class KSFDTimeSeries:
"""
Base class for TimeSeries
KSFDTimeSeries is intended as an abstract base class for reading and
writing time series from KSFD solutions to HDF5 files. It is not
formally defined as an ABC: you can instantiate it if you really
wish, but it is not designed to make that a useful thing to do.
"""
def __init__(
self,
basename,
size=1,
rank=0,
mpiok=False,
mode='r+',
retries=0,
retry_interval=60
):
"""
Required parameter:
basename: the prefix of the filename.
Optional keyword parameters:
size=1: Number of MPI processes. This typically corresponds to
comm.size for an MPI communicator comm.
rank=0: Number of the MPI process that created this
file. Typically comm.rank.
mpiok=False: Whether parallel HDF5 should be used to store
all the data from all MPI processes in a single
file.
mode='r+': The file mode for opening the h5py.File.
retries=0: If nonzero, retry failed opens this many times.
retry_interval=60: time (in seconds) between successive
retries. Note: the open will block while waiting for a
successful retry.
size, rank, and mpiok are used mostly to figure out what
filename to use. They need not correspond to the actual
current MPI configuration. For instance, they may correspond
to the config when the time series was created.
"""
self.get_filename(basename, size, rank, mpiok, mode)
self.retries = retries
self.retry_interval = retry_interval
self._size = size
self._rank = rank
self._mode = mode
self._tsf = self.open_with_retry()
_ = self.info # make sure '/info' exists
self.try_to_set('size', self.size)
self.try_to_set('rank', self.rank)
if 'times' in self.tsf:
self.ts = np.array(self.tsf['times'][()])
try:
self.ks = np.array(self.tsf['ks'][()])
except KeyError:
self.ks = np.arange(len(self.ts))
self.order = np.array(self.tsf['order'][()])
else:
self.ts = np.array([], dtype=float)
self.ks = np.array([], dtype=int)
self.order = np.array([], dtype=int)
self.lastk = self.ks.size - 1
self.sorted = False
self.tsf.flush()
@staticmethod
def parse_filename(filename):
"""
filename is a name like 'bases2r1.h5'. parse_filename returns
(basename, size, rank, mpi) (('base', 2, 1, False) for the
example). For a filename like 'tests/test1MPI.h5', returns
('tests/test1', 1, 0, True).
"""
mpipat = r'(.*)MPI\.h5'
nompi_pat = r'(.*)s(\d+)r(\d+)\.h5'
res = re.fullmatch(mpipat, filename)
if res:
return (res[1], 1, 0, True)
res = re.fullmatch(nompi_pat, filename)
if res:
return (res[1], res[2], res[3], False)
raise ValueError(
"Couldn't parse filename {fname}".format(fname=filename)
)
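# Example (illustrative): KSFDTimeSeries.parse_filename('run1s4r2.h5')
# returns ('run1', '4', '2', False); note that size and rank come back as
# the matched strings, not ints.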
def set_grid(self, grid):
self._grid = grid
self._dim = grid.dim
self._dof = grid.dof
if self.rank_owns_file:
self._ranges = grid.ranges
# if (
# 'ranges' in self.tsf and
# not np.all(self.tsf['ranges'][()] == self.ranges)
# ):
# raise ValueError(
# "data ranges {filerange} in {file} doesn't " +
# "match grid range {gridrange}".format(
# filerange=str(self.tsf['ranges'][()]),
# file=self.filename,
# gridrange=str(grid.ranges)
# )
# )
self.myslice = (slice(0, None),)*(self.dim + 1)
else:
self._ranges = tuple((0, np) for np in grid.nps)
#
# Slice of the global array belonging to this process:
self.myslice = (slice(0, None),) + tuple(
slice(*r) for r in grid.ranges
)
self.try_to_set('ranges', self.ranges)
def get_filename(self, basename, size=1, rank=0, mpiok=True,
mode='r+'):
"""
Get name of file to be opened by this process
self.filename is set to the name of the HDF5 file to be
opened. This is also returned as the function value. In
addition, the following flags are set:
self.creating: True if creating a new file.
self.rank_owns_file: True if the file will be exclusively
owned by this process.
"""
self.usempi = mpiok and h5py.get_config().mpi
name_nompi = '{name}s{size}r{rank}.h5'.format(
name=basename,
size=size,
rank=rank
)
name_mpi = '{name}MPI.h5'.format(name=basename)
name_seq = '{name}s1r0.h5'.format(name=basename)
self.driver = None
if self.usempi and os.path.isfile(name_mpi):
self.creating = mode[0] == 'w' or mode[0] == 'x'
self.rank_owns_file = size == 1
self.filename = name_mpi
elif self.usempi and (mode[0] == 'w' or mode[0] == 'x'):
self.creating = True
self.rank_owns_file = size == 1
self.filename = name_mpi
elif os.path.isfile(name_nompi):
self.creating = mode[0] == 'w' or mode[0] == 'x'
self.rank_owns_file = True
self.filename = name_nompi
elif (mode == 'r' or mode == 'a') and os.path.isfile(name_seq):
self.creating = False
self.rank_owns_file = size == 1
self.filename = name_seq
# Allow reading from MPi file even if we're not using MPI:
elif (mode == 'r' or mode == 'a') and os.path.isfile(name_mpi):
self.creating = False
self.rank_owns_file = size == 1
self.filename = name_mpi
else:
self.creating = mode != 'r'
self.rank_owns_file = not self.usempi
self.filename = name_mpi if self.usempi else name_nompi
if self.creating and not self.rank_owns_file and self.usempi:
self.driver = 'mpio'
if self.creating:
os.makedirs(os.path.dirname(self.filename), exist_ok=True)
logSERIES('self.filename', self.filename)
logSERIES('self.creating', self.creating)
logSERIES('self.rank_owns_file', self.rank_owns_file)
logSERIES('self.driver', self.driver)
logSERIES('self.usempi', self.usempi)
return self.filename
def open(self, filename, usempi, mode):
if mode in ['w', 'w-', 'x', 'a']:
dirname = os.path.dirname(os.path.abspath(filename))
try:
os.makedirs(dirname, exist_ok=True)
except FileExistsError:
pass
def grid_save(self):
grid = self.grid
attrs = ['dim', 'dof', 'nps', 'bounds', 'spacing', 'order',
'stencil_width', 'stencil_type', 'boundary_type',
'globalSshape', 'globalVshape', 'globalCshape', 'Slshape',
'Vlshape', 'ranges', 'Clshape', 'Cashape',
'coordsNoGhosts', 'coordsWithGhosts',
]
for a in attrs:
self.try_to_set('/grid/' + a, getattr(grid, a))
def grid_read(self):
"""Reads grid params from open file, returns dict"""
ggroup = self.tsf['grid']
gd = {}
attrs = ['dim', 'dof', 'nps', 'bounds', 'spacing', 'order',
'stencil_width', 'stencil_type', 'boundary_type',
'globalSshape', 'globalVshape', 'globalCshape', 'Slshape',
'Vlshape', 'ranges', 'Clshape', 'Cashape',
'coordsNoGhosts', 'coordsWithGhosts',
]
for a in attrs:
try:
val = ggroup[a][()]
if a.endswith('shape'):
gd[a] = tuple(val)
elif np.isscalar(val):
gd[a] = val.item()
else:
gd[a] = val
except KeyError:
gd[a] = None
gd['width'] = gd['bounds'][0]
gd['height'] = gd['bounds'][1] if gd['dim'] > 1 else 1.0
gd['depth'] = gd['bounds'][2] if gd['dim'] > 2 else 1.0
gd['nx'] = gd['nps'][0]
gd['ny'] = gd['nps'][1] if gd['dim'] > 1 else 8
gd['nz'] = gd['nps'][2] if gd['dim'] > 2 else 8
return gd
def grid_load(self, gd=None):
"""Reads grid params from open file and creates new Grid."""
if gd is None:
gd = self.grid_read()
grid = Grid(
dim=gd['dim'],
width=gd['width'],
height=gd['height'],
depth=gd['depth'],
nx=gd['nx'],
ny=gd['ny'],
nz=gd['nz'],
dof=gd['dof'],
order=gd['order'],
stencil_width=gd['stencil_width'],
stencil_type=gd['stencil_type'],
boundary_type=gd['boundary_type']
)
self.set_grid(grid)
#
# info is a place for caller to store stuff
@property
def info(self):
"""Place for caller to store extra stuff"""
if not hasattr(self, '_info') or not self._info:
self._info = self.tsf.require_group('/info')
return self._info
@property
def tsFile(self):
"""The open h5File object"""
return self._tsf
@property
def tsf(self):
return self._tsf
@property
def size(self):
return self._size
@property
def rank(self):
return self._rank
@property
def mode(self):
return self._mode
@property
def ranges(self):
return self._ranges
@property
def comm(self):
return self._comm
@property
def grid(self):
return self._grid
@property
def dim(self):
return self._dim
@property
def dof(self):
return self._dof
def try_to_set(self, key, val):
"""Try to set self.tsf[key] to val, but ignore exceptions"""
if (self.mode == 'r'): return
try:
del self.tsf[key]
except KeyError:
pass
try:
self.tsf[key] = val
except ValueError:
pass
def _sort(self):
if getattr(self, 'sorted', False): return
ts = getattr(self, 'ts', np.array([]))
self.try_to_set('times', ts)
self.order = ts.argsort()
self.try_to_set('order', self.order)
self.sts = ts
self.sts.sort()
ks = getattr(self, 'ks', [])
lastk = getattr(self, 'lastk', -1)
self.try_to_set('ks', ks)
self.try_to_set('lastk', lastk)
self.sorted = True
def flush(self):
self._sort()
self.tsf.flush()
def temp_close(self):
"""
temp_close closes the HDF5 file in which the TimeSeries is
stored without destroying associated information. The file
can be reopened with little loss of time. temp_close and
reopen are intended for use during long solutions. If there is
a crash during solution, a temp-closed TimeSeries will be left
in a valid state for later use.
"""
self._sort()
self.tsf.close()
def open_with_retry(
self,
fname=None,
mode=None,
driver=None,
comm=None
):
if fname is None:
fname = self.filename
if mode is None:
mode = self.mode
if driver is None:
driver = self.driver
if comm is None:
comm = self.comm
if isinstance(comm, petsc4py.PETSc.Comm):
comm = comm.tompi4py()
logSERIES('fname, mode, driver, comm', fname, mode, driver, comm)
try:
if driver == 'mpio':
logSERIES('trying 4-argument open')
comm.Barrier()
logSERIES('comm.rank, comm.size', comm.rank, comm.size)
tsf = h5py.File(fname, mode=mode,
driver=driver, comm=comm)
else:
logSERIES('trying 3-argument open')
tsf = h5py.File(fname, mode=mode,
driver=driver)
except OSError:
retries_left = self.retries
if retries_left <= 0:
logSERIES('open failed: re-raising exception')
raise
while retries_left > 0:
logSERIES('reopen failed with OSError: {n} retries left'.format(
n=retries_left
))
logSERIES('tb.format_exc()', tb.format_exc())
time.sleep(self.retry_interval)
try:
if driver == 'mpio':
logSERIES('trying 4-argument open')
comm.Barrier()
logSERIES('comm.rank, comm.size', comm.rank, comm.size)
tsf = h5py.File(fname, mode=mode,
driver=driver, comm=comm)
else:
logSERIES('trying 3-argument open')
tsf = h5py.File(fname, mode=mode,
driver=driver)
failed = False
except OSError:
failed = True
if retries_left <= 1:
raise
if not failed:
break
retries_left -= 1
return tsf
def reopen(self):
"""
Reopen a temp_closed TimeSeries
"""
mode = self.mode if self.mode == 'r' else 'r+'
self._tsf = self.open_with_retry(mode=mode)
def close(self):
if not hasattr(self, '_tsf') or not self._tsf:
self.reopen()
self._sort()
self.tsf.close()
del self._tsf
gc.collect()
# def __del__(self):
# self.close()
def store(self, data, t, k=None):
if isinstance(data, petsc4py.PETSc.Vec):
vals = data.array.reshape(self.grid.Vlshape, order='F')
else:
vals = data.reshape(self.grid.Vlshape, order='F')
logSERIES('k, t', k, t)
if k is None:
k = self.lastk + 1
self.lastk = k
self.ks = np.append(self.ks, k)
self.ts = np.append(self.ts, t)
key = 'data' + str(k)
try:
dset = self.tsf.create_dataset(key, self.grid.Vlshape,
dtype=vals.dtype)
except OSError:
dset = self.tsf[key] # dset already exists
Cvals = vals.copy(order='C') # h5py requires C order
if self.rank_owns_file:
dset.write_direct(Cvals)
else:
dset[self.myslice] = Cvals
dset.attrs['k'] = k
dset.attrs['t'] = t
self.sorted = False
self.tsf.flush()
def store_slice(self, ranges, data, t, tol=1e-7):
shape = (self.grid.dof,) + tuple(
r[1] - r[0] for r in ranges
)
slc = (slice(0, None),) + tuple(
slice(*r) for r in ranges
)
vals = data.reshape(shape, order='F')
na, nb, ta, tb = self.find_time(t)
logSERIES('na, nb, ta, tb', na, nb, ta, tb)
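# Use whichever flanking time point is closer to t.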
if abs(t-ta) <= abs(tb-t):
n, tn = na, ta
else:
n, tn = nb, tb
if (
(not (t == 0.0 and tn == 0.0)) and
((self.sts.size <= n) or
(abs(t-tn)/max(abs(t), abs(tn)) > tol))
):
#
# New time point: append it to the lists
#
k = self.lastk + 1
self.lastk = k
self.ks = np.append(self.ks, k)
self.ts = np.append(self.ts, t)
key = 'data' + str(k)
dset = self.tsf.create_dataset(key, self.grid.Vlshape,
dtype=vals.dtype)
logSERIES('k, t', k, t)
dset.attrs['k'] = k
dset.attrs['t'] = t
self.sorted = False
else:
k = n
key = 'data' + str(k)
dset = self.tsf[key]
dset[slc] = vals
self.tsf.flush()
def times(self):
self._sort()
return self.ts
def | (self):
self._sort()
return self.ks
def sorted_times(self):
self._sort()
return self.sts
def sorted_steps(self):
self._sort()
return self.order
def retrieve_by_number(self, k):
key = 'data' + str(k)
dset = self.tsf[key]
if self.rank_owns_file:
return np.array(dset)
else:
return np.array(dset)[self.myslice]
def find_time(self, t):
"""
Find the time points closest to t
Returns tuple (a, b, ta, tb)
a and b are the numbers (ints) of the points flanking t. ta
and tb (floats) are the corresponding times. If there is a
time point exactly matching t, then a == b, ta == tb == t.
"""
self._sort()
if self.sts.size == 0:
return (0, 0, t - 1.0, t - 1.0)
if (t <= self.sts[0]):
a = 0
return (self.ks[a], self.ks[a], self.sts[a], self.sts[a])
elif (t >= self.sts[-1]):
a = len(self.sts) - 1
return (self.ks[a], self.ks[a], self.sts[a], self.sts[a])
else:
b = self.sts.searchsorted(t)
nb = self.order[b]
tb = self.sts[b]
if (b >= len(self.order) - 1):
return(b, b, self.sts[b], self.sts[b])
elif tb == t:
return(b, b, tb, tb)
a = b - 1
na = self.order[a]
ta = self.sts[a]
return (a, b, ta, tb)
def retrieve_by_time(self, t):
"""
Retrieve a time point.
Arguments:
t: the time to be retrieved.
"""
na, nb, ta, tb = self.find_time(t)
adata = self.retrieve_by_number(na)
if na == nb:
return adata
bdata = self.retrieve_by_number(nb)
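# Linearly interpolate in time between the two flanking snapshots.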
data = ((t-ta)*bdata + (tb-t)*adata)/(tb-ta)
return(data)
class TimeSeries(KSFDTimeSeries):
def __init__(
self,
basename,
grid=None,
comm=None,
mpiok=False,
mode='r+',
retries=0,
retry_interval=60
):
"""
Open a KSFD.TimeSeries
Required parameters:
basename: the name of the TimeSeries. (This is a prefix of the
names of the HDF5 files in which data are stored.)
Optional parameters:
grid: The KSFD.Grid on which the PETSc Vecs to be saved are
defined. This must be supplied when creating a new
TimeSeries. When opening an existing series, it will be
read from the file if not supplied.
comm: the MPI communicator. (If not supplied, grid.comm is
used.)
mpiok=False: whether it is Ok to use parallel HDF5.
mode: the file mode (See h5py.h5File.)
retries=0: If nonzero, retry failed opens this many times.
retry_interval=60: time (in seconds) between successive
retries. Note: the open will block while waiting for a
successful retry.
"""
if comm:
self._comm = comm
elif grid:
self._comm = grid.comm
else:
self._comm = MPI.COMM_SELF
self._mode = mode
self._size = self.comm.size
self._rank = self.comm.rank
self.mpiok = mpiok
super().__init__(basename, size=self.size, rank=self.rank,
mpiok=mpiok, mode=mode, retries=retries,
retry_interval=retry_interval)
if (grid):
self.set_grid(grid)
self.grid_save()
else:
self.grid_load()
class Gatherer(KSFDTimeSeries):
"""
Gatherer is a special-purpose iterator to allow a single
sequential process to read the separate files written by a
TimeSeries run under MPI. For instance, to reconstruct the global
vector at the last time (assuming it fits in memory in a single
process):
gather = Gatherer(basename='base', size=4)
grid = gather.grid
lastk = gather.sorted_steps()[-1]
vec = grid.Vdmda.createGlobalVec()
vecarray = vec.array.reshape(grid.globalVshape, order='F')
for series in gather:
rank = series.rank
vecarray[series.slice] = series.retrieve_by_number(lastk)
<do something with vec...>
This gatherer would iterate through files bases4r0.h5,
bases4r1.h5, bases4r2.h5, and bases4r3.h5. Note that with every
iteration it closes the last file and opens the next. Thus, if you
want to iterate over all times, it is more efficient to nest the
loops like this:
for series in gather:
for t in series.times():
<do something for this file at this time)
than the other way. (The other way would be more intuitive, but my
expectation is that this class will be used mostly to gather all
TimeSeries files into a single file, which then can be processed
efficiently as a TimeSeries.)
"""
def __init__(
self,
basename,
size=None,
retries=0,
retry_interval=60
):
"""
Required positional parameter
basename: the prefix of the filenames for the TimeSeries being
read. As a convenience, this can be a special filename
that matches the regular expression '(.+)s(\d+)@.*' (That
is a literal '@'. Then the basename is the (.+) and the
size is the (\d+) following the 's' and preceding
'@'. For example, "bases4@' or '[email protected]' would both
serve for a series with basename 'base' and size 4.
Optional keyword parameter:
size=None: This argument can be omitted only if the basename
has the special @ filename format. Otherwise, it must be
supplied.
Gatherer is read-only (mode 'r').
"""
self._comm = MPI.COMM_SELF
self.retries = retries
self.retry_interval = retry_interval
gatherre = r'(.+)s(\d+)@.*'
fname_match = re.fullmatch(gatherre, basename)
if fname_match:
base = fname_match[1]
size = int(fname_match[2])
else:
base = basename
size = size
self.basename = base
if not isinstance(size, int) or size <= 0:
raise ValueError(
f'size {size} is not a positive int'
)
#
# This opens the first file. We have to do that so as to read
# and initialize things like grid, times, etc.
#
super().__init__(
basename=base,
size=size,
rank=0,
mpiok=False,
mode='r',
retries=retries,
retry_interval=retry_interval
)
self.set_ranges()
#
# Since we have to open the rank 0 file before starting
# iteration, the following flag is used to determine whether
# to open a new file when __iter__ is called
#
self.iter_started = False
self.iter_stopped = False
def set_ranges(self):
self.rank_owns_file = True
gd = self.grid_read()
self.grid_load(gd)
self._ranges = gd['ranges']
self._shape = (self.dof,) + tuple(
r[1] - r[0] for r in self.ranges
)
self._slice = (slice(0, None),) + tuple(
slice(*r) for r in self.ranges
)
@property
def slice(self):
return self._slice
@property
def shape(self):
return self._shape
def __iter__(self):
return self
def __next__(self):
if self.iter_stopped:
#
# We previously exhausted the iteration. Restart it
#
self.tsf.close()
self.__init__(self.basename,
self.size,
retries=self.retries,
retry_interval=self.retry_interval
)
elif self.iter_started:
#
# We're not just starting: move on to next file
#
self.tsf.close()
self._rank = self.rank + 1
if self.rank >= self.size:
self.iter_stopped = True
raise StopIteration
super().__init__(
basename=self.basename,
size=self.size,
rank=self.rank,
mpiok=False,
mode='r',
retries=self.retries,
retry_interval=self.retry_interval
)
self.set_ranges()
self.iter_started = True
self.iter_stopped = False
return self
| steps |
JsInterpreterLogger.js | /** @file Observes a JSInterpreter and logs to the designated browser console. */
'use strict';
import Observer from './Observer';
/**
* Observer responsible for logging to the provided browser console when
* the interpreter it is observing raises log-worthy events.
* @constructor
* @param {Console} window console API
*/
var JsInterpreterLogger = function (outputConsole) {
/** @private {Console} */
this.outputConsole_ = outputConsole;
/** @private {Observer} */
this.observer_ = new Observer();
};
export default JsInterpreterLogger;
/**
* Attach the logger to a particular JSInterpreter instance.
* @param {JSInterpreter} jsInterpreter
*/
JsInterpreterLogger.prototype.attachTo = function (jsInterpreter) {
this.observer_.observe(jsInterpreter.onExecutionWarning,
this.log.bind(this));
};
/**
* Detach the logger from whatever interpreter instance it is currently
* attached to, unregistering handlers.
* Safe to call when the logger is already detached.
*/
JsInterpreterLogger.prototype.detach = function () {
this.observer_.unobserveAll();
};
/** | * @see Console.log
*/
JsInterpreterLogger.prototype.log = function () {
if (this.outputConsole_ && this.outputConsole_.log) {
this.outputConsole_.log.apply(this.outputConsole_, arguments);
}
}; | * Log to the console object we were constructed with.
* @param {*} arguments... |
leadersScrapper.py | from bs4 import BeautifulSoup
class LeadersScrapper: |
def scrap(self, html):
soup = BeautifulSoup(html, features="html.parser")
title = soup.find("h1").text
date = soup.find("div",{"class":"infos"}).text
data = [ arti.text for arti in soup.find("div", {"class":"article_body"}).findChildren()]
idx = data.index("Lire aussi")
article = " ".join(data[:idx])
return {"title":title, "date":date, "article":article} | |
operations_api.rs | use crate::common::server_fixture::ServerFixture;
use influxdb_iox_client::{management::generated_types::*, operations};
use std::time::Duration;
#[tokio::test]
async fn test_operations() {
let server_fixture = ServerFixture::create_single_use().await;
let mut management_client = server_fixture.management_client();
let mut operations_client = server_fixture.operations_client();
let running_ops = operations_client
.list_operations()
.await
.expect("list operations failed");
assert_eq!(running_ops.len(), 0);
let nanos = vec![Duration::from_secs(20).as_nanos() as _, 1];
let iox_operation = management_client
.create_dummy_job(nanos.clone())
.await
.expect("create dummy job failed");
let running_ops = operations_client
.list_operations()
.await
.expect("list operations failed");
assert_eq!(running_ops.len(), 1);
assert_eq!(running_ops[0].operation.name, iox_operation.operation.name);
let id = iox_operation.operation.id();
let iox_operation = operations_client.get_operation(id).await.unwrap();
let job = iox_operation.metadata.job.expect("expected a job");
assert_eq!(iox_operation.metadata.total_count, 2);
assert_eq!(iox_operation.metadata.pending_count, 1);
assert_eq!(
job,
operation_metadata::Job::Dummy(Dummy {
nanos,
db_name: String::new()
})
);
// Check wait times out correctly
let iox_operation = operations_client
.wait_operation(id, Some(Duration::from_micros(10)))
.await
.expect("failed to wait operation");
assert!(!iox_operation.operation.done);
// Shouldn't specify wall_nanos as not complete
assert_eq!(iox_operation.metadata.wall_nanos, 0);
let wait = tokio::spawn(async move {
let mut operations_client = server_fixture.operations_client();
operations_client
.wait_operation(id, None)
.await
.expect("failed to wait operation")
});
| operations_client
.cancel_operation(id)
.await
.expect("failed to cancel operation");
let waited = wait.await.unwrap();
assert!(waited.operation.done);
assert!(waited.metadata.wall_nanos > 0);
assert!(waited.metadata.cpu_nanos > 0);
assert_eq!(waited.metadata.pending_count, 0);
assert_eq!(waited.metadata.total_count, 2);
assert_eq!(waited.metadata.cancelled_count, 1);
match waited.operation.result {
Some(operations::generated_types::operation::Result::Error(status)) => {
assert_eq!(status.code, tonic::Code::Cancelled as i32)
}
_ => panic!("expected error"),
}
} | |
CutPlanner.py | from svgelements import *
class CutPlanner:
@staticmethod
def bounding_box(elements):
if isinstance(elements, SVGElement):
elements = [elements]
elif isinstance(elements, list):
try:
elements = [e.object for e in elements if isinstance(e.object, SVGElement)]
except AttributeError:
pass
boundary_points = []
for e in elements:
box = e.bbox(False)
if box is None:
continue
top_left = e.transform.point_in_matrix_space([box[0], box[1]])
top_right = e.transform.point_in_matrix_space([box[2], box[1]])
bottom_left = e.transform.point_in_matrix_space([box[0], box[3]])
bottom_right = e.transform.point_in_matrix_space([box[2], box[3]])
boundary_points.append(top_left)
boundary_points.append(top_right)
boundary_points.append(bottom_left)
boundary_points.append(bottom_right)
if len(boundary_points) == 0:
return None
xmin = min([e[0] for e in boundary_points])
ymin = min([e[1] for e in boundary_points])
xmax = max([e[0] for e in boundary_points])
ymax = max([e[1] for e in boundary_points])
return xmin, ymin, xmax, ymax
@staticmethod
def is_inside(inner_path, outer_path):
"""
Test that inner_path is inside outer_path.
:param inner_path: inner path
:param outer_path: outer path
:return: whether inner_path is wholly inside outer_path.
"""
if not hasattr(inner_path, 'bounding_box'):
inner_path.bounding_box = CutPlanner.bounding_box(inner_path)
if not hasattr(outer_path, 'bounding_box'):
outer_path.bounding_box = CutPlanner.bounding_box(outer_path)
if outer_path.bounding_box[0] > inner_path.bounding_box[0]:
# outer minx > inner minx (is not contained)
return False
if outer_path.bounding_box[1] > inner_path.bounding_box[1]:
# outer miny > inner miny (is not contained)
return False
if outer_path.bounding_box[2] < inner_path.bounding_box[2]:
# outer maxx < inner maxx (is not contained)
return False
if outer_path.bounding_box[3] < inner_path.bounding_box[3]:
# outer maxy < inner maxy (is not contained)
return False
if outer_path.bounding_box == inner_path.bounding_box:
if outer_path == inner_path: # This is the same object.
return False
if not hasattr(outer_path, 'vm'):
outer_path = Polygon([outer_path.point(i / 100.0, error=1e4) for i in range(101)])
vm = VectorMontonizer()
vm.add_cluster(outer_path)
outer_path.vm = vm
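# Sample 101 points along the inner path and require every point to
# fall inside the outer polygon.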
for i in range(101):
p = inner_path.point(i / 100.0, error=1e4)
if not outer_path.vm.is_point_inside(p.x, p.y):
return False
return True
@staticmethod
def optimize_cut_inside(paths):
optimized = Path()
if isinstance(paths, Path):
paths = [paths]
subpaths = []
for path in paths:
subpaths.extend([abs(Path(s)) for s in path.as_subpaths()])
for j in range(len(subpaths)):
for k in range(j + 1, len(subpaths)):
if CutPlanner.is_inside(subpaths[k], subpaths[j]):
t = subpaths[j]
subpaths[j] = subpaths[k]
subpaths[k] = t
for p in subpaths:
optimized += p
try:
del p.vm
except AttributeError:
pass
try:
del p.bounding_box
except AttributeError:
pass
return optimized
@staticmethod
def eulerian_fill(paths, distance=16):
fill = Path()
for path in paths:
efill = EulerianFill(distance)
trace = Polygon([path.point(i / 100.0, error=1e-4) for i in range(101)])
points = efill.get_fill(trace)
start = 0
i = 0
while i < len(points):
p = points[i]
if p is None:
fill += Polyline(points[start:i])
start = i + 1
i += 1
if start != i:
fill += Polyline(points[start:i])
return fill
@staticmethod
def length_travel(paths):
distance = 0.0
for p in paths:
for s in p:
if isinstance(s, Move):
if s.start is not None:
distance += Point.distance(s.start, s.end)
return distance
@staticmethod
| def optimize_travel(paths):
optimized = Path()
if isinstance(paths, Path):
paths = [paths]
subpaths = []
for path in paths:
subpaths.extend([abs(Path(s)) for s in path.as_subpaths()])
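# 2-opt style improvement: keep reversing runs of subpaths while doing so
# reduces the total travel distance (delta_distance < 0).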
improved = True
while improved:
improved = False
for j in range(len(subpaths)):
for k in range(j + 1, len(subpaths)):
new_cut = CutPlanner.delta_distance(subpaths, j, k)
if new_cut < 0:
CutPlanner.cross(subpaths, j, k)
improved = True
for p in subpaths:
optimized += p
return optimized
@staticmethod
def cross(subpaths, j, k):
"""
Reverses subpaths flipping the individual elements from position j inclusive to
k exclusive.
:param subpaths:
:param j:
:param k:
:return:
"""
for q in range(j, k):
subpaths[q].direct_close()
subpaths[q].reverse()
subpaths[j:k] = subpaths[j:k][::-1]
@staticmethod
def delta_distance(subpaths, j, k):
distance = 0.0
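# Net change in travel distance if subpaths[j:k] were reversed: only the
# joins at the two ends of the run are affected.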
k -= 1
a1 = subpaths[j][0].end
b0 = subpaths[k][-1].end
if k < len(subpaths) - 1:
b1 = subpaths[k + 1][0].end
d = Point.distance(b0, b1)
distance -= d
d = Point.distance(a1, b1)
distance += d
if j > 0:
a0 = subpaths[j - 1][-1].end
d = Point.distance(a0, a1)
distance -= d
d = Point.distance(a0, b0)
distance += d
return distance
@staticmethod
def distance_path(subpaths):
distance = 0.0
for s in range(len(subpaths) - 1):
j = subpaths[s]
k = subpaths[s + 1]
d = Point.distance(j[-1].end, k[0].end)
distance += d
return distance
@staticmethod
def is_order_constrained(paths, constraints, j, k):
"""Is the order of the sequences between j and k constrained. Such that reversing this order will violate
the constraints."""
for q in range(j, k):
# search between j and k.
first_path = paths[q]
for constraint in constraints:
if first_path is not constraint[0]:
# Constraint does not apply to the value at q.
continue
for m in range(q + 1, k):
second_path = paths[m]
if second_path is constraint[1]:
# Constraint demands the order must be first_path then second_path.
return True
return False
@staticmethod
def optimize_general(paths):
optimized = Path()
if isinstance(paths, Path):
paths = [paths]
subpaths = []
for path in paths:
subpaths.extend([abs(Path(s)) for s in path.as_subpaths()])
constraints = []
for j in range(len(subpaths)):
for k in range(j + 1, len(subpaths)):
if CutPlanner.is_inside(subpaths[k], subpaths[j]):
constraints.append((subpaths[k], subpaths[j]))
elif CutPlanner.is_inside(subpaths[j], subpaths[k]):
constraints.append((subpaths[j], subpaths[k]))
for j in range(len(subpaths)):
for k in range(j + 1, len(subpaths)):
if CutPlanner.is_inside(subpaths[k], subpaths[j]):
t = subpaths[j]
subpaths[j] = subpaths[k]
subpaths[k] = t
# for constraint in constraints:
# success = False
# for q in range(len(subpaths)):
# first_path = subpaths[q]
# if first_path is constraint[0]:
# for m in range(q, len(subpaths)):
# second_path = subpaths[m]
# if second_path is constraint[1]:
# success = True
improved = True
while improved:
improved = False
for j in range(len(subpaths)):
for k in range(j + 1, len(subpaths)):
new_cut = CutPlanner.delta_distance(subpaths, j, k)
if new_cut < 0:
if CutPlanner.is_order_constrained(subpaths, constraints, j, k):
# Our order is constrained. Performing 2-opt cross is disallowed.
continue
CutPlanner.cross(subpaths, j, k)
improved = True
for p in subpaths:
optimized += p
try:
del p.vm
except AttributeError:
pass
try:
del p.bounding_box
except AttributeError:
pass
return optimized
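# Usage sketch (illustrative only; `path` is assumed to be a Path object from
# the surrounding module's SVG path classes):
#
#     path = CutPlanner.optimize_cut_inside(path)   # cut inner shapes first
#     path = CutPlanner.optimize_travel(path)       # 2-opt travel reduction
#     fill = CutPlanner.eulerian_fill([path], distance=16)
#
# optimize_general() combines both concerns: it orders contained subpaths
# before their containers and only accepts 2-opt moves that preserve that
# ordering.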
class GraphNode(Point):
def __init__(self, x, y=None):
Point.__init__(self, x, y)
self.connections = []
self.visited = 0
class Segment:
def __init__(self, a, b, index=0):
self.visited = 0
self.a = a
self.b = b
self.active = False
self.value = 'RUNG'
self.index = index
self.bisectors = []
self.object = None
def __len__(self):
# [False, i, p0, p1, high, low, m, b, path]
return 9
def __getitem__(self, item):
if item == 0:
return self.active
if item == 1:
return self.index
if item == 2:
return self.a
if item == 3:
return self.b
if item == 4:
if self.a.y > self.b.y:
return self.a
else:
return self.b
if item == 5:
if self.a.y < self.b.y:
return self.a
else:
return self.b
if item == 6:
if self.b[0] - self.a[0] == 0:
return float('inf')
return (self.b[1] - self.a[1]) / (self.b[0] - self.a[0])
if item == 7:
if self.b[0] - self.a[0] == 0:
return float('inf')
im = (self.b[1] - self.a[1]) / (self.b[0] - self.a[0])
return self.a[1] - (im * self.a[0])
if item == 8:
return self.object
def intersect(self, segment):
return Segment.line_intersect(self.a[0], self.a[1], self.b[0], self.b[1],
segment.a[0], segment.a[1], segment.b[0], segment.b[1],)
def sort_bisectors(self):
def distance(a):
return self.a.distance_to(a)
self.bisectors.sort(key=distance)
    def get_intercept(self, y):
        # Guard the vertical-segment case, consistent with add_cluster() below.
        try:
            im = (self.b[1] - self.a[1]) / (self.b[0] - self.a[0])
        except ZeroDivisionError:
            return self.a[0]
        if isnan(im) or isinf(im):
            return self.a[0]
        ib = self.a[1] - (im * self.a[0])
        return (y - ib) / im
@staticmethod
def line_intersect(x1, y1, x2, y2, x3, y3, x4, y4):
denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
if denom == 0:
return None # Parallel.
ua = ((x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)) / denom
ub = ((x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)) / denom
if 0.0 <= ua <= 1.0 and 0.0 <= ub <= 1.0:
return (x1 + ua * (x2 - x1)), (y1 + ua * (y2 - y1))
return None
class Graph:
"""
If the graph is fully Eulerian then there should be an even number of input nodes.
These nodes are treated such that even nodes are input and odd nodes are output nodes.
If partially Eulerian and odd, then the final node is the start or end node.
If setup as a circuit then each node should link input to output effectively.
If a graph is outline, it will be in order, from a to b for each edge, and looped.
"""
def __init__(self):
self.nodes = []
self.links = []
def add_shape(self, series, close=True):
first_node = None
last_node = None
for i in range(len(series)):
m = series[i]
current_node = self.new_node(m)
if i == 0:
first_node = current_node
if last_node is not None:
segment = self.link(last_node, current_node)
segment.index = i
segment.value = 'EDGE'
last_node = current_node
if close:
segment = self.link(last_node, first_node)
segment.index = len(series)
segment.value = 'EDGE'
@staticmethod
def monotone_fill(graph, outlines, min, max, distance):
crawler = VectorMontonizer(low_value=min, high_value=max, start=min)
for outline in outlines:
crawler.add_segments(outline.links)
itr = 0
while crawler.valid_range():
crawler.next_intercept(distance)
crawler.sort_actives()
y = crawler.current
for i in range(1, len(crawler.actives), 2):
left_segment = crawler.actives[i-1]
right_segment = crawler.actives[i]
left_segment_x = crawler.intercept(left_segment, y)
right_segment_x = crawler.intercept(right_segment, y)
left_node = graph.new_node((left_segment_x, y))
right_node = graph.new_node((right_segment_x, y))
row = graph.link(left_node, right_node)
row.value = 'RUNG'
row.index = itr
left_segment.bisectors.append(left_node)
right_segment.bisectors.append(right_node)
itr += 1
for outline in outlines:
itr = 0
current = None
previous = None
for i in range(len(outline.links)):
s = outline.links[i]
if len(s.bisectors) == 0:
continue
s.sort_bisectors()
for bi in s.bisectors:
if previous is not None:
segment = graph.link(previous, bi)
segment.value = 'EDGE'
segment.index = itr
itr += 1
else:
current = bi
previous = bi
s.bisectors.clear()
if current is not None and previous is not None:
segment = graph.link(previous, current)
segment.value = 'EDGE'
segment.index = itr
def new_node(self, point):
g = GraphNode(point)
self.nodes.append(g)
return g
def new_edge(self, a, b):
s = Segment(a, b)
self.links.append(s)
return s
def detach(self, segment):
self.links.remove(segment)
segment.a.connections.remove(segment)
segment.b.connections.remove(segment)
def link(self, a, b):
segment = self.new_edge(a, b)
segment.a.connections.append(segment)
segment.b.connections.append(segment)
return segment
def double(self):
"""
        Makes any graph Eulerian: doubling every edge gives every node an even
        degree, so any doubled graph is Eulerian by definition.
:return:
"""
for i in range(len(self.links)):
s = self.links[i]
second_copy = self.link(s.a, s.b)
if s.value == 'RUNG':
second_copy.value = 'SCAFFOLD_RUNG'
else:
second_copy.value = 'SCAFFOLD'
second_copy.index = None
def double_odd_edge(self):
"""
        Makes any outline path an Eulerian path.
:return:
"""
for i in range(len(self.links)):
segment = self.links[i]
if segment.value == 'EDGE' and segment.index & 1:
second_copy = self.link(segment.a, segment.b)
second_copy.value = 'SCAFFOLD'
second_copy.index = None
def walk(self, points):
if len(self.nodes) == 0:
return
walker = GraphWalker(self)
walker.make_walk()
walker.clip_scaffold_ends()
walker.clip_scaffold_loops()
walker.add_walk(points)
return points
def is_eulerian(self):
ends = 0
for n in self.nodes:
if len(n.connections) & 1:
ends += 1
if ends > 2:
return False
return True
def is_euloopian(self):
for n in self.nodes:
if len(n.connections) & 1:
return False
return True
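# Minimal sketch (assumed inputs, not part of the original file): build a
# scanline-fill graph for a square outline and walk it.
#
#     outline = Graph()
#     outline.add_shape([(0, 0), (100, 0), (100, 100), (0, 100)])
#     graph = Graph()
#     Graph.monotone_fill(graph, [outline], 0, 100, 10)
#     graph.double_odd_edge()
#     points = []
#     graph.walk(points)   # fill polyline; None entries mark discontinuities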
class GraphWalker:
"""
Graph Walker takes a graph object and finds walks within it.
If the graph is discontinuous it will find no segment between these elements and add a blank segment between them.
"""
def __init__(self, graph):
self.graph = graph
self.walk = list()
self.flip_start = None
self.flip_end = None
def other_node_for_segment(self, current_node, next_segment):
if current_node is next_segment.a:
return next_segment.b
else:
return next_segment.a
def reset_visited(self):
for e in self.walk:
if e is None:
continue
e.visited = 0
def make_walk(self):
itr = 0
for g in self.graph.nodes:
if not g.visited:
if itr != 0:
self.walk.append(None) # Segment is None. There is no link here.
self.make_walk_node(g)
itr += 1
def make_walk_node(self, g):
"""
        Starting from the given start node, it makes a complete walk in an Eulerian
        circuit, in the manner of Hierholzer's algorithm: it adds the first loop from
        the start node, then walks that loop, splicing in any additional loops it
        finds at the nodes it visits.
:param g:
:return:
"""
start = len(self.walk)
self.walk.append(g)
g.visited += 1
self.add_loop(start, g)
i = start
while i < len(self.walk):
node = self.walk[i]
unused = self.find_unused_connection(node)
if unused is None:
i += 2
continue
i += self.add_loop(i, node)
i += 2
def add_loop(self, index, node):
"""
Adds a loop from the current graphnode, without revisiting any nodes.
Returns the altered index caused by adding that loop.
Travels along unused connections until no more travel is possible. If properly Eulerian,
this will only happen when it is looped back on itself.
:param index: index we are adding loop to.
:param node: Node to find alternative path through.
:return: new index after loop is added to the walk.
"""
index += 1
i = index
while True:
unused = self.find_unused_connection(node)
if unused is None:
break
segment = node.connections[unused]
self.walk.insert(i, segment)
i += 1
node.visited += 1
segment.visited += 1
node = self.other_node_for_segment(node, segment)
self.walk.insert(i, node)
i += 1
return i - index
def find_unused_connection(self, node):
"""
Finds the first unused edge segment within the graph node, or None if all connections are used.
:param node: Node to find unused edge segment within.
:return: index of node connection within the graphnode
"""
value = None
for index, c in enumerate(node.connections):
if not c.visited:
if value is None:
value = index
if c.value == 'RUNG':
return index
return value
def add_walk(self, points):
"""
Adds nodes within the walk to the points given to it.
If there is an unconnected section, it will simply create a link across where no link exists.
:param points:
:return:
"""
for i in range(0, len(self.walk), 2):
if i + 1 != len(self.walk):
if self.walk[i+1] is None:
points.append(None)
points.append(self.walk[i])
def remove_loop(self, from_pos, to_pos):
"""
Removes values between the two given points.
Since start and end are the same node, it leaves one in place.
:param from_pos:
:param to_pos:
:return:
"""
if from_pos == to_pos:
return 0
min_pos = min(from_pos, to_pos)
max_pos = max(from_pos, to_pos)
del self.walk[min_pos:max_pos]
return max_pos - min_pos
def remove_biggest_loop_in_range(self, start, end):
"""
Checks scaffolding walk for loops, and removes them if detected.
It resets the visited values for the scaffold walk.
It iterates from the outside to the center, setting the visited value for each node.
If it finds a marked node, that is the biggest loop within the given walk.
:param start:
:param end:
:return:
"""
for i in range(start, end+2, 2):
n = self.get_node(i)
n.visited = None
for i in range(0, int((end-start)//2), 2):
left = start + i
right = end - i
s = self.get_node(left)
if s.visited is not None:
return self.remove_loop(left, s.visited)
# Loop Detected.
if left == right:
break
s.visited = left
e = self.get_node(right)
if e.visited is not None:
return self.remove_loop(right, e.visited)
# Loop Detected.
e.visited = right
return 0
def clip_scaffold_loops(self):
"""
Removes loops consisting of scaffolding from the walk.
Clips unneeded scaffolding.
:return:
"""
start = 0
index = 0
ie = len(self.walk)
while index < ie:
segment = None
try:
segment = self.walk[index+1]
except IndexError:
self.remove_biggest_loop_in_range(start, index)
return
if segment is None or segment.value == 'RUNG':
# Segment is essential.
if start != index:
ie -= self.remove_biggest_loop_in_range(start, index)
start = index + 2
index += 2
def remove_scaffold_ends_in_range(self, start, end):
current = end - start
new_end = end
limit = start + 2
while new_end >= limit:
j_segment = self.walk[new_end - 1]
if j_segment is None or j_segment.value == 'RUNG':
if new_end == end:
break
del self.walk[new_end + 1:end+1]
end = new_end
break
new_end -= 2
new_start = start
limit = end - 2
while new_start <= limit:
j_segment = self.walk[new_start+1]
if j_segment is None or j_segment.value == 'RUNG':
if new_start == start:
break
del self.walk[start:new_start]
start = new_start
break
new_start += 2
def clip_scaffold_ends(self):
"""Finds contiguous regions, and calls removeScaffoldEnds on that range."""
end = len(self.walk) - 1
index = end
while index >= 0:
segment = None
try:
segment = self.walk[index - 1]
except IndexError:
self.remove_scaffold_ends_in_range(index, end)
return
if segment is None:
self.remove_scaffold_ends_in_range(index, end)
end = index - 2
index -= 2
def two_opt(self):
v = self.get_value()
while True:
new_value = self.two_opt_cycle(v)
if v == new_value:
break
def two_opt_cycle(self, value):
if len(self.walk) == 0:
return 0
swap_start = 0
walk_end = len(self.walk)
while swap_start < walk_end:
swap_element = self.walk[swap_start]
m = swap_element.visited
swap_end = swap_start + 2
while swap_end < walk_end:
current_element = self.walk[swap_end]
if swap_element == current_element:
m -= 1
self.flip_start = swap_start + 1
self.flip_end = swap_end - 1
new_value = self.get_value()
if new_value > value:
value = new_value
                        self.walk[swap_start+1:swap_end] = self.walk[swap_start+1:swap_end][::-1]  # reverse
else:
self.flip_start = None
self.flip_end = None
if m == 0:
break
swap_end += 2
swap_start += 2
return value
def get_segment(self, index):
if self.flip_start is not None and \
self.flip_end is not None and \
self.flip_start <= index <= self.flip_end:
return self.walk[self.flip_end - (index - self.flip_start)]
return self.walk[index]
def get_node(self, index):
if self.flip_start is not None and \
self.flip_end is not None and \
self.flip_start <= index <= self.flip_end:
return self.walk[self.flip_end - (index - self.flip_start)]
try:
return self.walk[index]
except IndexError:
return None
def get_value(self):
"""
Path values with flip.
:return: Flipped path value.
"""
if len(self.walk) == 0:
return 0
value = 0
start = 0
end = len(self.walk) - 1
while start < end:
i_segment = self.get_segment(start+1)
if i_segment.value == 'RUNG':
break
start += 2
while end >= 2:
i_segment = self.get_segment(end-1)
if i_segment.value == 'RUNG':
break
end -= 2
j = start
while j < end:
j_node = self.get_node(j)
j += 1
j_segment = self.get_segment(j)
j += 1
if j_segment.value != 'RUNG':
# if the node connector is not critical, try to find and skip a loop
k = j
while k < end:
k_node = self.get_node(k)
k += 1
k_segment = self.get_segment(k)
k += 1
if k_segment.value == 'RUNG':
break
if k_node == j_node:
# Only skippable nodes existed before returned to original node, so skip that loop.
value += (k - j) * 10
j = k
j_node = k_node
j_segment = k_segment
break
if j_segment.value == 'SCAFFOLD':
value -= j_segment.a.distance_sq(j_segment.b)
elif j_segment.value == 'RUNG':
value -= j_segment.a.distance_sq(j_segment.b)
return value
class EulerianFill:
def __init__(self, distance):
self.distance = distance
def get_fill(self, points):
outline_graph = Graph()
outline_graph.add_shape(points, True)
graph = Graph()
min_y = min([p[1] for p in points])
max_y = max([p[1] for p in points])
Graph.monotone_fill(graph, [outline_graph], min_y, max_y, self.distance)
graph.double_odd_edge()
walk = list()
graph.walk(walk)
return walk
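# Example (illustrative): horizontal scanline fill of a square with rungs
# spaced 10 units apart; None entries in the result mark pen-up jumps.
#
#     square = [(0, 0), (100, 0), (100, 100), (0, 100), (0, 0)]
#     points = EulerianFill(10).get_fill(square)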
class VectorMontonizer:
    def __init__(self, low_value=-float('inf'), high_value=float('inf'), start=-float('inf')):
self.clusters = []
self.dirty_cluster_sort = True
self.actives = []
self.dirty_actives_sort = True
self.current = start
self.dirty_cluster_position = True
self.valid_low_value = low_value
self.valid_high_value = high_value
self.cluster_range_index = 0
self.cluster_low_value = float('inf')
self.cluster_high_value = -float('inf')
def add_segments(self, links):
self.dirty_cluster_position = True
self.dirty_cluster_sort = True
self.dirty_actives_sort = True
for s in links:
self.clusters.append((s[4].y, s)) # High
self.clusters.append((s[5].y, s)) # Low
def add_cluster(self, path):
self.dirty_cluster_position = True
self.dirty_cluster_sort = True
self.dirty_actives_sort = True
for i in range(len(path) - 1):
p0 = path[i]
p1 = path[i + 1]
if p0.y > p1.y:
high = p0
low = p1
else:
high = p1
low = p0
try:
m = (high.y - low.y) / (high.x - low.x)
except ZeroDivisionError:
m = float('inf')
b = low.y - (m * low.x)
if self.valid_low_value > high.y:
continue # Cluster before range.
if self.valid_high_value < low.y:
continue # Cluster after range.
cluster = Segment(p0, p1)
# cluster = [False, i, p0, p1, high, low, m, b, path]
if self.valid_low_value < low.y:
self.clusters.append((low.y, cluster))
if self.valid_high_value > high.y:
self.clusters.append((high.y, cluster))
if high.y >= self.current >= low.y:
cluster.active = True
self.actives.append(cluster)
def valid_range(self):
return self.valid_high_value >= self.current >= self.valid_low_value
def next_intercept(self, delta):
self.scanline(self.current + delta)
self.sort_actives()
return self.valid_range()
def sort_clusters(self):
if not self.dirty_cluster_sort:
return
self.clusters.sort(key=lambda e: e[0])
self.dirty_cluster_sort = False
def sort_actives(self):
if not self.dirty_actives_sort:
return
self.actives.sort(key=self.intercept)
self.dirty_actives_sort = False
def intercept(self, e, y=None):
if y is None:
y = self.current
m = e[6]
b = e[7]
        if isnan(m) or isinf(m):
low = e[5]
return low.x
return (y - b) / m
def find_cluster_position(self):
if not self.dirty_cluster_position:
return
self.dirty_cluster_position = False
self.sort_clusters()
self.cluster_range_index = -1
self.cluster_high_value = -float('inf')
self.increment_cluster()
while self.is_higher_than_cluster_range(self.current):
self.increment_cluster()
def in_cluster_range(self, v):
return not self.is_lower_than_cluster_range(v) and not self.is_higher_than_cluster_range(v)
def is_lower_than_cluster_range(self, v):
return v < self.cluster_low_value
def is_higher_than_cluster_range(self, v):
return v > self.cluster_high_value
def increment_cluster(self):
self.cluster_range_index += 1
self.cluster_low_value = self.cluster_high_value
if self.cluster_range_index < len(self.clusters):
self.cluster_high_value = self.clusters[self.cluster_range_index][0]
else:
self.cluster_high_value = float('inf')
if self.cluster_range_index > 0:
return self.clusters[self.cluster_range_index - 1][1]
else:
return None
def decrement_cluster(self):
self.cluster_range_index -= 1
self.cluster_high_value = self.cluster_low_value
if self.cluster_range_index > 0:
self.cluster_low_value = self.clusters[self.cluster_range_index - 1][0]
else:
self.cluster_low_value = -float('inf')
return self.clusters[self.cluster_range_index][1]
def is_point_inside(self, x, y):
self.scanline(y)
self.sort_actives()
for i in range(1, len(self.actives), 2):
prior = self.actives[i - 1]
after = self.actives[i]
if self.intercept(prior, y) <= x <= self.intercept(after, y):
return True
return False
def scanline(self, scan):
self.dirty_actives_sort = True
self.sort_clusters()
self.find_cluster_position()
while self.is_lower_than_cluster_range(scan):
c = self.decrement_cluster()
if c.active:
c.active = False
self.actives.remove(c)
else:
c.active = True
self.actives.append(c)
while self.is_higher_than_cluster_range(scan):
c = self.increment_cluster()
if c.active:
c.active = False
self.actives.remove(c)
else:
c.active = True
self.actives.append(c)
        self.current = scan
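# Sketch (not in the original file): VectorMontonizer also provides an
# even-odd point-in-polygon test via its scanline machinery, as used by
# CutPlanner.is_inside():
#
#     vm = VectorMontonizer()
#     vm.add_cluster([Point(0, 0), Point(100, 0), Point(100, 100), Point(0, 0)])
#     vm.is_point_inside(50, 25)   # True for this triangle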
User.js | import AppStorage from './AppStorage'
import axios from 'axios' // assumed import; some setups expose axios globally instead
class User {
login(data) {
return axios.post('/api/auth/login', data, {
headers: {
'Accept': 'application/json',
'Content-type': 'application/json',
}
})
}
storeSession(data) {
return new Promise((resolve, reject) => {
const user = JSON.stringify(data.user)
const token = data.access_token
AppStorage.store(user, token)
resolve()
})
}
    loggedIn() {
        return !!AppStorage.getToken()
    }
getToken() {
const storedToken = AppStorage.getToken();
return storedToken
}
logout() {
return axios.post('/api/auth/logout', {
token: AppStorage.getToken(),
}, {
headers: {
'Accept': 'application/json',
'Content-type': 'application/json'
}
});
}
clearStorage() {
AppStorage.clear();
}
info() {
        if (this.loggedIn()) {
return AppStorage.getUser()
}
return console.log("please login first")
}
// updateInfo() {
// return new Promise((resolve, reject) => {
// if (this.loggedIn) {
// axios.get('/api/user', {
// params: {
// token: AppStorage.getToken(),
// }
// }, {
// headers: {
// 'Accept': 'application/json',
// 'Content-type': 'application/json'
// }
// }).then(response => {
// if (response.data.error == false) {
// AppStorage.storeUser(JSON.stringify(response.data.item))
// resolve("update success")
// } else {
// reject("User update went wrong")
// }
// }).catch(err => {
// reject(err)
// })
// } else {
// reject("notloggedin")
// }
// })
// }
}
export default new User();
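// Usage sketch (illustrative only) from a consuming module:
//
//   import User from './User'
//
//   User.login({ email, password })
//       .then(response => User.storeSession(response.data))
//       .then(() => { /* user is now logged in; redirect as needed */ })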
file.py | # This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite.sequence.io.gff"
__author__ = "Patrick Kunzmann"
__all__ = ["GFFFile"]
import copy
import string
from urllib.parse import quote, unquote
import warnings
from ....file import TextFile, InvalidFileError
from ...annotation import Location
# All punctuation characters except
# percent, semicolon, equals, ampersand, comma
_NOT_QUOTED = "".join(
[char for char in string.punctuation if char not in "%;=&,"]
) + " "
class GFFFile(TextFile):
"""
This class represents a file in *Generic Feature Format 3*
(`GFF3 <https://github.com/The-Sequence-Ontology/Specifications/blob/master/gff3.md>`_)
format.
Similar to GenBank files, GFF3 files contain information about
features of a reference sequence, but in a more concise and better
parsable way.
However, it does not provide additional meta information.
This class serves as low-level API for accessing GFF3 files.
It is used as a sequence of entries, where each entry is defined as
a non-comment and non-directive line.
Each entry consists of values corresponding to the 9 columns of
GFF3:
============== =============================== ==========================================================
**seqid** ``str`` The ID of the reference sequence
**source** ``str`` Source of the data (e.g. ``Genbank``)
**type** ``str`` Type of the feature (e.g. ``CDS``)
**start** ``int`` Start coordinate of feature on the reference sequence
**end** ``int`` End coordinate of feature on the reference sequence
**score** ``float`` or ``None`` Optional score (e.g. an E-value)
**strand** ``Location.Strand`` or ``None`` Strand of the feature, ``None`` if feature is not stranded
**phase** ``int`` or ``None`` Reading frame shift, ``None`` for non-CDS features
**attributes** ``dict`` Additional properties of the feature
============== =============================== ==========================================================
Note that the entry index may not be equal to the line index,
because GFF3 files can contain comment and directive lines.
Notes
-----
Although the GFF3 specification allows mixing in reference sequence
data in FASTA format via the ``##FASTA`` directive, this class does
not support extracting the sequence information.
The content after the ``##FASTA`` directive is simply ignored.
Please provide the sequence via a separate file or read the FASTA
data directly via the :attr:`lines` attribute:
>>> import os.path
>>> from io import StringIO
>>> gff_file = GFFFile.read(os.path.join(path_to_sequences, "indexing_test.gff3"))
>>> fasta_start_index = None
>>> for directive, line_index in gff_file.directives():
... if directive == "FASTA":
... fasta_start_index = line_index + 1
>>> fasta_data = StringIO("\\n".join(gff_file.lines[fasta_start_index:]))
>>> fasta_file = FastaFile.read(fasta_data)
>>> for seq_string in fasta_file.values():
... print(seq_string[:60] + "...")
TACGTAGCTAGCTGATCGATGTTGTGTGTATCGATCTAGCTAGCTAGCTGACTACACAAT...
Examples
--------
Reading and editing of an existing GFF3 file:
>>> import os.path
>>> gff_file = GFFFile.read(os.path.join(path_to_sequences, "gg_avidin.gff3"))
>>> # Get content of first entry
>>> seqid, source, type, start, end, score, strand, phase, attrib = gff_file[0]
>>> print(seqid)
AJ311647.1
>>> print(source)
EMBL
>>> print(type)
region
>>> print(start)
1
>>> print(end)
1224
>>> print(score)
None
>>> print(strand)
Strand.FORWARD
>>> print(phase)
None
>>> print(attrib)
{'ID': 'AJ311647.1:1..1224', 'Dbxref': 'taxon:9031', 'Name': 'Z', 'chromosome': 'Z', 'gbkey': 'Src', 'mol_type': 'genomic DNA'}
>>> # Edit the first entry: Simply add a score
>>> score = 1.0
>>> gff_file[0] = seqid, source, type, start, end, score, strand, phase, attrib
>>> # Delete first entry
>>> del gff_file[0]
Writing a new GFF3 file:
>>> gff_file = GFFFile()
>>> gff_file.append_directive("Example directive", "param1", "param2")
>>> gff_file.append(
... "SomeSeqID", "Biotite", "CDS", 1, 99,
... None, Location.Strand.FORWARD, 0,
... {"ID": "FeatureID", "product":"A protein"}
... )
>>> print(gff_file) #doctest: +NORMALIZE_WHITESPACE
##gff-version 3
##Example directive param1 param2
SomeSeqID Biotite CDS 1 99 . + 0 ID=FeatureID;product=A protein
"""
def __init__(self):
super().__init__()
# Maps entry indices to line indices
self._entries = None
# Stores the directives as (directive text, line index)-tuple
self._directives = None
# Stores whether the file has FASTA data
self._has_fasta = None
self._index_entries()
self.append_directive("gff-version", "3")
@classmethod
def read(cls, file):
"""
Read a GFF3 file.
Parameters
----------
file : file-like object or str
The file to be read.
Alternatively a file path can be supplied.
Returns
-------
file_object : GFFFile
The parsed file.
"""
file = super().read(file)
file._index_entries()
return file
def insert(self, index, seqid, source, type, start, end,
score, strand, phase, attributes=None):
"""
Insert an entry at the given index.
Parameters
----------
index : int
Index where the entry is inserted.
If the index is equal to the length of the file, the entry
is appended at the end of the file.
seqid : str
The ID of the reference sequence.
source : str
Source of the data (e.g. ``Genbank``).
type : str
Type of the feature (e.g. ``CDS``).
start : int
Start coordinate of feature on the reference sequence.
end : int
End coordinate of feature on the reference sequence.
score : float or None
Optional score (e.g. an E-value).
strand : Location.Strand or None
Strand of the feature, ``None`` if feature is not stranded.
phase : int or None
Reading frame shift, ``None`` for non-CDS features.
attributes : dict, optional
Additional properties of the feature.
"""
if index == len(self):
self.append(seqid, source, type, start, end,
score, strand, phase, attributes)
else:
line_index = self._entries[index]
line = GFFFile._create_line(
seqid, source, type, start, end,
score, strand, phase, attributes
)
self.lines.insert(line_index, line)
self._index_entries()
def append(self, seqid, source, type, start, end,
score, strand, phase, attributes=None):
"""
Append an entry to the end of the file.
Parameters
----------
seqid : str
The ID of the reference sequence.
source : str
Source of the data (e.g. ``Genbank``).
type : str
Type of the feature (e.g. ``CDS``).
start : int
Start coordinate of feature on the reference sequence.
end : int
End coordinate of feature on the reference sequence.
score : float or None
Optional score (e.g. an E-value).
strand : Location.Strand or None
Strand of the feature, ``None`` if feature is not stranded.
phase : int or None
Reading frame shift, ``None`` for non-CDS features.
attributes : dict, optional
Additional properties of the feature.
"""
if self._has_fasta:
raise NotImplementedError(
"Cannot append feature entries, "
"as this file contains additional FASTA data"
)
line = GFFFile._create_line(
seqid, source, type, start, end, score, strand, phase, attributes
)
self.lines.append(line)
# Fast update of entry index by adding last line
self._entries.append(len(self.lines) - 1)
def append_directive(self, directive, *args):
"""
Append a directive line to the end of the file.
Parameters
----------
directive : str
Name of the directive.
*args : str
Optional parameters for the directive.
Each argument is simply appended to the directive, separated
by a single space character.
Raises
------
NotImplementedError
If the ``##FASTA`` directive is used, which is not
supported.
Examples
--------
>>> gff_file = GFFFile()
>>> gff_file.append_directive("Example directive", "param1", "param2")
>>> print(gff_file)
##gff-version 3
##Example directive param1 param2
"""
if directive.startswith("FASTA"):
raise NotImplementedError(
"Adding FASTA information is not supported"
)
directive_line = "##" + directive + " " + " ".join(args)
self._directives.append((directive_line[2:], len(self.lines)))
self.lines.append(directive_line)
def directives(self):
"""
Get the directives in the file.
Returns
-------
directives : list of tuple(str, int)
A list of directives, sorted by their line order.
The first element of each tuple is the name of the
directive (without ``##``), the second element is the index
of the corresponding line.
"""
# Sort in line order
return sorted(self._directives, key=lambda directive: directive[1])
def __setitem__(self, index, item):
seqid, source, type, start, end, score, strand, phase, attrib = item
line = GFFFile._create_line(
seqid, source, type, start, end, score, strand, phase, attrib
)
line_index = self._entries[index]
self.lines[line_index] = line
def __getitem__(self, index):
if (index >= 0 and index >= len(self)) or \
(index < 0 and -index > len(self)):
raise IndexError(
f"Index {index} is out of range for GFFFile with "
f"{len(self)} entries"
)
line_index = self._entries[index]
# Columns are tab separated
s = self.lines[line_index].strip().split("\t")
if len(s) != 9:
raise InvalidFileError(f"Expected 9 columns, but got {len(s)}")
seqid, source, type, start, end, score, strand, phase, attrib = s
seqid = unquote(seqid)
source = unquote(source)
type = unquote(type)
start = int(start)
end = int(end)
score = None if score == "." else float(score)
if strand == "+":
strand = Location.Strand.FORWARD
elif strand == "-":
strand = Location.Strand.REVERSE
else:
strand = None
phase = None if phase == "." else int(phase)
attrib = GFFFile._parse_attributes(attrib)
return seqid, source, type, start, end, score, strand, phase, attrib
def __delitem__(self, index):
line_index = self._entries[index]
del self.lines[line_index]
self._index_entries()
def __len__(self):
return len(self._entries)
def _index_entries(self):
"""
Parse the file for comment and directive lines.
Count these lines cumulatively, so that entry indices can be
mapped onto line indices.
Additionally track the line index of directive lines.
"""
self._directives = []
# Worst case allocation -> all lines contain actual entries
self._entries = [None] * len(self.lines)
self._has_fasta = False
entry_counter = 0
for line_i, line in enumerate(self.lines):
if len(line) == 0 or line[0] == " ":
# Empty line -> do nothing
pass
elif line.startswith("#"):
# Comment or directive
if line.startswith("##"):
# Directive
# Omit the leading '##'
self._directives.append((line[2:], line_i))
if line[2:] == "FASTA":
self._has_fasta = True
# This parser does not support bundled FASTA
# data
warnings.warn(
"Biotite does not support FASTA data mixed into "
"GFF files, the FASTA data will be ignored"
)
# To ignore the following FASTA data, stop
# parsing at this point
break
else:
# Actual entry
self._entries[entry_counter] = line_i
entry_counter += 1
# Trim to correct size
self._entries = self._entries[:entry_counter]
@staticmethod
def _create_line(seqid, source, type, start, end,
score, strand, phase, attributes):
        """
        Create a line for a newly created entry.
        """
        seqid = quote(seqid.strip(), safe=_NOT_QUOTED) \
                if seqid is not None else "."
        source = quote(source.strip(), safe=_NOT_QUOTED) \
                 if source is not None else "."
        type = type.strip()
        # Perform checks
        if len(seqid) == 0:
            raise ValueError("'seqid' must not be empty")
        if len(source) == 0:
            raise ValueError("'source' must not be empty")
        if len(type) == 0:
            raise ValueError("'type' must not be empty")
        if seqid[0] == ">":
            raise ValueError("'seqid' must not start with '>'")
        score = str(score) if score is not None else "."
        if strand == Location.Strand.FORWARD:
            strand = "+"
        elif strand == Location.Strand.REVERSE:
            strand = "-"
        else:
            strand = "."
        phase = str(phase) if phase is not None else "."
        attributes = ";".join(
            [quote(key, safe=_NOT_QUOTED) + "=" + quote(val, safe=_NOT_QUOTED)
             for key, val in attributes.items()]
        ) if attributes is not None and len(attributes) > 0 else "."
        return "\t".join(
            [seqid, source, type, str(start), str(end),
             str(score), strand, phase, attributes]
        )
@staticmethod
def _parse_attributes(attributes):
"""
Parse the *attributes* string into a dictionary.
"""
if attributes == ".":
return {}
attrib_dict = {}
attrib_entries = attributes.split(";")
for entry in attrib_entries:
compounds = entry.split("=")
if len(compounds) != 2:
raise InvalidFileError(
f"Attribute entry '{entry}' is invalid"
)
key, val = compounds
attrib_dict[unquote(key)] = unquote(val)
        return attrib_dict
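# Round-trip sketch (illustrative): percent-encoding keeps separator
# characters safe inside fields, e.g. a ';' in an attribute value:
#
#     GFFFile._create_line("seq1", "test", "gene", 1, 100, None,
#                          Location.Strand.FORWARD, None,
#                          {"ID": "gene1", "note": "a;b"})
#     # -> 'seq1\ttest\tgene\t1\t100\t.\t+\t.\tID=gene1;note=a%3Bb'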
table.rs | /// This is the state change table. It's indexed first by current state and then by the next
/// character in the pty stream.
use definitions::Action;
pub static STATE_CHANGE: [[u8; 256]; 16] =
[
// Beginning of UTF-8 2 byte sequence
// Beginning of UTF-8 3 byte sequence
// Beginning of UTF-8 4 byte sequence
[0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 92u8, 0u8, 92u8, 10u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8],
[80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8,
80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8,
0u8, 80u8, 0u8, 0u8, 80u8, 80u8, 80u8, 80u8, 35u8, 35u8, 35u8, 35u8,
35u8, 35u8, 35u8, 35u8, 35u8, 35u8, 35u8, 35u8, 35u8, 35u8, 35u8, 35u8,
180u8, 180u8, 180u8, 180u8, 180u8, 180u8, 180u8, 180u8, 180u8, 180u8,
2u8, 180u8, 36u8, 36u8, 36u8, 36u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8,
60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8,
60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8,
60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8,
60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8,
60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 112u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8],
[80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8,
80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8,
0u8, 80u8, 0u8, 0u8, 80u8, 80u8, 80u8, 80u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 12u8, 12u8,
12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8,
12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8,
12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8,
12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8,
12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8, 12u8,
12u8, 112u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8],
[80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8,
80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8,
0u8, 80u8, 0u8, 0u8, 80u8, 80u8, 80u8, 80u8, 32u8, 32u8, 32u8, 32u8,
32u8, 32u8, 32u8, 32u8, 32u8, 32u8, 32u8, 32u8, 32u8, 32u8, 32u8, 32u8,
2u8, 2u8, 2u8, 2u8, 2u8, 2u8, 2u8, 2u8, 2u8, 2u8, 2u8, 2u8, 2u8, 2u8,
2u8, 2u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8,
60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8,
60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8,
60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8,
60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8,
60u8, 60u8, 60u8, 60u8, 60u8, 112u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8],
[80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8,
80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8,
0u8, 80u8, 0u8, 0u8, 80u8, 80u8, 80u8, 80u8, 35u8, 35u8, 35u8, 35u8,
35u8, 35u8, 35u8, 35u8, 35u8, 35u8, 35u8, 35u8, 35u8, 35u8, 35u8, 35u8,
176u8, 176u8, 176u8, 176u8, 176u8, 176u8, 176u8, 176u8, 176u8, 176u8,
2u8, 176u8, 2u8, 2u8, 2u8, 2u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8,
60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8,
60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8,
60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8,
60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8,
60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 60u8, 112u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8],
[112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 0u8, 112u8, 0u8, 0u8, 112u8, 112u8, 112u8,
112u8, 39u8, 39u8, 39u8, 39u8, 39u8, 39u8, 39u8, 39u8, 39u8, 39u8, 39u8,
39u8, 39u8, 39u8, 39u8, 39u8, 184u8, 184u8, 184u8, 184u8, 184u8, 184u8,
184u8, 184u8, 184u8, 184u8, 6u8, 184u8, 40u8, 40u8, 40u8, 40u8, 9u8,
9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8,
9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8,
9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8,
9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8,
9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 112u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8],
[112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 0u8, 112u8, 0u8, 0u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 12u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8],
[112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 0u8, 112u8, 0u8, 0u8, 112u8, 112u8, 112u8,
112u8, 32u8, 32u8, 32u8, 32u8, 32u8, 32u8, 32u8, 32u8, 32u8, 32u8, 32u8,
32u8, 32u8, 32u8, 32u8, 32u8, 6u8, 6u8, 6u8, 6u8, 6u8, 6u8, 6u8, 6u8,
6u8, 6u8, 6u8, 6u8, 6u8, 6u8, 6u8, 6u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8,
9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8,
9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8,
9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8,
9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8,
9u8, 112u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8],
[112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 0u8, 112u8, 0u8, 0u8, 112u8, 112u8, 112u8,
112u8, 39u8, 39u8, 39u8, 39u8, 39u8, 39u8, 39u8, 39u8, 39u8, 39u8, 39u8,
39u8, 39u8, 39u8, 39u8, 39u8, 176u8, 176u8, 176u8, 176u8, 176u8, 176u8,
176u8, 176u8, 176u8, 176u8, 6u8, 176u8, 6u8, 6u8, 6u8, 6u8, 9u8, 9u8,
9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8,
9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8,
9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8,
9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8, 9u8,
9u8, 9u8, 9u8, 9u8, 9u8, 112u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8],
[208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8,
208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8,
208u8, 208u8, 208u8, 208u8, 0u8, 208u8, 0u8, 0u8, 208u8, 208u8, 208u8,
208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8,
208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8,
208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8,
208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8,
208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8,
208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8,
208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8,
208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8,
208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 208u8,
208u8, 208u8, 208u8, 208u8, 208u8, 208u8, 112u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 12u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8],
[80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8,
80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8,
0u8, 80u8, 0u8, 0u8, 80u8, 80u8, 80u8, 80u8, 43u8, 43u8, 43u8, 43u8,
43u8, 43u8, 43u8, 43u8, 43u8, 43u8, 43u8, 43u8, 43u8, 43u8, 43u8, 43u8,
76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8,
76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8,
76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 5u8, 76u8, 76u8, 76u8,
76u8, 76u8, 76u8, 76u8, 14u8, 76u8, 76u8, 1u8, 76u8, 13u8, 14u8, 14u8,
76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 112u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8],
[80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8,
80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8,
0u8, 80u8, 0u8, 0u8, 80u8, 80u8, 80u8, 80u8, 32u8, 32u8, 32u8, 32u8,
32u8, 32u8, 32u8, 32u8, 32u8, 32u8, 32u8, 32u8, 32u8, 32u8, 32u8, 32u8,
76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8,
76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8,
76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8,
76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8,
76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8,
76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8,
76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 112u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8],
[80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8,
80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8,
0u8, 80u8, 0u8, 0u8, 80u8, 80u8, 80u8, 80u8, 192u8, 192u8, 192u8, 192u8,
192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8,
192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8,
192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8,
192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8,
192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8,
192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8,
192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8,
192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8,
192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8, 192u8,
192u8, 192u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8,
80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 0u8, 80u8, 80u8, 80u8, 80u8,
80u8, 80u8, 80u8, 80u8, 80u8, 80u8, 0u8, 80u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8,
255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8,
255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8,
255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8,
255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8,
255u8, 255u8, 255u8, 255u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8],
[112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 12u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 0u8, 112u8, 0u8, 0u8, 112u8, 112u8, 112u8,
112u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8, 144u8,
144u8, 144u8, 144u8, 144u8, 144u8],
[112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 0u8, 112u8, 0u8, 0u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8,
112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 112u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 12u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8],
[0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
0u8, 0u8, 0u8, 0u8]];
pub static ENTRY_ACTIONS: &'static [Action] =
&[Action::None, // State::Anywhere
Action::Clear, // State::CsiEntry
Action::None, // State::CsiIgnore
Action::None, // State::CsiIntermediate
Action::None, // State::CsiParam
Action::Clear, // State::DcsEntry
Action::None, // State::DcsIgnore
Action::None, // State::DcsIntermediate
Action::None, // State::DcsParam
Action::Hook, // State::DcsPassthrough
Action::Clear, // State::Escape
Action::None, // State::EscapeIntermediate
Action::None, // State::Ground
Action::OscStart, // State::OscString
Action::None, // State::SosPmApcString
      Action::None]; // State::Utf8
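// Action performed on exit from each parser state, indexed by State.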
pub static EXIT_ACTIONS: &'static [Action] =
&[Action::None, // State::Anywhere
Action::None, // State::CsiEntry
Action::None, // State::CsiIgnore
Action::None, // State::CsiIntermediate
Action::None, // State::CsiParam
Action::None, // State::DcsEntry
Action::None, // State::DcsIgnore
Action::None, // State::DcsIntermediate
Action::None, // State::DcsParam
Action::Unhook, // State::DcsPassthrough
Action::None, // State::Escape
Action::None, // State::EscapeIntermediate
Action::None, // State::Ground
Action::OscEnd, // State::OscString
Action::None, // State::SosPmApcString
Action::None]; // State::Utf8 | 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, 76u8, |
api-bucket-notification.go | /*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bufio"
"bytes"
"context"
"encoding/xml"
"net/http"
"net/url"
"time"
jsoniter "github.com/json-iterator/go"
"github.com/karngyan/minio-go/v7/pkg/notification"
"github.com/karngyan/minio-go/v7/pkg/s3utils"
)
// SetBucketNotification saves a new bucket notification with a context to control cancellations and timeouts.
func (c *Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
urlValues.Set("notification", "")
notifBytes, err := xml.Marshal(&config)
if err != nil {
return err
}
notifBuffer := bytes.NewReader(notifBytes)
reqMetadata := requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
contentBody: notifBuffer,
contentLength: int64(len(notifBytes)),
contentMD5Base64: sumMD5Base64(notifBytes),
contentSHA256Hex: sum256Hex(notifBytes),
}
// Execute PUT to upload a new bucket notification.
resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp, bucketName, "")
}
}
return nil
}
// RemoveAllBucketNotification clears all previously specified bucket notification configuration.
func (c *Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error {
return c.SetBucketNotification(ctx, bucketName, notification.Configuration{})
}
// GetBucketNotification returns current bucket notification configuration
func (c *Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return notification.Configuration{}, err
}
return c.getBucketNotification(ctx, bucketName)
}
// Request server for notification rules.
func (c *Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) {
urlValues := make(url.Values)
urlValues.Set("notification", "")
// Execute GET on bucket to list objects.
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
return notification.Configuration{}, err
}
return processBucketNotificationResponse(bucketName, resp)
}
// processes the GetNotification http response from the server.
func | (bucketName string, resp *http.Response) (notification.Configuration, error) {
if resp.StatusCode != http.StatusOK {
errResponse := httpRespToErrorResponse(resp, bucketName, "")
return notification.Configuration{}, errResponse
}
var bucketNotification notification.Configuration
err := xmlDecoder(resp.Body, &bucketNotification)
if err != nil {
return notification.Configuration{}, err
}
return bucketNotification, nil
}
// ListenNotification listen for all events, this is a MinIO specific API
func (c *Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info {
return c.ListenBucketNotification(ctx, "", prefix, suffix, events)
}
// ListenBucketNotification listen for bucket events, this is a MinIO specific API
func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info {
notificationInfoCh := make(chan notification.Info, 1)
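	// Buffer up to 4 MiB per line of streamed notification events.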
const notificationCapacity = 4 * 1024 * 1024
notificationEventBuffer := make([]byte, notificationCapacity)
	// Start a goroutine that issues the listen request and reads events line by line.
go func(notificationInfoCh chan<- notification.Info) {
defer close(notificationInfoCh)
// Validate the bucket name.
if bucketName != "" {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
select {
case notificationInfoCh <- notification.Info{
Err: err,
}:
case <-ctx.Done():
}
return
}
}
// Check ARN partition to verify if listening bucket is supported
if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) {
select {
case notificationInfoCh <- notification.Info{
Err: errAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"),
}:
case <-ctx.Done():
}
return
}
// Continuously run and listen on bucket notification.
		// Create a done channel to control the retry timer goroutine.
retryDoneCh := make(chan struct{}, 1)
// Indicate to our routine to exit cleanly upon return.
defer close(retryDoneCh)
// Prepare urlValues to pass into the request on every loop
urlValues := make(url.Values)
urlValues.Set("prefix", prefix)
urlValues.Set("suffix", suffix)
urlValues["events"] = events
// Wait on the jitter retry loop.
for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
// Execute GET on bucket to list objects.
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
contentSHA256Hex: emptySHA256Hex,
})
if err != nil {
select {
case notificationInfoCh <- notification.Info{
Err: err,
}:
case <-ctx.Done():
}
return
}
// Validate http response, upon error return quickly.
if resp.StatusCode != http.StatusOK {
errResponse := httpRespToErrorResponse(resp, bucketName, "")
select {
case notificationInfoCh <- notification.Info{
Err: errResponse,
}:
case <-ctx.Done():
}
return
}
// Initialize a new bufio scanner, to read line by line.
bio := bufio.NewScanner(resp.Body)
// Use a higher buffer to support unexpected
// caching done by proxies
bio.Buffer(notificationEventBuffer, notificationCapacity)
var json = jsoniter.ConfigCompatibleWithStandardLibrary
// Unmarshal each line, returns marshaled values.
for bio.Scan() {
var notificationInfo notification.Info
if err = json.Unmarshal(bio.Bytes(), ¬ificationInfo); err != nil {
// Unexpected error during json unmarshal, send
// the error to caller for actionable as needed.
select {
case notificationInfoCh <- notification.Info{
Err: err,
}:
case <-ctx.Done():
return
}
closeResponse(resp)
continue
}
// Send notificationInfo
select {
case notificationInfoCh <- notificationInfo:
case <-ctx.Done():
closeResponse(resp)
return
}
}
if err = bio.Err(); err != nil {
select {
case notificationInfoCh <- notification.Info{
Err: err,
}:
case <-ctx.Done():
return
}
}
// Close current connection before looping further.
closeResponse(resp)
}
}(notificationInfoCh)
// Returns the notification info channel, for caller to start reading from.
return notificationInfoCh
}
| processBucketNotificationResponse |
PluginCard.tsx | import { Button, Card, Col, Popconfirm, Row, Skeleton, Switch } from 'antd'
import { useActions, useValues } from 'kea'
import React from 'react'
import { pluginsLogic } from './pluginsLogic'
import { PluginConfigType, PluginErrorType } from '~/types'
import { PlusOutlined, SettingOutlined } from '@ant-design/icons'
import { Link } from 'lib/components/Link'
import { PluginImage } from './PluginImage'
import { PluginError } from 'scenes/plugins/PluginError'
import { LocalPluginTag } from 'scenes/plugins/LocalPluginTag'
import { PluginInstallationType } from 'scenes/plugins/types'
import { SourcePluginTag } from 'scenes/plugins/SourcePluginTag'
import { CommunityPluginTag } from './CommunityPluginTag'
interface PluginCardProps {
name: string
description?: string
url?: string
pluginConfig?: PluginConfigType
pluginType?: PluginInstallationType
pluginId?: number
error?: PluginErrorType
maintainer?: string
}
export function PluginCard({
name,
description,
url,
pluginType,
pluginConfig,
pluginId,
error,
maintainer,
}: PluginCardProps): JSX.Element {
const { editPlugin, toggleEnabled, installPlugin, resetPluginConfigError } = useActions(pluginsLogic)
const { loading, installingPluginUrl } = useValues(pluginsLogic)
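    // Global plugins are managed for the whole instance, so they can't be configured or toggled from this card.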
const canConfigure = pluginId && !pluginConfig?.global
const switchDisabled = pluginConfig?.global
return (
<Col
style={{ width: '100%', marginBottom: 20 }}
data-attr={`plugin-card-${pluginConfig ? 'installed' : 'available'}`}
>
<Card className="plugin-card">
<Row align="middle" className="plugin-card-row">
{pluginConfig && (
<Col>
<Popconfirm
placement="topLeft"
title={`Are you sure you wish to ${
pluginConfig.enabled ? 'disable' : 'enable'
} this plugin?`}
onConfirm={() =>
pluginConfig.id
? toggleEnabled({ id: pluginConfig.id, enabled: !pluginConfig.enabled })
: editPlugin(pluginId || null, { __enabled: true })
}
okText="Yes"
cancelText="No"
disabled={switchDisabled}
>
<div>
<Switch checked={pluginConfig.enabled} disabled={switchDisabled} />
</div>
</Popconfirm>
</Col>
)}
<Col className={pluginConfig ? 'hide-plugin-image-below-500' : ''}>
<PluginImage pluginType={pluginType} url={url} />
</Col>
<Col style={{ flex: 1 }}>
<div>
<strong style={{ marginRight: 8 }}>{name}</strong>
{maintainer && !pluginId && <CommunityPluginTag isCommunity={maintainer === 'community'} />}
{!description && !url ? <br /> : null}
{pluginConfig?.error ? (
<PluginError
error={pluginConfig.error}
reset={() => resetPluginConfigError(pluginConfig?.id || 0)}
/>
) : error ? (
<PluginError error={error} />
) : null}
{url?.startsWith('file:') ? <LocalPluginTag url={url} title="Local" /> : null}
{pluginType === 'source' ? <SourcePluginTag /> : null}
</div>
<div>
{description}
{url && (
<span>
{description ? ' ' : ''}
<Link | target="_blank"
rel="noopener noreferrer"
style={{ whiteSpace: 'nowrap' }}
>
Learn more
</Link>
.
</span>
)}
</div>
</Col>
<Col>
{canConfigure && (
<Button
type="primary"
className="padding-under-500"
onClick={() => editPlugin(pluginId || null)}
>
<span className="show-over-500">Configure</span>
<span className="hide-over-500">
<SettingOutlined />
</span>
</Button>
)}
{!pluginId && (
<Button
type="primary"
className="padding-under-500"
loading={loading && installingPluginUrl === url}
disabled={loading && installingPluginUrl !== url}
onClick={url ? () => installPlugin(url, PluginInstallationType.Repository) : undefined}
icon={<PlusOutlined />}
>
<span className="show-over-500">Install</span>
</Button>
)}
</Col>
</Row>
</Card>
</Col>
)
}
export function PluginLoading(): JSX.Element {
return (
<>
{[1, 2, 3].map((i) => (
<Col key={i} style={{ marginBottom: 20, width: '100%' }}>
<Card className="plugin-card">
<Row align="middle" className="plugin-card-row">
<Col className="hide-plugin-image-below-500">
<Skeleton.Avatar active size="large" shape="square" />
</Col>
<Col style={{ flex: 1 }}>
<Skeleton title={false} paragraph={{ rows: 2 }} active />
</Col>
<Col>
<span className="show-over-500">
<Skeleton.Button style={{ width: 100 }} />
</span>
<span className="hide-over-500">
<Skeleton.Button style={{ width: 32 }} />
</span>
</Col>
</Row>
</Card>
</Col>
))}
</>
)
} | to={url} |
test_miranda.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_miranda
----------------------------------
Tests for `miranda` module.
"""
import pytest
from contextlib import contextmanager
from click.testing import CliRunner
from miranda import miranda
from miranda import cli
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
|
def test_command_line_interface():
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert 'miranda.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
| """Sample pytest test function with the pytest fixture as an argument.
"""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string |
generated.pb.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-gogo.
// source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto
// DO NOT EDIT!
/*
Package v2alpha1 is a generated protocol buffer package.
It is generated from these files:
k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto
It has these top-level messages:
CronJob
CronJobList
CronJobSpec
CronJobStatus
JobTemplate
JobTemplateSpec
*/
package v2alpha1
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import k8s_io_api_core_v1 "k8s.io/api/core/v1"
import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
import strings "strings"
import reflect "reflect"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
func (m *CronJob) Reset() { *m = CronJob{} }
func (*CronJob) ProtoMessage() {}
func (*CronJob) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} }
func (m *CronJobList) Reset() { *m = CronJobList{} }
func (*CronJobList) ProtoMessage() {}
func (*CronJobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} }
func (m *CronJobSpec) Reset() { *m = CronJobSpec{} }
func (*CronJobSpec) ProtoMessage() {}
func (*CronJobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} }
func (m *CronJobStatus) Reset() { *m = CronJobStatus{} }
func (*CronJobStatus) ProtoMessage() {}
func (*CronJobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} }
func (m *JobTemplate) Reset() { *m = JobTemplate{} }
func (*JobTemplate) ProtoMessage() {}
func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} }
func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} }
func (*JobTemplateSpec) ProtoMessage() {}
func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} }
func init() {
proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v2alpha1.CronJob")
proto.RegisterType((*CronJobList)(nil), "k8s.io.api.batch.v2alpha1.CronJobList")
proto.RegisterType((*CronJobSpec)(nil), "k8s.io.api.batch.v2alpha1.CronJobSpec")
proto.RegisterType((*CronJobStatus)(nil), "k8s.io.api.batch.v2alpha1.CronJobStatus")
proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v2alpha1.JobTemplate")
proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v2alpha1.JobTemplateSpec")
}
func (m *CronJob) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CronJob) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
n1, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
n2, err := m.Spec.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n2
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
n3, err := m.Status.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n3
return i, nil
}
func (m *CronJobList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CronJobList) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
n4, err := m.ListMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n4
if len(m.Items) > 0 {
for _, msg := range m.Items {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
}
func (m *CronJobSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CronJobSpec) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule)))
i += copy(dAtA[i:], m.Schedule)
if m.StartingDeadlineSeconds != nil {
dAtA[i] = 0x10
i++
i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds))
}
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy)))
i += copy(dAtA[i:], m.ConcurrencyPolicy)
if m.Suspend != nil {
dAtA[i] = 0x20
i++
if *m.Suspend {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
dAtA[i] = 0x2a
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.JobTemplate.Size()))
n5, err := m.JobTemplate.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n5
if m.SuccessfulJobsHistoryLimit != nil {
dAtA[i] = 0x30
i++
i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit))
}
if m.FailedJobsHistoryLimit != nil {
dAtA[i] = 0x38
i++
i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit))
}
return i, nil
}
func (m *CronJobStatus) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CronJobStatus) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Active) > 0 {
for _, msg := range m.Active {
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if m.LastScheduleTime != nil {
dAtA[i] = 0x22
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.LastScheduleTime.Size()))
n6, err := m.LastScheduleTime.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n6
}
return i, nil
}
func (m *JobTemplate) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
n7, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n7
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size()))
n8, err := m.Template.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n8
return i, nil
}
func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
n9, err := m.ObjectMeta.MarshalTo(dAtA[i:])
if err != nil |
i += n9
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
n10, err := m.Spec.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n10
return i, nil
}
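// encodeFixed64Generated writes v into dAtA at offset as 8 little-endian bytes and returns the next offset.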
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
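// encodeFixed32Generated writes v into dAtA at offset as 4 little-endian bytes and returns the next offset.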
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
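// encodeVarintGenerated writes v into dAtA at offset using protobuf base-128 varint encoding and returns the next offset.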
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *CronJob) Size() (n int) {
var l int
_ = l
l = m.ObjectMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.Spec.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.Status.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
func (m *CronJobList) Size() (n int) {
var l int
_ = l
l = m.ListMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
if len(m.Items) > 0 {
for _, e := range m.Items {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
func (m *CronJobSpec) Size() (n int) {
var l int
_ = l
l = len(m.Schedule)
n += 1 + l + sovGenerated(uint64(l))
if m.StartingDeadlineSeconds != nil {
n += 1 + sovGenerated(uint64(*m.StartingDeadlineSeconds))
}
l = len(m.ConcurrencyPolicy)
n += 1 + l + sovGenerated(uint64(l))
if m.Suspend != nil {
n += 2
}
l = m.JobTemplate.Size()
n += 1 + l + sovGenerated(uint64(l))
if m.SuccessfulJobsHistoryLimit != nil {
n += 1 + sovGenerated(uint64(*m.SuccessfulJobsHistoryLimit))
}
if m.FailedJobsHistoryLimit != nil {
n += 1 + sovGenerated(uint64(*m.FailedJobsHistoryLimit))
}
return n
}
func (m *CronJobStatus) Size() (n int) {
var l int
_ = l
if len(m.Active) > 0 {
for _, e := range m.Active {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
if m.LastScheduleTime != nil {
l = m.LastScheduleTime.Size()
n += 1 + l + sovGenerated(uint64(l))
}
return n
}
func (m *JobTemplate) Size() (n int) {
var l int
_ = l
l = m.ObjectMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.Template.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
func (m *JobTemplateSpec) Size() (n int) {
var l int
_ = l
l = m.ObjectMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
l = m.Spec.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
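// sovGenerated returns the number of bytes needed to encode x as a varint.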
func sovGenerated(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
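// sozGenerated returns the varint size of x after zigzag encoding, as used for signed fields.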
func sozGenerated(x uint64) (n int) {
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *CronJob) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&CronJob{`,
`ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`,
`Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
func (this *CronJobList) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&CronJobList{`,
`ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`,
`Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CronJob", "CronJob", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
func (this *CronJobSpec) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&CronJobSpec{`,
`Schedule:` + fmt.Sprintf("%v", this.Schedule) + `,`,
`StartingDeadlineSeconds:` + valueToStringGenerated(this.StartingDeadlineSeconds) + `,`,
`ConcurrencyPolicy:` + fmt.Sprintf("%v", this.ConcurrencyPolicy) + `,`,
`Suspend:` + valueToStringGenerated(this.Suspend) + `,`,
`JobTemplate:` + strings.Replace(strings.Replace(this.JobTemplate.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`,
`SuccessfulJobsHistoryLimit:` + valueToStringGenerated(this.SuccessfulJobsHistoryLimit) + `,`,
`FailedJobsHistoryLimit:` + valueToStringGenerated(this.FailedJobsHistoryLimit) + `,`,
`}`,
}, "")
return s
}
func (this *CronJobStatus) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&CronJobStatus{`,
`Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`,
`LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`,
`}`,
}, "")
return s
}
func (this *JobTemplate) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&JobTemplate{`,
`ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
func (this *JobTemplateSpec) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&JobTemplateSpec{`,
`ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "k8s_io_api_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
func valueToStringGenerated(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *CronJob) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CronJob: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CronJob: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *CronJobList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CronJobList: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CronJobList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Items = append(m.Items, CronJob{})
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *CronJobSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CronJobSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CronJobSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Schedule = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field StartingDeadlineSeconds", wireType)
}
var v int64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.StartingDeadlineSeconds = &v
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ConcurrencyPolicy", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ConcurrencyPolicy = ConcurrencyPolicy(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
b := bool(v != 0)
m.Suspend = &b
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field JobTemplate", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.JobTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 6:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulJobsHistoryLimit", wireType)
}
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.SuccessfulJobsHistoryLimit = &v
case 7:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field FailedJobsHistoryLimit", wireType)
}
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.FailedJobsHistoryLimit = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *CronJobStatus) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CronJobStatus: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CronJobStatus: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Active = append(m.Active, k8s_io_api_core_v1.ObjectReference{})
if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field LastScheduleTime", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.LastScheduleTime == nil {
m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{}
}
if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *JobTemplate) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: JobTemplateSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: JobTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
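// skipGenerated skips over the next field in dAtA and returns the index just past it.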
func skipGenerated(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthGenerated
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipGenerated(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
)
func init() {
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto", fileDescriptorGenerated)
}
var fileDescriptorGenerated = []byte{
// 787 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xcf, 0x6f, 0xe3, 0x44,
0x14, 0xc7, 0xe3, 0x34, 0xbf, 0x3a, 0xa1, 0xd0, 0x1a, 0xd4, 0x86, 0x80, 0x9c, 0xc8, 0x15, 0x28,
0x42, 0x30, 0xa6, 0x05, 0x21, 0x4e, 0x48, 0xb8, 0x08, 0x4a, 0x29, 0xa2, 0x72, 0x8a, 0x84, 0x50,
0xb5, 0xda, 0xf1, 0x78, 0x92, 0x4c, 0x63, 0x7b, 0x2c, 0xcf, 0x38, 0x52, 0x6e, 0x7b, 0xdb, 0xeb,
0xfe, 0x25, 0x7b, 0xd9, 0xfd, 0x23, 0xba, 0x7b, 0xea, 0xb1, 0xa7, 0x68, 0xeb, 0xfd, 0x2f, 0xf6,
0xb4, 0xf2, 0xc4, 0xf9, 0xd1, 0x38, 0x69, 0xbb, 0x97, 0xde, 0x3c, 0xcf, 0xdf, 0xef, 0x67, 0xde,
0xbc, 0xf7, 0x66, 0x80, 0xd9, 0xff, 0x99, 0x43, 0xca, 0x8c, 0x7e, 0x64, 0x93, 0xd0, 0x27, 0x82,
0x70, 0x63, 0x40, 0x7c, 0x87, 0x85, 0x46, 0xfa, 0x03, 0x05, 0xd4, 0xb0, 0x91, 0xc0, 0x3d, 0x63,
0xb0, 0x8f, 0xdc, 0xa0, 0x87, 0xf6, 0x8c, 0x2e, 0xf1, 0x49, 0x88, 0x04, 0x71, 0x60, 0x10, 0x32,
0xc1, 0xd4, 0xcf, 0xc7, 0x52, 0x88, 0x02, 0x0a, 0xa5, 0x14, 0x4e, 0xa4, 0xf5, 0xef, 0xba, 0x54,
0xf4, 0x22, 0x1b, 0x62, 0xe6, 0x19, 0x5d, 0xd6, 0x65, 0x86, 0x74, 0xd8, 0x51, 0x47, 0xae, 0xe4,
0x42, 0x7e, 0x8d, 0x49, 0xf5, 0xdd, 0xec, 0xa6, 0x99, 0xed, 0xea, 0xfa, 0x9c, 0x08, 0xb3, 0x90,
0x2c, 0xd3, 0xfc, 0x38, 0xd3, 0x78, 0x08, 0xf7, 0xa8, 0x4f, 0xc2, 0xa1, 0x11, 0xf4, 0xbb, 0x49,
0x80, 0x1b, 0x1e, 0x11, 0x68, 0x99, 0xcb, 0x58, 0xe5, 0x0a, 0x23, 0x5f, 0x50, 0x8f, 0x64, 0x0c,
0x3f, 0xdd, 0x65, 0xe0, 0xb8, 0x47, 0x3c, 0x94, 0xf1, 0xfd, 0xb0, 0xca, 0x17, 0x09, 0xea, 0x1a,
0xd4, 0x17, 0x5c, 0x84, 0x8b, 0x26, 0xfd, 0x69, 0x1e, 0x94, 0x0f, 0x42, 0xe6, 0x1f, 0x31, 0x5b,
0x7d, 0x0c, 0x2a, 0xc9, 0x21, 0x1c, 0x24, 0x50, 0x4d, 0x69, 0x2a, 0xad, 0xea, 0xfe, 0xf7, 0x70,
0xd6, 0x85, 0x29, 0x13, 0x06, 0xfd, 0x6e, 0x12, 0xe0, 0x30, 0x51, 0xc3, 0xc1, 0x1e, 0xfc, 0xc7,
0x3e, 0x27, 0x58, 0xfc, 0x4d, 0x04, 0x32, 0xd5, 0x8b, 0x51, 0x23, 0x17, 0x8f, 0x1a, 0x60, 0x16,
0xb3, 0xa6, 0x54, 0xf5, 0x10, 0x14, 0x78, 0x40, 0x70, 0x2d, 0x2f, 0xe9, 0x5f, 0xc3, 0x95, 0x3d,
0x86, 0x69, 0x4e, 0xed, 0x80, 0x60, 0xf3, 0xa3, 0x94, 0x59, 0x48, 0x56, 0x96, 0x24, 0xa8, 0x27,
0xa0, 0xc4, 0x05, 0x12, 0x11, 0xaf, 0xad, 0x49, 0x56, 0xeb, 0x1e, 0x2c, 0xa9, 0x37, 0x3f, 0x4e,
0x69, 0xa5, 0xf1, 0xda, 0x4a, 0x39, 0xfa, 0x4b, 0x05, 0x54, 0x53, 0xe5, 0x31, 0xe5, 0x42, 0x3d,
0xcb, 0x54, 0x03, 0xde, 0xaf, 0x1a, 0x89, 0x5b, 0xd6, 0x62, 0x33, 0xdd, 0xa9, 0x32, 0x89, 0xcc,
0x55, 0xe2, 0x0f, 0x50, 0xa4, 0x82, 0x78, 0xbc, 0x96, 0x6f, 0xae, 0xb5, 0xaa, 0xfb, 0xfa, 0xdd,
0xe9, 0x9b, 0x1b, 0x29, 0xae, 0xf8, 0x67, 0x62, 0xb4, 0xc6, 0x7e, 0xfd, 0x79, 0x61, 0x9a, 0x76,
0x52, 0x1e, 0xf5, 0x5b, 0x50, 0x49, 0xe6, 0xc3, 0x89, 0x5c, 0x22, 0xd3, 0x5e, 0x9f, 0xa5, 0xd1,
0x4e, 0xe3, 0xd6, 0x54, 0xa1, 0xfe, 0x0b, 0x76, 0xb8, 0x40, 0xa1, 0xa0, 0x7e, 0xf7, 0x37, 0x82,
0x1c, 0x97, 0xfa, 0xa4, 0x4d, 0x30, 0xf3, 0x1d, 0x2e, 0x7b, 0xb4, 0x66, 0x7e, 0x11, 0x8f, 0x1a,
0x3b, 0xed, 0xe5, 0x12, 0x6b, 0x95, 0x57, 0x3d, 0x03, 0x5b, 0x98, 0xf9, 0x38, 0x0a, 0x43, 0xe2,
0xe3, 0xe1, 0x09, 0x73, 0x29, 0x1e, 0xca, 0x46, 0xad, 0x9b, 0x30, 0xcd, 0x66, 0xeb, 0x60, 0x51,
0xf0, 0x6e, 0x59, 0xd0, 0xca, 0x82, 0xd4, 0xaf, 0x40, 0x99, 0x47, 0x3c, 0x20, 0xbe, 0x53, 0x2b,
0x34, 0x95, 0x56, 0xc5, 0xac, 0xc6, 0xa3, 0x46, 0xb9, 0x3d, 0x0e, 0x59, 0x93, 0x7f, 0x2a, 0x02,
0xd5, 0x73, 0x66, 0x9f, 0x12, 0x2f, 0x70, 0x91, 0x20, 0xb5, 0xa2, 0xec, 0xe1, 0x37, 0xb7, 0x14,
0xfa, 0x68, 0xa6, 0x96, 0x73, 0xf7, 0x69, 0x9a, 0x6a, 0x75, 0xee, 0x87, 0x35, 0xcf, 0x54, 0x1f,
0x81, 0x3a, 0x8f, 0x30, 0x26, 0x9c, 0x77, 0x22, 0xf7, 0x88, 0xd9, 0xfc, 0x90, 0x72, 0xc1, 0xc2,
0xe1, 0x31, 0xf5, 0xa8, 0xa8, 0x95, 0x9a, 0x4a, 0xab, 0x68, 0x6a, 0xf1, 0xa8, 0x51, 0x6f, 0xaf,
0x54, 0x59, 0xb7, 0x10, 0x54, 0x0b, 0x6c, 0x77, 0x10, 0x75, 0x89, 0x93, 0x61, 0x97, 0x25, 0xbb,
0x1e, 0x8f, 0x1a, 0xdb, 0xbf, 0x2f, 0x55, 0x58, 0x2b, 0x9c, 0xfa, 0x6b, 0x05, 0x6c, 0xdc, 0xb8,
0x11, 0xea, 0x5f, 0xa0, 0x84, 0xb0, 0xa0, 0x83, 0x64, 0x60, 0x92, 0x61, 0xdc, 0x9d, 0xaf, 0x51,
0xf2, 0x18, 0xce, 0xee, 0xb8, 0x45, 0x3a, 0x24, 0x69, 0x05, 0x99, 0x5d, 0xa3, 0x5f, 0xa5, 0xd5,
0x4a, 0x11, 0xaa, 0x0b, 0x36, 0x5d, 0xc4, 0xc5, 0x64, 0xd6, 0x4e, 0xa9, 0x47, 0x64, 0x97, 0x6e,
0x96, 0xfe, 0x96, 0xeb, 0x93, 0x38, 0xcc, 0xcf, 0xe2, 0x51, 0x63, 0xf3, 0x78, 0x81, 0x63, 0x65,
0xc8, 0xfa, 0x2b, 0x05, 0xcc, 0x77, 0xe7, 0x01, 0x9e, 0xb0, 0xff, 0x40, 0x45, 0x4c, 0x46, 0x2a,
0xff, 0xc1, 0x23, 0x35, 0xbd, 0x8b, 0xd3, 0x79, 0x9a, 0xd2, 0xf4, 0x17, 0x0a, 0xf8, 0x64, 0x41,
0xff, 0x00, 0xe7, 0xf9, 0xe5, 0xc6, 0x93, 0xfc, 0xe5, 0x92, 0xb3, 0xc8, 0x53, 0xac, 0x7a, 0x88,
0x4d, 0x78, 0x71, 0xad, 0xe5, 0x2e, 0xaf, 0xb5, 0xdc, 0xd5, 0xb5, 0x96, 0x7b, 0x12, 0x6b, 0xca,
0x45, 0xac, 0x29, 0x97, 0xb1, 0xa6, 0x5c, 0xc5, 0x9a, 0xf2, 0x26, 0xd6, 0x94, 0x67, 0x6f, 0xb5,
0xdc, 0xff, 0x95, 0x49, 0x45, 0xde, 0x07, 0x00, 0x00, 0xff, 0xff, 0x02, 0x60, 0xaa, 0x00, 0x1c,
0x08, 0x00, 0x00,
}
| {
return 0, err
} |
api.rs | //! The marshalling api
use {Variants, Error, Result};
use gc::{DataDef, Gc, Traverseable, Move};
use base::symbol::Symbol;
use stack::{State, StackFrame};
use vm::{self, Thread, Status, RootStr, RootedValue, Root};
use value::{ArrayRepr, DataStruct, ExternFunction, Value, ValueArray, Def};
use thread::{self, Context, RootedThread};
use thread::ThreadInternal;
use base::types;
use base::types::{ArcType, Type};
use types::{VmIndex, VmTag, VmInt};
use std::any::Any;
use std::cell::Ref;
use std::cmp::Ordering;
use std::fmt;
use std::marker::PhantomData;
use std::ops::Deref;
use std::result::Result as StdResult;
pub use value::Userdata;
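// Counts the identifiers passed to the macro at compile time.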
macro_rules! count {
() => { 0 };
($_e: ident) => { 1 };
($_e: ident, $($rest: ident),*) => { 1 + count!($($rest),*) }
}
#[derive(Copy, Clone, Debug)]
pub enum ValueRef<'a> {
Byte(u8),
Int(VmInt),
Float(f64),
String(&'a str),
Data(Data<'a>),
Tag(VmTag),
Userdata(&'a vm::Userdata),
Internal,
}
// Need to manually implement PartialEq so that `ValueRef`'s with different lifetimes can be compared
impl<'a, 'b> PartialEq<ValueRef<'b>> for ValueRef<'a> {
fn eq(&self, other: &ValueRef) -> bool {
use self::ValueRef::*;
match (self, other) {
(&Byte(l), &Byte(r)) => l == r,
(&Int(l), &Int(r)) => l == r,
(&Float(l), &Float(r)) => l == r,
(&String(l), &String(r)) => l == r,
(&Data(l), &Data(r)) => l == r,
(&Tag(l), &Tag(r)) => l == r,
_ => false,
}
}
}
impl<'a> PartialEq<Value> for ValueRef<'a> {
fn eq(&self, other: &Value) -> bool {
self == &ValueRef::new(other)
}
}
impl<'a> ValueRef<'a> {
pub fn new(value: &'a Value) -> ValueRef<'a> {
match *value {
Value::Byte(i) => ValueRef::Byte(i),
Value::Int(i) => ValueRef::Int(i),
Value::Float(f) => ValueRef::Float(f),
Value::String(ref s) => ValueRef::String(s),
Value::Data(ref data) => ValueRef::Data(Data(data)),
Value::Userdata(ref data) => ValueRef::Userdata(&***data),
Value::Tag(tag) => ValueRef::Tag(tag),
Value::Thread(_) |
Value::Function(_) |
Value::Closure(_) |
Value::Array(_) | // FIXME Expose arrays safely
Value::PartialApplication(_) => ValueRef::Internal,
}
}
}
#[derive(Copy, Clone, PartialEq, Debug)]
pub struct Data<'a>(&'a DataStruct);
impl<'a> Data<'a> {
pub fn tag(&self) -> VmTag {
self.0.tag
}
pub fn len(&self) -> usize {
self.0.fields.len()
}
pub fn get(&self, index: usize) -> Option<ValueRef<'a>> {
self.0.fields.get(index).map(ValueRef::new)
}
}
/// Marker type representing a hole
pub struct Hole(());
impl VmType for Hole {
type Type = Hole;
fn make_type(_: &Thread) -> ArcType {
Type::hole()
}
}
/// Type representing gluon's IO type
#[derive(Debug, PartialEq)]
pub enum IO<T> {
Value(T),
Exception(String),
}
pub struct Primitive<F> {
name: &'static str,
function: fn(&Thread) -> Status,
_typ: PhantomData<F>,
}
#[inline]
pub fn primitive<F>(name: &'static str, function: fn(&Thread) -> Status) -> Primitive<F> {
Primitive {
name: name,
function: function,
_typ: PhantomData,
}
}
#[inline]
pub fn primitive_f<F>(name: &'static str, function: fn(&Thread) -> Status, _: F) -> Primitive<F> {
primitive::<F>(name, function)
}
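/// Wraps a plain Rust function pointer of the given arity as a VM primitive.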
#[macro_export]
macro_rules! primitive {
(0 $name: expr) => {
{
fn wrapper(thread: &$crate::thread::Thread) -> $crate::thread::Status {
$crate::api::VmFunction::unpack_and_call(
&($name as fn () -> _), thread)
}
$crate::api::primitive_f::<fn () -> _>(stringify!($name), wrapper, $name)
}
};
(1 $name: expr) => {
{
fn wrapper(thread: &$crate::thread::Thread) -> $crate::thread::Status {
$crate::api::VmFunction::unpack_and_call(
&($name as fn (_) -> _), thread)
}
$crate::api::primitive_f::<fn (_) -> _>(stringify!($name), wrapper, $name)
}
};
(2 $name: expr) => {
{
fn wrapper(thread: &$crate::thread::Thread) -> $crate::thread::Status {
$crate::api::VmFunction::unpack_and_call(
&($name as fn (_, _) -> _), thread)
}
$crate::api::primitive_f::<fn (_, _) -> _>(stringify!($name), wrapper, $name)
}
};
(3 $name: expr) => {
{
fn wrapper(thread: &$crate::thread::Thread) -> $crate::thread::Status {
$crate::api::VmFunction::unpack_and_call(
&($name as fn (_, _, _) -> _), thread)
}
$crate::api::primitive_f::<fn (_, _, _) -> _>(stringify!($name), wrapper, $name)
}
};
(4 $name: expr) => {
{
fn wrapper(thread: &$crate::thread::Thread) -> $crate::thread::Status {
$crate::api::VmFunction::unpack_and_call(
&($name as fn (_, _, _, _) -> _), thread)
}
$crate::api::primitive_f::<fn (_, _, _, _) -> _>(stringify!($name), wrapper, $name)
}
};
}
#[derive(PartialEq)]
pub struct Generic<T>(pub Value, PhantomData<T>);
impl<T> fmt::Debug for Generic<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl<T> From<Value> for Generic<T> {
fn from(v: Value) -> Generic<T> {
Generic(v, PhantomData)
}
}
impl<T: VmType> VmType for Generic<T> {
type Type = T::Type;
fn make_type(vm: &Thread) -> ArcType {
T::make_type(vm)
}
fn extra_args() -> VmIndex {
T::extra_args()
}
}
impl<'vm, T: VmType> Pushable<'vm> for Generic<T> {
fn push(self, _: &'vm Thread, context: &mut Context) -> Result<()> {
context.stack.push(self.0);
Ok(())
}
}
impl<'vm, T> Getable<'vm> for Generic<T> {
fn from_value(_: &'vm Thread, value: Variants) -> Option<Generic<T>> {
Some(Generic::from(*value.0))
}
}
impl<T> Traverseable for Generic<T> {
fn traverse(&self, gc: &mut Gc) {
self.0.traverse(gc);
}
}
/// Module containing types which represent generic variables in gluon's type system
pub mod generic {
use super::VmType;
use base::types::ArcType;
use vm::Thread;
use thread::ThreadInternal;
macro_rules! make_generics {
($($i: ident)+) => {
$(
#[derive(Clone, Copy, PartialEq)]
pub enum $i { }
impl VmType for $i {
type Type = $i;
fn make_type(vm: &Thread) -> ArcType {
let s = stringify!($i);
let lower = [s.as_bytes()[0] + 32];
let lower_str = unsafe { ::std::str::from_utf8_unchecked(&lower) };
vm.global_env().get_generic(lower_str)
}
}
)+
}
}
make_generics!{A B C D E F G H I J K L M N O P Q R X Y Z}
}
/// Trait which maps a type in rust to a type in gluon
pub trait VmType {
/// A version of `Self` which implements `Any` allowing a `TypeId` to be retrieved
type Type: ?Sized + Any;
/// Creates an gluon type which maps to `Self` in rust
fn make_type(vm: &Thread) -> ArcType {
vm.get_type::<Self::Type>()
}
/// How many extra arguments a function returning this type requires.
/// Used for abstract types which when used in return position should act like they still need
/// more arguments before they are called
fn extra_args() -> VmIndex {
0
}
}
/// Trait which allows a rust value to be pushed to the virtual machine
pub trait Pushable<'vm> {
/// Pushes `self` to `stack`. If the call is successful a single element should have been added
    /// to the stack and `Ok(())` should be returned. If the call is unsuccessful `Status::Error`
    /// should be returned and the stack should be left intact.
fn push(self, vm: &'vm Thread, context: &mut Context) -> Result<()>;
fn status_push(self, vm: &'vm Thread, context: &mut Context) -> Status
where Self: Sized,
{
match self.push(vm, context) {
Ok(()) => Status::Ok,
Err(err) => {
let msg = context.alloc_ignore_limit(&format!("{}", err)[..]);
context.stack.push(Value::String(msg));
Status::Error
}
}
}
}
/// Trait which allows rust values to be retrieved from the virtual machine
pub trait Getable<'vm>: Sized {
    /// Unsafe version of `from_value` which allows references into `GcPtr` internals to be
    /// extracted if `value` is rooted
unsafe fn from_value_unsafe(vm: &'vm Thread, value: Variants) -> Option<Self> {
Self::from_value(vm, value)
}
fn from_value(vm: &'vm Thread, value: Variants) -> Option<Self>;
}
impl<'vm, T: vm::Userdata> Pushable<'vm> for T {
fn push(self, thread: &'vm Thread, context: &mut Context) -> Result<()> {
let data: Box<vm::Userdata> = Box::new(self);
let userdata = try!(context.alloc_with(thread, Move(data)));
context.stack.push(Value::Userdata(userdata));
Ok(())
}
}
impl<'vm> Getable<'vm> for Value {
fn from_value(_vm: &'vm Thread, value: Variants) -> Option<Self> {
Some(*value.0)
}
}
impl<'vm, T: ?Sized + VmType> VmType for &'vm T {
type Type = T::Type;
fn make_type(vm: &Thread) -> ArcType {
T::make_type(vm)
}
}
impl<'vm, T> Getable<'vm> for &'vm T
where T: vm::Userdata,
{
unsafe fn from_value_unsafe(vm: &'vm Thread, value: Variants) -> Option<Self> {
<*const T as Getable<'vm>>::from_value(vm, value).map(|p| &*p)
}
// Only allow the unsafe version to be used
fn from_value(_vm: &'vm Thread, _value: Variants) -> Option<Self> {
None
}
}
unsafe fn forget_lifetime<'a, 'b, T: ?Sized>(x: &'a T) -> &'b T {
::std::mem::transmute(x)
}
impl<'vm> Getable<'vm> for &'vm str {
fn from_value(_vm: &'vm Thread, value: Variants) -> Option<Self> {
unsafe {
match value.as_ref() {
ValueRef::String(ref s) => Some(forget_lifetime(s)),
_ => None,
}
}
}
}
/// Wrapper type which acts as the type `T` but also passes the `VM` to the called function
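///
/// A sketch of a primitive which needs access to the thread it was called from (illustrative
/// only):
///
/// ```rust,ignore
/// fn double<'vm>(arg: WithVM<'vm, i32>) -> i32 {
///     let WithVM { vm, value } = arg;
///     // `vm` is the thread executing the call; `value` is the ordinary argument
///     value * 2
/// }
/// ```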
pub struct WithVM<'vm, T> {
pub vm: &'vm Thread,
pub value: T,
}
impl<'vm, T> VmType for WithVM<'vm, T>
where T: VmType,
{
type Type = T::Type;
fn make_type(vm: &Thread) -> ArcType {
T::make_type(vm)
}
fn extra_args() -> VmIndex {
T::extra_args()
}
}
impl<'vm, T> Pushable<'vm> for WithVM<'vm, T>
where T: Pushable<'vm>,
{
fn push(self, vm: &'vm Thread, context: &mut Context) -> Result<()> {
self.value.push(vm, context)
}
}
impl<'vm, T> Getable<'vm> for WithVM<'vm, T>
where T: Getable<'vm>,
{
unsafe fn from_value_unsafe(vm: &'vm Thread, value: Variants) -> Option<WithVM<'vm, T>> {
T::from_value_unsafe(vm, value).map(|t| WithVM { vm: vm, value: t })
}
fn from_value(vm: &'vm Thread, value: Variants) -> Option<WithVM<'vm, T>> {
T::from_value(vm, value).map(|t| WithVM { vm: vm, value: t })
}
}
impl VmType for () {
type Type = Self;
}
impl<'vm> Pushable<'vm> for () {
fn push(self, _: &'vm Thread, context: &mut Context) -> Result<()> {
context.stack.push(Value::Int(0));
Ok(())
}
}
impl<'vm> Getable<'vm> for () {
fn from_value(_: &'vm Thread, _: Variants) -> Option<()> {
Some(())
}
}
impl VmType for u8 {
type Type = Self;
}
impl<'vm> Pushable<'vm> for u8 {
fn push(self, _: &'vm Thread, context: &mut Context) -> Result<()> {
context.stack.push(Value::Byte(self));
Ok(())
}
}
impl<'vm> Getable<'vm> for u8 {
fn from_value(_: &'vm Thread, value: Variants) -> Option<u8> {
match value.as_ref() {
ValueRef::Byte(i) => Some(i),
_ => None,
}
}
}
impl VmType for i32 {
type Type = VmInt;
}
impl<'vm> Pushable<'vm> for i32 {
fn push(self, _: &'vm Thread, context: &mut Context) -> Result<()> {
context.stack.push(Value::Int(self as VmInt));
Ok(())
}
}
impl<'vm> Getable<'vm> for i32 {
fn from_value(_: &'vm Thread, value: Variants) -> Option<i32> {
match value.as_ref() {
ValueRef::Int(i) => Some(i as i32),
_ => None,
}
}
}
impl VmType for u32 {
type Type = VmInt;
}
impl<'vm> Pushable<'vm> for u32 {
fn push(self, _: &'vm Thread, context: &mut Context) -> Result<()> {
context.stack.push(Value::Int(self as VmInt));
Ok(())
}
}
impl<'vm> Getable<'vm> for u32 {
fn from_value(_: &'vm Thread, value: Variants) -> Option<u32> {
match value.as_ref() {
ValueRef::Int(i) => Some(i as u32),
_ => None,
}
}
}
impl VmType for usize {
type Type = VmInt;
}
impl<'vm> Pushable<'vm> for usize {
fn push(self, _: &'vm Thread, context: &mut Context) -> Result<()> {
context.stack.push(Value::Int(self as VmInt));
Ok(())
}
}
impl<'vm> Getable<'vm> for usize {
fn from_value(_: &'vm Thread, value: Variants) -> Option<usize> {
match value.as_ref() {
ValueRef::Int(i) => Some(i as usize),
_ => None,
}
}
}
impl VmType for VmInt {
type Type = Self;
}
impl<'vm> Pushable<'vm> for VmInt {
fn push(self, _: &'vm Thread, context: &mut Context) -> Result<()> {
context.stack.push(Value::Int(self));
Ok(())
}
}
impl<'vm> Getable<'vm> for VmInt {
fn from_value(_: &'vm Thread, value: Variants) -> Option<VmInt> {
match value.as_ref() {
ValueRef::Int(i) => Some(i),
_ => None,
}
}
}
impl VmType for f64 {
type Type = Self;
}
impl<'vm> Pushable<'vm> for f64 {
fn push(self, _: &'vm Thread, context: &mut Context) -> Result<()> {
context.stack.push(Value::Float(self));
Ok(())
}
}
impl<'vm> Getable<'vm> for f64 {
fn from_value(_: &'vm Thread, value: Variants) -> Option<f64> {
match value.as_ref() {
ValueRef::Float(f) => Some(f),
_ => None,
}
}
}
impl VmType for bool {
type Type = Self;
fn make_type(vm: &Thread) -> ArcType {
(*vm.global_env().get_env().find_type_info("std.types.Bool").unwrap()).clone().into_type()
}
}
impl<'vm> Pushable<'vm> for bool {
fn push(self, _: &'vm Thread, context: &mut Context) -> Result<()> {
context.stack.push(Value::Tag(if self { 1 } else { 0 }));
Ok(())
}
}
impl<'vm> Getable<'vm> for bool {
fn from_value(_: &'vm Thread, value: Variants) -> Option<bool> {
match value.as_ref() {
ValueRef::Tag(1) => Some(true),
ValueRef::Tag(0) => Some(false),
_ => None,
}
}
}
impl VmType for Ordering {
type Type = Self;
fn make_type(vm: &Thread) -> ArcType {
let symbol = vm.find_type_info("std.types.Ordering").unwrap().name.clone();
Type::app(Type::ident(symbol), vec![])
}
}
impl<'vm> Pushable<'vm> for Ordering {
fn push(self, _vm: &'vm Thread, context: &mut Context) -> Result<()> {
let tag = match self {
Ordering::Less => 0,
Ordering::Equal => 1,
Ordering::Greater => 2,
};
context.stack.push(Value::Tag(tag));
Ok(())
}
}
impl<'vm> Getable<'vm> for Ordering {
fn from_value(_: &'vm Thread, value: Variants) -> Option<Ordering> {
let tag = match value.as_ref() {
ValueRef::Data(data) => data.tag(),
ValueRef::Tag(tag) => tag,
_ => return None,
};
match tag {
0 => Some(Ordering::Less),
1 => Some(Ordering::Equal),
2 => Some(Ordering::Greater),
_ => None,
}
}
}
impl VmType for str {
type Type = <String as VmType>::Type;
}
impl VmType for String {
type Type = String;
}
impl<'vm, 's> Pushable<'vm> for &'s String {
fn push(self, thread: &'vm Thread, context: &mut Context) -> Result<()> {
<&str as Pushable>::push(self, thread, context)
}
}
impl<'vm, 's> Pushable<'vm> for &'s str {
fn push(self, thread: &'vm Thread, context: &mut Context) -> Result<()> {
let s = try!(context.alloc_with(thread, self));
context.stack.push(Value::String(s));
Ok(())
}
}
impl<'vm> Getable<'vm> for String {
fn from_value(_: &'vm Thread, value: Variants) -> Option<String> {
match value.as_ref() {
ValueRef::String(i) => Some(String::from(&i[..])),
_ => None,
}
}
}
impl<'vm> Pushable<'vm> for String {
fn push(self, thread: &'vm Thread, context: &mut Context) -> Result<()> {
<&str as Pushable>::push(&self, thread, context)
}
}
impl VmType for char {
type Type = Self;
}
impl<'vm> Pushable<'vm> for char {
fn push(self, _: &'vm Thread, context: &mut Context) -> Result<()> {
context.stack.push(Value::Int(self as VmInt));
Ok(())
}
}
impl<'vm> Getable<'vm> for char {
fn from_value(_: &'vm Thread, value: Variants) -> Option<char> {
match value.as_ref() {
ValueRef::Int(x) => ::std::char::from_u32(x as u32),
_ => None,
}
}
}
impl<'s, T: VmType> VmType for Ref<'s, T> {
type Type = T::Type;
fn make_type(vm: &Thread) -> ArcType {
T::make_type(vm)
}
}
impl<'s, 'vm, T> Pushable<'vm> for Ref<'s, T>
where for<'t> &'t T: Pushable<'vm>,
T: VmType,
{
fn push(self, vm: &'vm Thread, context: &mut Context) -> Result<()> {
<&T as Pushable>::push(&*self, vm, context)
}
}
impl<'s, T> VmType for &'s [T]
where T: VmType + ArrayRepr + 's,
T::Type: Sized,
{
type Type = &'static [T::Type];
fn make_type(vm: &Thread) -> ArcType {
Type::array(T::make_type(vm))
}
}
impl<'vm, 's, T> Pushable<'vm> for &'s [T]
where T: Traverseable + Pushable<'vm> + 's,
&'s [T]: DataDef<Value = ValueArray>,
{
fn push(self, thread: &'vm Thread, context: &mut Context) -> Result<()> {
let result = try!(context.alloc_with(thread, self));
context.stack.push(Value::Array(result));
Ok(())
}
}
impl<'s, 'vm, T: Copy + ArrayRepr> Getable<'vm> for &'s [T] {
unsafe fn from_value_unsafe(_: &'vm Thread, value: Variants) -> Option<Self> {
match *value.0 {
Value::Array(ptr) => ptr.as_slice().map(|s| &*(s as *const _)),
_ => None,
}
}
// Only allow the unsafe version to be used
fn from_value(_vm: &'vm Thread, _value: Variants) -> Option<Self> {
None
}
}
impl<T> VmType for Vec<T>
where T: VmType,
T::Type: Sized,
{
type Type = Vec<T::Type>;
}
impl<'vm, T> Pushable<'vm> for Vec<T>
where T: Pushable<'vm>,
{
fn push(self, thread: &'vm Thread, context: &mut Context) -> Result<()> {
let len = self.len() as VmIndex;
for v in self {
try!(v.push(thread, context));
}
let result = {
let Context { ref mut gc, ref stack, .. } = *context;
let values = &stack[stack.len() - len..];
try!(thread::alloc(gc,
thread,
stack,
Def {
tag: 0,
elems: values,
}))
};
for _ in 0..len {
context.stack.pop();
}
context.stack.push(Value::Data(result));
Ok(())
}
}
impl<'s, T: VmType> VmType for *const T {
type Type = T::Type;
fn make_type(vm: &Thread) -> ArcType {
T::make_type(vm)
}
}
impl<'vm, T: vm::Userdata> Getable<'vm> for *const T {
fn from_value(_: &'vm Thread, value: Variants) -> Option<*const T> {
match value.as_ref() {
ValueRef::Userdata(data) => data.downcast_ref::<T>().map(|x| x as *const T),
_ => None,
}
}
}
impl<T: VmType> VmType for Option<T>
where T::Type: Sized,
{
type Type = Option<T::Type>;
fn make_type(vm: &Thread) -> ArcType {
let symbol = vm.find_type_info("std.types.Option").unwrap().name.clone();
Type::app(Type::ident(symbol), vec![T::make_type(vm)])
}
}
impl<'vm, T: Pushable<'vm>> Pushable<'vm> for Option<T> {
fn push(self, thread: &'vm Thread, context: &mut Context) -> Result<()> {
match self {
Some(value) => {
let len = context.stack.len();
try!(value.push(thread, context));
let arg = [context.stack.pop()];
let value = try!(context.new_data(thread, 1, &arg));
assert!(context.stack.len() == len);
context.stack.push(value);
}
None => context.stack.push(Value::Tag(0)),
}
Ok(())
}
}
impl<'vm, T: Getable<'vm>> Getable<'vm> for Option<T> {
fn from_value(vm: &'vm Thread, value: Variants) -> Option<Option<T>> {
match *value.0 {
Value::Data(data) => {
if data.tag == 0 {
Some(None)
} else {
T::from_value(vm, Variants(&data.fields[0])).map(Some)
}
}
Value::Tag(0) => Some(None),
_ => None,
}
}
}
impl<T: VmType, E: VmType> VmType for StdResult<T, E>
where T::Type: Sized,
E::Type: Sized,
{
type Type = StdResult<T::Type, E::Type>;
fn make_type(vm: &Thread) -> ArcType {
let symbol = vm.find_type_info("std.types.Result").unwrap().name.clone();
Type::app(Type::ident(symbol),
vec![E::make_type(vm), T::make_type(vm)])
}
}
impl<'vm, T: Pushable<'vm>, E: Pushable<'vm>> Pushable<'vm> for StdResult<T, E> {
fn push(self, thread: &'vm Thread, context: &mut Context) -> Result<()> {
let tag = match self {
Ok(ok) => {
try!(ok.push(thread, context));
1
}
Err(err) => {
try!(err.push(thread, context));
0
}
};
let value = context.stack.pop();
let data = try!(context.alloc_with(thread,
Def {
tag: tag,
elems: &[value],
}));
context.stack.push(Value::Data(data));
Ok(())
}
}
impl<'vm, T: Getable<'vm>, E: Getable<'vm>> Getable<'vm> for StdResult<T, E> {
fn from_value(vm: &'vm Thread, value: Variants) -> Option<StdResult<T, E>> {
match *value.0 {
Value::Data(data) => {
match data.tag {
0 => E::from_value(vm, Variants(&data.fields[0])).map(Err),
1 => T::from_value(vm, Variants(&data.fields[0])).map(Ok),
_ => None,
}
}
_ => None,
}
}
}
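/// Result type for primitives which either return a value to gluon or raise the error as a
/// panic in the calling thread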
pub enum RuntimeResult<T, E> {
Return(T),
Panic(E),
}
impl<T: VmType, E> VmType for RuntimeResult<T, E> {
type Type = T::Type;
fn make_type(vm: &Thread) -> ArcType {
T::make_type(vm)
}
}
impl<'vm, T: Pushable<'vm>, E: fmt::Display> Pushable<'vm> for RuntimeResult<T, E> {
fn push(self, vm: &'vm Thread, context: &mut Context) -> Result<()> {
match self {
RuntimeResult::Return(value) => value.push(vm, context),
RuntimeResult::Panic(err) => Err(Error::Message(format!("{}", err))),
}
}
}
impl<T> VmType for IO<T>
where T: VmType,
T::Type: Sized,
{
type Type = IO<T::Type>;
fn make_type(vm: &Thread) -> ArcType {
let env = vm.global_env().get_env();
let alias = env.find_type_info("IO").unwrap().into_owned();
Type::app(alias.into_type(), vec![T::make_type(vm)])
}
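// Functions returning `IO` act as if they need one additional argument before they are
// actually run (see the documentation of `extra_args`)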
fn extra_args() -> VmIndex {
1
}
}
impl<'vm, T: Getable<'vm>> Getable<'vm> for IO<T> {
fn from_value(vm: &'vm Thread, value: Variants) -> Option<IO<T>> {
T::from_value(vm, value).map(IO::Value)
}
}
impl<'vm, T: Pushable<'vm>> Pushable<'vm> for IO<T> {
fn push(self, vm: &'vm Thread, context: &mut Context) -> Result<()> |
}
/// Type implementing both `Pushable` and `Getable` of values of `V`.
/// The actual value, `V`, is not accessible directly but is only intended to be transferred between
/// two different threads.
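///
/// A sketch of a primitive which passes a value through untouched (illustrative only):
///
/// ```rust,ignore
/// fn id(x: OpaqueValue<RootedThread, generic::A>) -> OpaqueValue<RootedThread, generic::A> {
///     x
/// }
/// ```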
pub struct OpaqueValue<T, V>(RootedValue<T>, PhantomData<V>) where T: Deref<Target = Thread>;
impl<T, V> fmt::Debug for OpaqueValue<T, V>
where T: Deref<Target = Thread>,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self.0)
}
}
impl<T, V> OpaqueValue<T, V>
where T: Deref<Target = Thread>,
{
/// Unsafe as the returned `Value` is not rooted
pub unsafe fn get_value(&self) -> Value {
*self.0
}
pub fn get_ref(&self) -> ValueRef {
ValueRef::new(&self.0)
}
}
impl<T, V> VmType for OpaqueValue<T, V>
where T: Deref<Target = Thread>,
V: VmType,
V::Type: Sized,
{
type Type = V::Type;
fn make_type(vm: &Thread) -> ArcType {
V::make_type(vm)
}
}
impl<'vm, T, V> Pushable<'vm> for OpaqueValue<T, V>
where T: Deref<Target = Thread>,
V: VmType,
V::Type: Sized,
{
fn push(self, _: &'vm Thread, context: &mut Context) -> Result<()> {
context.stack.push(*self.0);
Ok(())
}
}
impl<'vm, V> Getable<'vm> for OpaqueValue<&'vm Thread, V> {
fn from_value(vm: &'vm Thread, value: Variants) -> Option<OpaqueValue<&'vm Thread, V>> {
Some(OpaqueValue(vm.root_value_ref(*value.0), PhantomData))
}
}
impl<'vm, V> Getable<'vm> for OpaqueValue<RootedThread, V> {
fn from_value(vm: &'vm Thread, value: Variants) -> Option<OpaqueValue<RootedThread, V>> {
Some(OpaqueValue(vm.root_value(*value.0), PhantomData))
}
}
/// Type which represents an array
pub struct Array<'vm, T>(RootedValue<&'vm Thread>, PhantomData<T>);
impl<'vm, T> Deref for Array<'vm, T> {
type Target = ValueArray;
fn deref(&self) -> &ValueArray {
match *self.0 {
Value::Array(ref data) => data,
_ => panic!("Expected an array, found {:?}", self.0),
}
}
}
impl<'vm, T> Array<'vm, T> {
pub fn vm(&self) -> &'vm Thread {
self.0.vm_()
}
}
impl<'vm, T: for<'vm2> Getable<'vm2>> Array<'vm, T> {
pub fn get(&self, index: VmInt) -> Option<T> {
match *self.0 {
Value::Array(data) => {
let v = data.get(index as usize);
T::from_value(self.0.vm(), Variants(&v))
}
_ => None,
}
}
}
impl<'vm, T: VmType> VmType for Array<'vm, T>
where T::Type: Sized,
{
type Type = Array<'static, T::Type>;
fn make_type(vm: &Thread) -> ArcType {
Type::array(T::make_type(vm))
}
}
impl<'vm, T: VmType> Pushable<'vm> for Array<'vm, T>
where T::Type: Sized,
{
fn push(self, _: &'vm Thread, context: &mut Context) -> Result<()> {
context.stack.push(*self.0);
Ok(())
}
}
impl<'vm, T> Getable<'vm> for Array<'vm, T> {
fn from_value(vm: &'vm Thread, value: Variants) -> Option<Array<'vm, T>> {
Some(Array(vm.root_value_ref(*value.0), PhantomData))
}
}
impl<'vm, T: Any> VmType for Root<'vm, T> {
type Type = T;
}
impl<'vm, T: vm::Userdata> Getable<'vm> for Root<'vm, T> {
fn from_value(vm: &'vm Thread, value: Variants) -> Option<Root<'vm, T>> {
match *value.0 {
Value::Userdata(data) => vm.root::<T>(data).map(From::from),
_ => None,
}
}
}
impl<'vm> VmType for RootStr<'vm> {
type Type = <str as VmType>::Type;
}
impl<'vm> Getable<'vm> for RootStr<'vm> {
fn from_value(vm: &'vm Thread, value: Variants) -> Option<RootStr<'vm>> {
match *value.0 {
Value::String(v) => Some(vm.root_string(v)),
_ => None,
}
}
}
macro_rules! define_tuple {
($($id: ident)+) => {
impl<$($id),+> VmType for ($($id),+)
where $($id: VmType),+,
$($id::Type: Sized),+
{
type Type = ($($id::Type),+);
fn make_type(vm: &Thread) -> ArcType {
let fields = vec![$(
types::Field {
name: Symbol::from(stringify!($id)),
typ: $id::make_type(vm),
}
),+];
Type::record(Vec::new(), fields)
}
}
impl<'vm, $($id: Getable<'vm>),+> Getable<'vm> for ($($id),+) {
#[allow(unused_assignments)]
fn from_value(vm: &'vm Thread, value: Variants) -> Option<($($id),+)> {
match value.as_ref() {
ValueRef::Data(v) => {
let mut i = 0;
let x = ( $(
{ let a = $id::from_value(vm, Variants(&v.0.fields[i])); i += 1; a }
),+ );
match x {
($(Some($id)),+) => Some(( $($id),+ )),
_ => None,
}
}
_ => None,
}
}
}
impl<'vm, $($id),+> Pushable<'vm> for ($($id),+)
where $($id: Pushable<'vm>),+
{
fn push(self, thread: &'vm Thread, context: &mut Context) -> Result<()> {
let ( $($id),+ ) = self;
$(
try!($id.push(thread, context));
)+
let len = count!($($id),+);
let offset = context.stack.len() - len;
let value = try!(thread::alloc(&mut context.gc, thread, &context.stack, Def {
tag: 0,
elems: &context.stack[offset..],
}));
for _ in 0..len {
context.stack.pop();
}
context.stack.push(Value::Data(value)) ;
Ok(())
}
}
}
}
macro_rules! define_tuples {
($first: ident) => { };
($first: ident $($rest: ident)+) => {
define_tuple!{ $first $($rest)+ }
define_tuples!{ $($rest)+ }
}
}
define_tuples! { _0 _1 _2 _3 _4 _5 _6 }
pub use self::record::Record;
pub mod record {
use base::types;
use base::types::{Type, ArcType};
use base::symbol::Symbol;
use {Variants, Result};
use thread::{self, Context};
use types::VmIndex;
use vm::Thread;
use value::{Def, Value};
use super::{VmType, Getable, Pushable};
pub struct Record<T> {
pub fields: T,
}
impl<FA, A, FB, B> Record<HList<(FA, A), HList<(FB, B), ()>>> {
pub fn split(self) -> (A, B) {
let Record { fields: HList((_, a), HList((_, b), ())) } = self;
(a, b)
}
}
pub struct HList<H, T>(pub H, pub T);
pub trait Field: Default {
fn name() -> &'static str;
}
pub trait FieldList {
fn len() -> VmIndex;
}
pub trait FieldTypes: FieldList {
fn field_types(vm: &Thread, fields: &mut Vec<types::Field<Symbol, ArcType>>);
}
impl FieldList for () {
fn len() -> VmIndex {
0
}
}
impl FieldTypes for () {
fn field_types(_: &Thread, _: &mut Vec<types::Field<Symbol, ArcType>>) {}
}
impl<F, H, T> FieldList for HList<(F, H), T>
where T: FieldList,
{
fn len() -> VmIndex {
1 + T::len()
}
}
impl<F: Field, H: VmType, T> FieldTypes for HList<(F, H), T>
where T: FieldTypes,
{
fn field_types(vm: &Thread, fields: &mut Vec<types::Field<Symbol, ArcType>>) {
fields.push(types::Field {
name: Symbol::from(F::name()),
typ: H::make_type(vm),
});
T::field_types(vm, fields);
}
}
pub trait PushableFieldList<'vm>: FieldList {
fn push(self, vm: &'vm Thread, fields: &mut Context) -> Result<()>;
}
impl<'vm> PushableFieldList<'vm> for () {
fn push(self, _: &'vm Thread, _: &mut Context) -> Result<()> {
Ok(())
}
}
impl<'vm, F: Field, H: Pushable<'vm>, T> PushableFieldList<'vm> for HList<(F, H), T>
where T: PushableFieldList<'vm>,
{
fn push(self, vm: &'vm Thread, fields: &mut Context) -> Result<()> {
let HList((_, head), tail) = self;
try!(head.push(vm, fields));
tail.push(vm, fields)
}
}
pub trait GetableFieldList<'vm>: FieldList + Sized {
fn from_value(vm: &'vm Thread, values: &[Value]) -> Option<Self>;
}
impl<'vm> GetableFieldList<'vm> for () {
fn from_value(_vm: &'vm Thread, values: &[Value]) -> Option<Self> {
debug_assert!(values.is_empty());
Some(())
}
}
impl<'vm, F, H, T> GetableFieldList<'vm> for HList<(F, H), T>
where F: Field,
H: Getable<'vm> + VmType,
T: GetableFieldList<'vm>,
{
fn from_value(vm: &'vm Thread, values: &[Value]) -> Option<Self> {
let head = unsafe { H::from_value(vm, Variants::new(&values[0])) };
head.and_then(|head| {
T::from_value(vm, &values[1..]).map(move |tail| HList((F::default(), head), tail))
})
}
}
impl<A: VmType, F: Field, T: FieldTypes> VmType for Record<HList<(F, A), T>>
where A::Type: Sized,
{
type Type = Record<((&'static str, A::Type),)>;
fn make_type(vm: &Thread) -> ArcType {
let len = HList::<(F, A), T>::len() as usize;
let mut fields = Vec::with_capacity(len);
HList::<(F, A), T>::field_types(vm, &mut fields);
Type::record(Vec::new(), fields)
}
}
impl<'vm, A, F, T> Pushable<'vm> for Record<HList<(F, A), T>>
where A: Pushable<'vm>,
F: Field,
T: PushableFieldList<'vm>,
{
fn push(self, thread: &'vm Thread, context: &mut Context) -> Result<()> {
try!(self.fields.push(thread, context));
let len = HList::<(F, A), T>::len();
let offset = context.stack.len() - len;
let value = try!(thread::alloc(&mut context.gc,
thread,
&context.stack,
Def {
tag: 0,
elems: &context.stack[offset..],
}));
for _ in 0..len {
context.stack.pop();
}
context.stack.push(Value::Data(value));
Ok(())
}
}
impl<'vm, A, F, T> Getable<'vm> for Record<HList<(F, A), T>>
where A: Getable<'vm> + VmType,
F: Field,
T: GetableFieldList<'vm>,
{
fn from_value(vm: &'vm Thread, value: Variants) -> Option<Self> {
match *value.0 {
Value::Data(ref data) => {
HList::<(F, A), T>::from_value(vm, &data.fields)
.map(|fields| Record { fields: fields })
}
_ => None,
}
}
}
}
#[macro_export]
macro_rules! types {
($($field: ident),*) => {
$(
#[allow(non_camel_case_types)]
#[derive(Default)]
pub struct $field;
impl $crate::api::record::Field for $field {
fn name() -> &'static str {
stringify!($field)
}
}
)*
}
}
#[macro_export]
macro_rules! hlist {
() => { () };
($field: ident => $value: expr) => {
$crate::api::record::HList((_field::$field, $value), ())
};
($field: ident => $value: expr, $($field_tail: ident => $value_tail: expr),*) => {
$crate::api::record::HList((_field::$field, $value),
hlist!($($field_tail => $value_tail),*))
}
}
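/// Declares the field name types used by `record_no_decl!`, letting several records with the
/// same fields share a single declaration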
#[macro_export]
macro_rules! field_decl {
($($field: ident),*) => {
mod _field { types!($($field),*); }
}
}
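/// Creates a record from fields previously declared with `field_decl!`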
#[macro_export]
macro_rules! record_no_decl {
($($field: ident => $value: expr),*) => {
{
$crate::api::Record {
fields: hlist!($($field => $value),*)
}
}
}
}
/// Macro that creates a record that can be passed to gluon
///
/// ```rust,ignore
/// record!(x => 1, y => 2, name => "Gluon")
/// ```
#[macro_export]
macro_rules! record {
($($field: ident => $value: expr),*) => {
{
field_decl!($($field),*);
record_no_decl!($($field => $value),*)
}
}
}
impl<F: VmType> VmType for Primitive<F> {
type Type = F::Type;
fn make_type(vm: &Thread) -> ArcType {
F::make_type(vm)
}
}
impl<'vm, F> Pushable<'vm> for Primitive<F>
where F: FunctionType + VmType + Send + Sync,
{
fn push(self, thread: &'vm Thread, context: &mut Context) -> Result<()> {
let extern_function = Box::new(self.function);
let id = Symbol::from(self.name);
let value = Value::Function(try!(context.alloc_with(thread,
Move(ExternFunction {
id: id,
args: F::arguments(),
function: extern_function,
}))));
context.stack.push(value);
Ok(())
}
}
pub struct CPrimitive {
function: extern "C" fn(&Thread) -> Status,
args: VmIndex,
id: Symbol,
}
impl CPrimitive {
pub unsafe fn new(function: extern "C" fn(&Thread) -> Status,
args: VmIndex,
id: &str)
-> CPrimitive {
CPrimitive {
id: Symbol::from(id),
function: function,
args: args,
}
}
}
impl<'vm> Pushable<'vm> for CPrimitive {
fn push(self, thread: &'vm Thread, context: &mut Context) -> Result<()> {
use std::mem::transmute;
let function = self.function;
let extern_function = unsafe {
// The VM guarantees that it only ever calls this function with itself, which should
// make ignoring the lifetime safe
transmute::<Box<Fn(&'vm Thread) -> Status + Send + Sync>,
Box<Fn(&Thread) -> Status + Send + Sync>>(Box::new(move |vm| function(vm)))
};
let value = try!(context.alloc_with(thread,
Move(ExternFunction {
id: self.id,
args: self.args,
function: extern_function,
})));
context.stack.push(Value::Function(value));
Ok(())
}
}
fn make_type<T: ?Sized + VmType>(vm: &Thread) -> ArcType {
<T as VmType>::make_type(vm)
}
/// Type which represents a function reference in gluon
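///
/// A sketch of retrieving and calling a gluon function (illustrative only; assumes a
/// `get_global`-style accessor on the thread and a hypothetical `examples.add1` global):
///
/// ```rust,ignore
/// let mut add1: FunctionRef<fn (i32) -> i32> = try!(vm.get_global("examples.add1"));
/// let result = try!(add1.call(41));
/// ```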
pub type FunctionRef<'vm, F> = Function<&'vm Thread, F>;
/// Type which represents a function in gluon
pub struct Function<T, F>
where T: Deref<Target = Thread>,
{
value: RootedValue<T>,
_marker: PhantomData<F>,
}
impl<T, F> Function<T, F>
where T: Deref<Target = Thread>,
{
pub fn value(&self) -> Value {
*self.value
}
}
impl<T, F> VmType for Function<T, F>
where T: Deref<Target = Thread>,
F: VmType,
{
type Type = F::Type;
fn make_type(vm: &Thread) -> ArcType {
F::make_type(vm)
}
}
impl<'vm, T, F: Any> Pushable<'vm> for Function<T, F>
where T: Deref<Target = Thread>,
F: VmType,
{
fn push(self, _: &'vm Thread, context: &mut Context) -> Result<()> {
context.stack.push(*self.value);
Ok(())
}
}
impl<'vm, F> Getable<'vm> for Function<&'vm Thread, F> {
fn from_value(vm: &'vm Thread, value: Variants) -> Option<Function<&'vm Thread, F>> {
Some(Function {
value: vm.root_value_ref(*value.0),
_marker: PhantomData,
}) // TODO: not type safe
}
}
/// Trait which represents a function
pub trait FunctionType {
/// Returns how many arguments the function needs to be provided to call it
fn arguments() -> VmIndex;
}
/// Trait which abstracts over types which can be called by pulling the arguments they need
/// from the virtual machine's stack
pub trait VmFunction<'vm> {
fn unpack_and_call(&self, vm: &'vm Thread) -> Status;
}
impl<'s, T: FunctionType> FunctionType for &'s T {
fn arguments() -> VmIndex {
T::arguments()
}
}
impl<'vm, 's, T: ?Sized> VmFunction<'vm> for &'s T
where T: VmFunction<'vm>,
{
fn unpack_and_call(&self, vm: &'vm Thread) -> Status {
(**self).unpack_and_call(vm)
}
}
impl<F> FunctionType for Box<F>
where F: FunctionType,
{
fn arguments() -> VmIndex {
F::arguments()
}
}
impl<'vm, F: ?Sized> VmFunction<'vm> for Box<F>
where F: VmFunction<'vm>,
{
fn unpack_and_call(&self, vm: &'vm Thread) -> Status {
(**self).unpack_and_call(vm)
}
}
macro_rules! make_vm_function {
($($args:ident),*) => (
impl <$($args: VmType,)* R: VmType> VmType for fn ($($args),*) -> R {
#[allow(non_snake_case)]
type Type = fn ($($args::Type),*) -> R::Type;
#[allow(non_snake_case)]
fn make_type(vm: &Thread) -> ArcType {
let args = vec![$(make_type::<$args>(vm)),*];
Type::function(args, make_type::<R>(vm))
}
}
impl <'vm, $($args,)* R> Pushable<'vm> for fn ($($args),*) -> R
where $($args: Getable<'vm> + VmType + 'vm,)* R: Pushable<'vm> + VmType + 'vm {
fn push(self, thread: &'vm Thread, context: &mut Context) -> Result<()> {
let f = Box::new(move |vm| self.unpack_and_call(vm));
let extern_function = unsafe {
//The VM guarantees that it only ever calls this function with itself which should
//make sure that ignoring the lifetime is safe
::std::mem::transmute
::<Box<Fn(&'vm Thread) -> Status + Send + Sync>,
Box<Fn(&Thread) -> Status + Send + Sync>>(f)
};
let id = Symbol::from("<extern>");
let value = Value::Function(try!(context.alloc_with(thread, Move(
ExternFunction {
id: id,
args: count!($($args),*) + R::extra_args(),
function: extern_function
}))));
context.stack.push(value);
Ok(())
}
}
impl <'vm, $($args,)* R: VmType> FunctionType for fn ($($args),*) -> R {
fn arguments() -> VmIndex {
count!($($args),*) + R::extra_args()
}
}
impl <'vm, $($args,)* R> VmFunction<'vm> for fn ($($args),*) -> R
where $($args: Getable<'vm> + 'vm,)*
R: Pushable<'vm> + VmType + 'vm
{
#[allow(non_snake_case, unused_mut, unused_assignments, unused_variables, unused_unsafe)]
fn unpack_and_call(&self, vm: &'vm Thread) -> Status {
let n_args = Self::arguments();
let mut context = vm.context();
let mut i = 0;
let r = unsafe {
let (lock, ($($args,)*)) = {
let stack = StackFrame::current(&mut context.stack);
$(let $args = {
let x = $args::from_value_unsafe(vm, Variants(&stack[i]))
.expect(stringify!(Argument $args));
i += 1;
x
});*;
// Lock the frame to ensure that any references `from_value_unsafe` may have returned stay
// rooted
(stack.into_lock(), ($($args,)*))
};
drop(context);
let r = (*self)($($args),*);
context = vm.context();
context.stack.release_lock(lock);
r
};
r.status_push(vm, &mut context)
}
}
impl <'s, $($args,)* R: VmType> FunctionType for Fn($($args),*) -> R + 's {
fn arguments() -> VmIndex {
count!($($args),*) + R::extra_args()
}
}
impl <'s, $($args: VmType,)* R: VmType> VmType for Fn($($args),*) -> R + 's {
type Type = fn ($($args::Type),*) -> R::Type;
#[allow(non_snake_case)]
fn make_type(vm: &Thread) -> ArcType {
let args = vec![$(make_type::<$args>(vm)),*];
Type::function(args, make_type::<R>(vm))
}
}
impl <'vm, $($args,)* R> VmFunction<'vm> for Fn($($args),*) -> R + 'vm
where $($args: Getable<'vm> + 'vm,)*
R: Pushable<'vm> + VmType + 'vm
{
#[allow(non_snake_case, unused_mut, unused_assignments, unused_variables, unused_unsafe)]
fn unpack_and_call(&self, vm: &'vm Thread) -> Status {
let n_args = Self::arguments();
let mut context = vm.context();
let mut i = 0;
let r = unsafe {
let (lock, ($($args,)*)) = {
let stack = StackFrame::current(&mut context.stack);
$(let $args = {
let x = $args::from_value_unsafe(vm, Variants(&stack[i]))
.expect(stringify!(Argument $args));
i += 1;
x
});*;
// Lock the frame to ensure that any references `from_value_unsafe` may have returned stay
// rooted
(stack.into_lock(), ($($args,)*))
};
drop(context);
let r = (*self)($($args),*);
context = vm.context();
context.stack.release_lock(lock);
r
};
r.status_push(vm, &mut context)
}
}
impl<'vm, T, $($args,)* R> Function<T, fn($($args),*) -> R>
where $($args: Pushable<'vm>,)*
T: Deref<Target = Thread>,
R: VmType + Getable<'vm>
{
#[allow(non_snake_case)]
pub fn call(&'vm mut self $(, $args: $args)*) -> Result<R> {
let vm = self.value.vm();
let mut context = vm.context();
StackFrame::current(&mut context.stack).enter_scope(0, State::Unknown);
context.stack.push(*self.value);
$(
try!($args.push(vm, &mut context));
)*
for _ in 0..R::extra_args() {
0.push(vm, &mut context).unwrap();
}
let args = count!($($args),*) + R::extra_args();
let mut context = try!(vm.call_function(context, args)).unwrap();
let result = context.stack.pop();
R::from_value(vm, Variants(&result))
.ok_or_else(|| {
error!("Wrong type {:?}", result);
Error::Message("Wrong type".to_string())
})
}
}
)
}
make_vm_function!();
make_vm_function!(A);
make_vm_function!(A, B);
make_vm_function!(A, B, C);
make_vm_function!(A, B, C, D);
make_vm_function!(A, B, C, D, E);
make_vm_function!(A, B, C, D, E, F);
make_vm_function!(A, B, C, D, E, F, G);
#[macro_export]
macro_rules! vm_function {
($func: expr) => ({
fn wrapper<'b, 'c>(vm: &Thread) -> Status {
$func.unpack_and_call(vm)
}
wrapper
})
}
| {
match self {
IO::Value(value) => value.push(vm, context),
IO::Exception(exc) => Err(Error::Message(exc)),
}
} |
converter.rs | use crate::core::{converter::*, error::*, size_threshold::SizeThreshold};
use async_trait::async_trait;
use clock::Clock;
use mqtt_channel::Message;
use thin_edge_json::serialize::ThinEdgeJsonSerializer;
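/// Converter which maps Thin Edge JSON measurements received on `tedge/measurements` into
/// messages for Azure IoT Hub on `az/messages/events/`, optionally stamping them with the
/// current time and rejecting payloads above the configured size threshold.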
pub struct AzureConverter {
pub(crate) add_timestamp: bool,
pub(crate) clock: Box<dyn Clock>,
pub(crate) size_threshold: SizeThreshold,
pub(crate) mapper_config: MapperConfig,
}
impl AzureConverter {
pub fn new(add_timestamp: bool, clock: Box<dyn Clock>, size_threshold: SizeThreshold) -> Self {
let mapper_config = MapperConfig {
in_topic_filter: make_valid_topic_filter_or_panic("tedge/measurements"),
out_topic: make_valid_topic_or_panic("az/messages/events/"),
errors_topic: make_valid_topic_or_panic("tedge/errors"),
};
AzureConverter {
add_timestamp,
clock,
size_threshold,
mapper_config,
}
}
}
#[async_trait]
impl Converter for AzureConverter {
type Error = ConversionError;
fn get_mapper_config(&self) -> &MapperConfig {
&self.mapper_config
}
async fn try_convert(&mut self, input: &Message) -> Result<Vec<Message>, Self::Error> {
let () = self.size_threshold.validate(input)?;
let default_timestamp = self.add_timestamp.then(|| self.clock.now());
let mut serializer = ThinEdgeJsonSerializer::new_with_timestamp(default_timestamp);
let () = thin_edge_json::parser::parse_str(input.payload_str()?, &mut serializer)?;
let payload = serializer.into_string()?;
Ok(vec![Message::new(&self.mapper_config.out_topic, payload)])
}
}
#[cfg(test)]
mod tests {
use crate::{
az::converter::AzureConverter,
core::{converter::*, error::ConversionError, size_threshold::SizeThreshold},
};
use assert_json_diff::*;
use assert_matches::*;
use clock::Clock;
use mqtt_channel::{Message, Topic};
use serde_json::json;
use time::macros::datetime;
struct TestClock;
impl Clock for TestClock {
fn | (&self) -> clock::Timestamp {
datetime!(2021-04-08 00:00:00 +05:00)
}
}
#[tokio::test]
async fn converting_invalid_json_is_invalid() {
let mut converter =
AzureConverter::new(false, Box::new(TestClock), SizeThreshold(255 * 1024));
let input = "This is not Thin Edge JSON";
let result = converter.try_convert(&new_tedge_message(input)).await;
assert_matches!(result, Err(ConversionError::FromThinEdgeJsonParser(_)))
}
fn new_tedge_message(input: &str) -> Message {
Message::new(&Topic::new_unchecked("tedge/measurements"), input)
}
fn extract_first_message_payload(mut messages: Vec<Message>) -> String {
messages.pop().unwrap().payload_str().unwrap().to_string()
}
#[tokio::test]
async fn converting_input_without_timestamp_produces_output_without_timestamp_given_add_timestamp_is_false(
) {
let mut converter =
AzureConverter::new(false, Box::new(TestClock), SizeThreshold(255 * 1024));
let input = r#"{
"temperature": 23.0
}"#;
let expected_output = json!({
"temperature": 23.0
});
let output = converter.convert(&new_tedge_message(input)).await;
assert_json_eq!(
serde_json::from_str::<serde_json::Value>(&extract_first_message_payload(output))
.unwrap(),
expected_output
);
}
#[tokio::test]
async fn converting_input_with_timestamp_produces_output_with_timestamp_given_add_timestamp_is_false(
) {
let mut converter =
AzureConverter::new(false, Box::new(TestClock), SizeThreshold(255 * 1024));
let input = r#"{
"time" : "2013-06-22T17:03:14.000+02:00",
"temperature": 23.0
}"#;
let expected_output = json!({
"time" : "2013-06-22T17:03:14+02:00",
"temperature": 23.0
});
let output = converter.convert(&new_tedge_message(input)).await;
assert_json_eq!(
serde_json::from_str::<serde_json::Value>(&extract_first_message_payload(output))
.unwrap(),
expected_output
);
}
#[tokio::test]
async fn converting_input_with_timestamp_produces_output_with_timestamp_given_add_timestamp_is_true(
) {
let mut converter =
AzureConverter::new(true, Box::new(TestClock), SizeThreshold(255 * 1024));
let input = r#"{
"time" : "2013-06-22T17:03:14.000+02:00",
"temperature": 23.0
}"#;
let expected_output = json!({
"time" : "2013-06-22T17:03:14+02:00",
"temperature": 23.0
});
let output = converter.convert(&new_tedge_message(input)).await;
assert_json_eq!(
serde_json::from_str::<serde_json::Value>(&extract_first_message_payload(output))
.unwrap(),
expected_output
);
}
#[tokio::test]
async fn converting_input_without_timestamp_produces_output_with_timestamp_given_add_timestamp_is_true(
) {
let mut converter =
AzureConverter::new(true, Box::new(TestClock), SizeThreshold(255 * 1024));
let input = r#"{
"temperature": 23.0
}"#;
let expected_output = json!({
"temperature": 23.0,
"time": "2021-04-08T00:00:00+05:00"
});
let output = converter.convert(&new_tedge_message(input)).await;
assert_json_eq!(
serde_json::from_str::<serde_json::Value>(&extract_first_message_payload(output))
.unwrap(),
expected_output
);
}
#[tokio::test]
async fn exceeding_threshold_returns_error() {
let mut converter = AzureConverter::new(false, Box::new(TestClock), SizeThreshold(1));
let _topic = "tedge/measurements".to_string();
let input = "ABC";
let result = converter.try_convert(&new_tedge_message(input)).await;
assert_matches!(
result,
Err(ConversionError::SizeThresholdExceeded {
topic: _topic,
actual_size: 3,
threshold: 1
})
);
}
}
| now |
impl_arithmetic.rs | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use num_traits::identities::Zero;
use tidb_query_codegen::rpn_fn;
use tidb_query_common::Result;
use tidb_query_datatype::codec::data_type::*;
use tidb_query_datatype::codec::{self, div_i64, div_i64_with_u64, div_u64_with_i64, Error};
use tidb_query_datatype::expr::EvalContext;
#[rpn_fn(nonnull)]
#[inline]
pub fn arithmetic<A: ArithmeticOp>(lhs: &A::T, rhs: &A::T) -> Result<Option<A::T>> {
A::calc(lhs, rhs)
}
#[rpn_fn(capture = [ctx])]
#[inline]
pub fn arithmetic_with_ctx<A: ArithmeticOpWithCtx>(
ctx: &mut EvalContext,
arg0: Option<&A::T>,
arg1: Option<&A::T>,
) -> Result<Option<A::T>> {
if let (Some(lhs), Some(rhs)) = (arg0, arg1) {
A::calc(ctx, lhs, rhs)
} else {
Ok(None)
}
}
#[rpn_fn(nonnull, capture = [ctx])]
#[inline]
pub fn arithmetic_with_ctx_nonnull<A: ArithmeticOpWithCtx>(
ctx: &mut EvalContext,
lhs: &A::T,
rhs: &A::T,
) -> Result<Option<A::T>> {
A::calc(ctx, lhs, rhs)
}
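/// A binary arithmetic operation over a single evaluable type, plugged into the generic
/// `arithmetic` rpn function above.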
pub trait ArithmeticOp {
type T: Evaluable + EvaluableRet;
fn calc(lhs: &Self::T, rhs: &Self::T) -> Result<Option<Self::T>>;
}
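/// Like `ArithmeticOp`, but with access to the `EvalContext` so the operation can follow the
/// SQL mode, e.g. when handling division by zero or overflow.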
pub trait ArithmeticOpWithCtx {
type T: Evaluable + EvaluableRet;
fn calc(ctx: &mut EvalContext, lhs: &Self::T, rhs: &Self::T) -> Result<Option<Self::T>>;
}
#[derive(Debug)]
pub struct IntIntPlus;
impl ArithmeticOp for IntIntPlus {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
lhs.checked_add(*rhs)
.ok_or_else(|| Error::overflow("BIGINT", &format!("({} + {})", lhs, rhs)).into())
.map(Some)
}
}
#[derive(Debug)]
pub struct IntUintPlus;
impl ArithmeticOp for IntUintPlus {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
let res = if *lhs >= 0 {
(*lhs as u64).checked_add(*rhs as u64)
} else {
(*rhs as u64).checked_sub(lhs.overflowing_neg().0 as u64)
};
res.ok_or_else(|| {
Error::overflow("BIGINT UNSIGNED", &format!("({} + {})", lhs, rhs)).into()
})
.map(|v| Some(v as i64))
}
}
#[derive(Debug)]
pub struct UintIntPlus;
impl ArithmeticOp for UintIntPlus {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
IntUintPlus::calc(rhs, lhs)
}
}
#[derive(Debug)]
pub struct UintUintPlus;
impl ArithmeticOp for UintUintPlus {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
(*lhs as u64)
.checked_add(*rhs as u64)
.ok_or_else(|| {
Error::overflow("BIGINT UNSIGNED", &format!("({} + {})", lhs, rhs)).into()
})
.map(|v| Some(v as i64))
}
}
#[derive(Debug)]
pub struct RealPlus;
impl ArithmeticOp for RealPlus {
type T = Real;
fn calc(lhs: &Real, rhs: &Real) -> Result<Option<Real>> {
if (**lhs > 0f64 && **rhs > (std::f64::MAX - **lhs))
|| (**lhs < 0f64 && **rhs < (-std::f64::MAX - **lhs))
{
return Err(Error::overflow("DOUBLE", &format!("({} + {})", lhs, rhs)).into());
}
Ok(Some(*lhs + *rhs))
}
}
#[derive(Debug)]
pub struct DecimalPlus;
impl ArithmeticOp for DecimalPlus {
type T = Decimal;
fn calc(lhs: &Decimal, rhs: &Decimal) -> Result<Option<Decimal>> {
let res: codec::Result<Decimal> = (lhs + rhs).into();
Ok(Some(res?))
}
}
#[derive(Debug)]
pub struct IntIntMinus;
impl ArithmeticOp for IntIntMinus {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
lhs.checked_sub(*rhs)
.ok_or_else(|| Error::overflow("BIGINT", &format!("({} - {})", lhs, rhs)).into())
.map(Some)
}
}
#[derive(Debug)]
pub struct IntUintMinus;
impl ArithmeticOp for IntUintMinus {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
if *lhs >= 0 {
(*lhs as u64)
.checked_sub(*rhs as u64)
.ok_or_else(|| Error::overflow("BIGINT", &format!("({} - {})", lhs, rhs)).into())
.map(|v| Some(v as i64))
} else {
Err(Error::overflow("BIGINT", &format!("({} - {})", lhs, rhs)).into())
}
}
}
#[derive(Debug)]
pub struct UintIntMinus;
impl ArithmeticOp for UintIntMinus {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
let res = if *rhs >= 0 {
(*lhs as u64).checked_sub(*rhs as u64)
} else {
(*lhs as u64).checked_add(rhs.overflowing_neg().0 as u64)
};
res.ok_or_else(|| Error::overflow("BIGINT", &format!("({} - {})", lhs, rhs)).into())
.map(|v| Some(v as i64))
}
}
#[derive(Debug)]
pub struct UintUintMinus;
impl ArithmeticOp for UintUintMinus {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
(*lhs as u64)
.checked_sub(*rhs as u64)
.ok_or_else(|| {
Error::overflow("BIGINT UNSIGNED", &format!("({} - {})", lhs, rhs)).into()
})
.map(|v| Some(v as i64))
}
}
#[derive(Debug)]
pub struct RealMinus;
impl ArithmeticOp for RealMinus {
type T = Real;
fn calc(lhs: &Real, rhs: &Real) -> Result<Option<Real>> {
let res = *lhs - *rhs;
if res.is_infinite() {
return Err(Error::overflow("DOUBLE", &format!("({} - {})", lhs, rhs)).into());
}
Ok(Some(res))
}
}
#[derive(Debug)]
pub struct DecimalMinus;
impl ArithmeticOp for DecimalMinus {
type T = Decimal;
fn calc(lhs: &Decimal, rhs: &Decimal) -> Result<Option<Decimal>> {
let res: codec::Result<Decimal> = (lhs - rhs).into();
Ok(Some(res?))
}
}
#[derive(Debug)]
pub struct IntIntMod;
impl ArithmeticOp for IntIntMod {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
if *rhs == 0i64 {
return Ok(None);
}
Ok(Some(lhs % rhs))
}
}
#[derive(Debug)]
pub struct IntUintMod;
impl ArithmeticOp for IntUintMod {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
if *rhs == 0i64 {
return Ok(None);
}
Ok(Some(
((lhs.overflowing_abs().0 as u64) % (*rhs as u64)) as i64,
))
}
}
#[derive(Debug)]
pub struct UintIntMod;
impl ArithmeticOp for UintIntMod {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
if *rhs == 0i64 {
return Ok(None);
}
Ok(Some(
((*lhs as u64) % (rhs.overflowing_abs().0 as u64)) as i64,
))
}
}
#[derive(Debug)]
pub struct UintUintMod;
impl ArithmeticOp for UintUintMod {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
if *rhs == 0i64 {
return Ok(None);
}
Ok(Some(((*lhs as u64) % (*rhs as u64)) as i64))
}
}
#[derive(Debug)]
pub struct RealMod;
impl ArithmeticOp for RealMod {
type T = Real;
fn calc(lhs: &Real, rhs: &Real) -> Result<Option<Real>> {
if (*rhs).into_inner() == 0f64 {
return Ok(None);
}
Ok(Some(*lhs % *rhs))
}
}
#[derive(Debug)]
pub struct DecimalMod;
impl ArithmeticOpWithCtx for DecimalMod {
type T = Decimal;
fn calc(ctx: &mut EvalContext, lhs: &Decimal, rhs: &Decimal) -> Result<Option<Decimal>> {
Ok(if let Some(value) = lhs % rhs {
value
.into_result_with_overflow_err(
ctx,
Error::overflow("DECIMAL", format!("({} % {})", lhs, rhs)),
)
.map(Some)
} else {
ctx.handle_division_by_zero().map(|_| None)
}?)
}
}
#[derive(Debug)]
pub struct DecimalMultiply;
impl ArithmeticOp for DecimalMultiply {
type T = Decimal;
fn calc(lhs: &Decimal, rhs: &Decimal) -> Result<Option<Decimal>> {
let res: codec::Result<Decimal> = (lhs * rhs).into();
Ok(Some(res?))
}
}
#[derive(Debug)]
pub struct RealMultiply;
impl ArithmeticOp for RealMultiply {
type T = Real;
fn calc(lhs: &Real, rhs: &Real) -> Result<Option<Real>> {
let res = *lhs * *rhs;
if res.is_infinite() {
Err(Error::overflow("REAL", &format!("({} * {})", lhs, rhs)).into())
} else {
Ok(Some(res))
}
}
}
#[derive(Debug)]
pub struct IntIntMultiply;
impl ArithmeticOp for IntIntMultiply {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
lhs.checked_mul(*rhs)
.ok_or_else(|| Error::overflow("BIGINT", &format!("({} * {})", lhs, rhs)).into())
.map(Some)
}
}
#[derive(Debug)]
pub struct IntUintMultiply;
impl ArithmeticOp for IntUintMultiply {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
if *lhs >= 0 {
(*lhs as u64).checked_mul(*rhs as u64).map(|x| x as i64)
} else {
None
}
.ok_or_else(|| Error::overflow("BIGINT UNSIGNED", &format!("({} * {})", lhs, rhs)).into())
.map(Some)
}
}
#[derive(Debug)]
pub struct UintIntMultiply;
impl ArithmeticOp for UintIntMultiply {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
IntUintMultiply::calc(rhs, lhs)
}
}
#[derive(Debug)]
pub struct UintUintMultiply;
impl ArithmeticOp for UintUintMultiply {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
(*lhs as u64)
.checked_mul(*rhs as u64)
.ok_or_else(|| {
Error::overflow("BIGINT UNSIGNED", &format!("({} * {})", lhs, rhs)).into()
})
.map(|v| Some(v as i64))
}
}
#[derive(Debug)]
pub struct IntDivideInt;
impl ArithmeticOp for IntDivideInt {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
if *rhs == 0 {
return Ok(None);
}
Ok(Some(div_i64(*lhs, *rhs)?))
}
}
#[derive(Debug)]
pub struct IntDivideUint;
impl ArithmeticOp for IntDivideUint {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
if *rhs == 0 {
return Ok(None);
}
Ok(Some(div_i64_with_u64(*lhs, *rhs as u64).map(|r| r as i64)?))
}
}
#[derive(Debug)]
pub struct UintDivideUint;
impl ArithmeticOp for UintDivideUint {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
if *rhs == 0 {
return Ok(None);
}
Ok(Some(((*lhs as u64) / (*rhs as u64)) as i64))
}
}
#[derive(Debug)]
pub struct UintDivideInt;
impl ArithmeticOp for UintDivideInt {
type T = Int;
fn calc(lhs: &Int, rhs: &Int) -> Result<Option<Int>> {
if *rhs == 0 {
return Ok(None);
}
Ok(Some(div_u64_with_i64(*lhs as u64, *rhs).map(|r| r as i64)?))
}
}
#[rpn_fn(capture = [ctx])]
#[inline]
fn int_divide_decimal(
ctx: &mut EvalContext,
lhs: Option<&Decimal>,
rhs: Option<&Decimal>,
) -> Result<Option<Int>> {
let result = try_opt!(arithmetic_with_ctx::<DecimalDivide>(ctx, lhs, rhs)).as_i64();
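// Truncation of the fractional part is expected for integer division, so a truncated
// conversion is accepted as-is; only a genuine overflow becomes an error.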
Ok(if result.is_truncated() {
Some(result.unwrap())
} else {
result
.into_result_with_overflow_err(
ctx,
Error::overflow(
"BIGINT",
format!("({} / {})", lhs.as_ref().unwrap(), rhs.as_ref().unwrap()),
),
)
.map(Some)?
})
}
pub struct DecimalDivide;
impl ArithmeticOpWithCtx for DecimalDivide {
type T = Decimal;
fn calc(ctx: &mut EvalContext, lhs: &Decimal, rhs: &Decimal) -> Result<Option<Decimal>> {
Ok(if let Some(value) = lhs / rhs {
value
.into_result_with_overflow_err(
ctx,
Error::overflow("DECIMAL", format!("({} / {})", lhs, rhs)),
)
.map(Some)
} else {
// TODO: handle RpnFuncExtra's field_type, round the result if needed.
ctx.handle_division_by_zero().map(|_| None)
}?)
}
}
pub struct RealDivide;
impl ArithmeticOpWithCtx for RealDivide {
type T = Real;
fn calc(ctx: &mut EvalContext, lhs: &Real, rhs: &Real) -> Result<Option<Real>> {
Ok(if rhs.is_zero() {
ctx.handle_division_by_zero().map(|_| None)?
} else {
let result = *lhs / *rhs;
if result.is_infinite() {
ctx.handle_overflow_err(Error::overflow("DOUBLE", &format!("{} / {}", lhs, rhs)))
.map(|_| None)?
} else {
Some(result)
}
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::str::FromStr;
use tidb_query_datatype::builder::FieldTypeBuilder;
use tidb_query_datatype::{FieldTypeFlag, FieldTypeTp};
use tipb::ScalarFuncSig;
use crate::test_util::RpnFnScalarEvaluator;
use tidb_query_datatype::codec::error::ERR_DIVISION_BY_ZERO;
use tidb_query_datatype::expr::{EvalConfig, Flag, SqlMode};
#[test]
fn test_plus_int() {
let test_cases = vec![
(None, false, Some(1), false, None),
(Some(1), false, None, false, None),
(Some(17), false, Some(25), false, Some(42)),
(
Some(std::i64::MIN),
false,
Some((std::i64::MAX as u64 + 1) as i64),
true,
Some(0),
),
]; | for (lhs, lhs_is_unsigned, rhs, rhs_is_unsigned, expected) in test_cases {
let lhs_field_type = FieldTypeBuilder::new()
.tp(FieldTypeTp::LongLong)
.flag(if lhs_is_unsigned {
FieldTypeFlag::UNSIGNED
} else {
FieldTypeFlag::empty()
})
.build();
let rhs_field_type = FieldTypeBuilder::new()
.tp(FieldTypeTp::LongLong)
.flag(if rhs_is_unsigned {
FieldTypeFlag::UNSIGNED
} else {
FieldTypeFlag::empty()
})
.build();
let output = RpnFnScalarEvaluator::new()
.push_param_with_field_type(lhs, lhs_field_type)
.push_param_with_field_type(rhs, rhs_field_type)
.evaluate(ScalarFuncSig::PlusInt)
.unwrap();
assert_eq!(output, expected, "lhs={:?}, rhs={:?}", lhs, rhs);
}
}
#[test]
fn test_plus_real() {
let test_cases = vec![
(
Real::new(1.01001).ok(),
Real::new(-0.01).ok(),
Real::new(1.00001).ok(),
false,
),
(Real::new(1e308).ok(), Real::new(1e308).ok(), None, true),
(
Real::new(std::f64::MAX - 1f64).ok(),
Real::new(2f64).ok(),
None,
true,
),
];
for (lhs, rhs, expected, is_err) in test_cases {
let output = RpnFnScalarEvaluator::new()
.push_param(lhs)
.push_param(rhs)
.evaluate(ScalarFuncSig::PlusReal);
if is_err {
assert!(output.is_err())
} else {
let output = output.unwrap();
assert_eq!(output, expected, "lhs={:?}, rhs={:?}", lhs, rhs);
}
}
}
#[test]
fn test_plus_decimal() {
let test_cases = vec![("1.1", "2.2", "3.3")];
for (lhs, rhs, expected) in test_cases {
let expected: Option<Decimal> = expected.parse().ok();
let output = RpnFnScalarEvaluator::new()
.push_param(lhs.parse::<Decimal>().ok())
.push_param(rhs.parse::<Decimal>().ok())
.evaluate(ScalarFuncSig::PlusDecimal)
.unwrap();
assert_eq!(output, expected, "lhs={:?}, rhs={:?}", lhs, rhs);
}
}
#[test]
fn test_minus_int() {
let test_cases = vec![
(None, false, Some(1), false, None, false),
(Some(1), false, None, false, None, false),
(Some(12), false, Some(1), false, Some(11), false),
(
Some(0),
true,
Some(std::i64::MIN),
false,
Some((std::i64::MAX as u64 + 1) as i64),
false,
),
(
Some(std::i64::MIN),
false,
Some(std::i64::MAX),
false,
None,
true,
),
(
Some(std::i64::MAX),
false,
Some(std::i64::MIN),
false,
None,
true,
),
(Some(-1), false, Some(2), true, None, true),
(Some(1), true, Some(2), false, None, true),
];
for (lhs, lhs_is_unsigned, rhs, rhs_is_unsigned, expected, is_err) in test_cases {
let lhs_field_type = FieldTypeBuilder::new()
.tp(FieldTypeTp::LongLong)
.flag(if lhs_is_unsigned {
FieldTypeFlag::UNSIGNED
} else {
FieldTypeFlag::empty()
})
.build();
let rhs_field_type = FieldTypeBuilder::new()
.tp(FieldTypeTp::LongLong)
.flag(if rhs_is_unsigned {
FieldTypeFlag::UNSIGNED
} else {
FieldTypeFlag::empty()
})
.build();
let output = RpnFnScalarEvaluator::new()
.push_param_with_field_type(lhs, lhs_field_type)
.push_param_with_field_type(rhs, rhs_field_type)
.evaluate(ScalarFuncSig::MinusInt);
if is_err {
assert!(output.is_err())
} else {
let output = output.unwrap();
assert_eq!(output, expected, "lhs={:?}, rhs={:?}", lhs, rhs);
}
}
}
#[test]
fn test_minus_real() {
let test_cases = vec![
(
Real::new(1.01001).ok(),
Real::new(-0.01).ok(),
Real::new(1.02001).ok(),
false,
),
(
Real::new(std::f64::MIN).ok(),
Real::new(std::f64::MAX).ok(),
None,
true,
),
];
for (lhs, rhs, expected, is_err) in test_cases {
let output = RpnFnScalarEvaluator::new()
.push_param(lhs)
.push_param(rhs)
.evaluate(ScalarFuncSig::MinusReal);
if is_err {
assert!(output.is_err())
} else {
let output = output.unwrap();
assert_eq!(output, expected, "lhs={:?}, rhs={:?}", lhs, rhs);
}
}
}
#[test]
fn test_minus_decimal() {
let test_cases = vec![("1.1", "2.2", "-1.1")];
for (lhs, rhs, expected) in test_cases {
let expected: Option<Decimal> = expected.parse().ok();
let output = RpnFnScalarEvaluator::new()
.push_param(lhs.parse::<Decimal>().ok())
.push_param(rhs.parse::<Decimal>().ok())
.evaluate(ScalarFuncSig::MinusDecimal)
.unwrap();
assert_eq!(output, expected, "lhs={:?}, rhs={:?}", lhs, rhs);
}
}
#[test]
fn test_mod_int() {
let tests = vec![
(Some(13), Some(11), Some(2)),
(Some(-13), Some(11), Some(-2)),
(Some(13), Some(-11), Some(2)),
(Some(-13), Some(-11), Some(-2)),
(Some(33), Some(11), Some(0)),
(Some(33), Some(-11), Some(0)),
(Some(-33), Some(-11), Some(0)),
(Some(-11), None, None),
(None, Some(-11), None),
(Some(11), Some(0), None),
(Some(-11), Some(0), None),
(
Some(std::i64::MAX),
Some(std::i64::MIN),
Some(std::i64::MAX),
),
(Some(std::i64::MIN), Some(std::i64::MAX), Some(-1)),
];
for (lhs, rhs, expected) in tests {
let output = RpnFnScalarEvaluator::new()
.push_param(lhs)
.push_param(rhs)
.evaluate(ScalarFuncSig::ModInt)
.unwrap();
assert_eq!(output, expected, "lhs={:?}, rhs={:?}", lhs, rhs);
}
}
#[test]
fn test_mod_int_unsigned() {
let tests = vec![
(
Some(std::u64::MAX as i64),
true,
Some(std::i64::MIN),
false,
Some(std::i64::MAX),
),
(
Some(std::i64::MIN),
false,
Some(std::u64::MAX as i64),
true,
Some(std::i64::MIN),
),
];
for (lhs, lhs_is_unsigned, rhs, rhs_is_unsigned, expected) in tests {
let lhs_field_type = FieldTypeBuilder::new()
.tp(FieldTypeTp::LongLong)
.flag(if lhs_is_unsigned {
FieldTypeFlag::UNSIGNED
} else {
FieldTypeFlag::empty()
})
.build();
let rhs_field_type = FieldTypeBuilder::new()
.tp(FieldTypeTp::LongLong)
.flag(if rhs_is_unsigned {
FieldTypeFlag::UNSIGNED
} else {
FieldTypeFlag::empty()
})
.build();
let output = RpnFnScalarEvaluator::new()
.push_param_with_field_type(lhs, lhs_field_type)
.push_param_with_field_type(rhs, rhs_field_type)
.evaluate(ScalarFuncSig::ModInt)
.unwrap();
assert_eq!(output, expected, "lhs={:?}, rhs={:?}", lhs, rhs);
}
}
#[test]
fn test_mod_real() {
let tests = vec![
(Real::new(1.0).ok(), None, None),
(None, Real::new(1.0).ok(), None),
(
Real::new(1.0).ok(),
Real::new(1.1).ok(),
Real::new(1.0).ok(),
),
(
Real::new(-1.0).ok(),
Real::new(1.1).ok(),
Real::new(-1.0).ok(),
),
(
Real::new(1.0).ok(),
Real::new(-1.1).ok(),
Real::new(1.0).ok(),
),
(
Real::new(-1.0).ok(),
Real::new(-1.1).ok(),
Real::new(-1.0).ok(),
),
(Real::new(1.0).ok(), Real::new(0.0).ok(), None),
];
for (lhs, rhs, expected) in tests {
let output = RpnFnScalarEvaluator::new()
.push_param(lhs)
.push_param(rhs)
.evaluate(ScalarFuncSig::ModReal)
.unwrap();
assert_eq!(output, expected, "lhs={:?}, rhs={:?}", lhs, rhs);
}
}
#[test]
fn test_mod_decimal() {
let tests = vec![
("13", "11", "2"),
("-13", "11", "-2"),
("13", "-11", "2"),
("-13", "-11", "-2"),
("33", "11", "0"),
("-33", "11", "0"),
("33", "-11", "0"),
("-33", "-11", "0"),
("0.0000000001", "1.0", "0.0000000001"),
("1", "1.1", "1"),
("-1", "1.1", "-1"),
("1", "-1.1", "1"),
("-1", "-1.1", "-1"),
("3", "0", ""),
("-3", "0", ""),
("0", "0", ""),
("-3", "", ""),
("", ("-3"), ""),
("", "", ""),
];
for (lhs, rhs, expected) in tests {
let expected = expected.parse::<Decimal>().ok();
let output = RpnFnScalarEvaluator::new()
.push_param(lhs.parse::<Decimal>().ok())
.push_param(rhs.parse::<Decimal>().ok())
.evaluate(ScalarFuncSig::ModDecimal)
.unwrap();
assert_eq!(output, expected, "lhs={:?}, rhs={:?}", lhs, rhs);
}
}
#[test]
fn test_multiply_decimal() {
let test_cases = vec![("1.1", "2.2", "2.42")];
for (lhs, rhs, expected) in test_cases {
let expected: Option<Decimal> = expected.parse().ok();
let output = RpnFnScalarEvaluator::new()
.push_param(lhs.parse::<Decimal>().ok())
.push_param(rhs.parse::<Decimal>().ok())
.evaluate(ScalarFuncSig::MultiplyDecimal)
.unwrap();
assert_eq!(output, expected, "lhs={:?}, rhs={:?}", lhs, rhs);
}
}
#[test]
fn test_int_divide_int() {
let test_cases = vec![
(13, false, 11, false, Some(1)),
(13, false, -11, false, Some(-1)),
(-13, false, 11, false, Some(-1)),
(-13, false, -11, false, Some(1)),
(33, false, 11, false, Some(3)),
(33, false, -11, false, Some(-3)),
(-33, false, 11, false, Some(-3)),
(-33, false, -11, false, Some(3)),
(11, false, 0, false, None),
(-11, false, 0, false, None),
(-3, false, 5, true, Some(0)),
(3, false, -5, false, Some(0)),
(std::i64::MIN + 1, false, -1, false, Some(std::i64::MAX)),
(std::i64::MIN, false, 1, false, Some(std::i64::MIN)),
(std::i64::MAX, false, 1, false, Some(std::i64::MAX)),
(
std::u64::MAX as i64,
true,
1,
false,
Some(std::u64::MAX as i64),
),
];
for (lhs, lhs_is_unsigned, rhs, rhs_is_unsigned, expected) in test_cases {
let lhs_field_type = FieldTypeBuilder::new()
.tp(FieldTypeTp::LongLong)
.flag(if lhs_is_unsigned {
FieldTypeFlag::UNSIGNED
} else {
FieldTypeFlag::empty()
})
.build();
let rhs_field_type = FieldTypeBuilder::new()
.tp(FieldTypeTp::LongLong)
.flag(if rhs_is_unsigned {
FieldTypeFlag::UNSIGNED
} else {
FieldTypeFlag::empty()
})
.build();
let output = RpnFnScalarEvaluator::new()
.push_param_with_field_type(lhs, lhs_field_type)
.push_param_with_field_type(rhs, rhs_field_type)
.evaluate(ScalarFuncSig::IntDivideInt)
.unwrap();
assert_eq!(output, expected, "lhs={:?}, rhs={:?}", lhs, rhs);
}
}
#[test]
fn test_int_divide_int_overflow() {
let test_cases = vec![
(std::i64::MIN, false, -1, false),
(-1, false, 1, true),
(-2, false, 1, true),
(1, true, -1, false),
(2, true, -1, false),
];
for (lhs, lhs_is_unsigned, rhs, rhs_is_unsigned) in test_cases {
let lhs_field_type = FieldTypeBuilder::new()
.tp(FieldTypeTp::LongLong)
.flag(if lhs_is_unsigned {
FieldTypeFlag::UNSIGNED
} else {
FieldTypeFlag::empty()
})
.build();
let rhs_field_type = FieldTypeBuilder::new()
.tp(FieldTypeTp::LongLong)
.flag(if rhs_is_unsigned {
FieldTypeFlag::UNSIGNED
} else {
FieldTypeFlag::empty()
})
.build();
let output: Result<Option<Int>> = RpnFnScalarEvaluator::new()
.push_param_with_field_type(lhs, lhs_field_type)
.push_param_with_field_type(rhs, rhs_field_type)
.evaluate(ScalarFuncSig::IntDivideInt);
assert!(output.is_err(), "lhs={:?}, rhs={:?}", lhs, rhs);
}
}
#[test]
fn test_int_divide_decimal() {
let test_cases = vec![
(Some("11.01"), Some("1.1"), Some(10)),
(Some("-11.01"), Some("1.1"), Some(-10)),
(Some("11.01"), Some("-1.1"), Some(-10)),
(Some("-11.01"), Some("-1.1"), Some(10)),
(Some("123.0"), None, None),
(None, Some("123.0"), None),
// divide by zero
(Some("0.0"), Some("0.0"), None),
(None, None, None),
];
for (lhs, rhs, expected) in test_cases {
let output = RpnFnScalarEvaluator::new()
.push_param(lhs.map(|f| Decimal::from_bytes(f.as_bytes()).unwrap().unwrap()))
.push_param(rhs.map(|f| Decimal::from_bytes(f.as_bytes()).unwrap().unwrap()))
.evaluate(ScalarFuncSig::IntDivideDecimal)
.unwrap();
assert_eq!(output, expected, "lhs={:?}, rhs={:?}", lhs, rhs);
}
}
#[test]
fn test_int_divide_decimal_overflow() {
let test_cases = vec![
(Decimal::from(std::i64::MIN), Decimal::from(-1)),
(
Decimal::from(std::i64::MAX),
Decimal::from_bytes(b"0.1").unwrap().unwrap(),
),
];
for (lhs, rhs) in test_cases {
let output: Result<Option<Int>> = RpnFnScalarEvaluator::new()
.push_param(lhs)
.push_param(rhs)
.evaluate(ScalarFuncSig::IntDivideDecimal);
assert!(output.is_err(), "lhs={:?}, rhs={:?}", lhs, rhs);
}
}
#[test]
fn test_real_multiply() {
let should_pass = vec![(1.01001, -0.01, Real::new(-0.0101001).ok())];
for (lhs, rhs, expected) in should_pass {
assert_eq!(
expected,
RpnFnScalarEvaluator::new()
.push_param(lhs)
.push_param(rhs)
.evaluate(ScalarFuncSig::MultiplyReal)
.unwrap()
);
}
let should_fail = vec![
(std::f64::MAX, std::f64::MAX),
(std::f64::MAX, std::f64::MIN),
];
for (lhs, rhs) in should_fail {
assert!(
RpnFnScalarEvaluator::new()
.push_param(lhs)
.push_param(rhs)
.evaluate::<Real>(ScalarFuncSig::MultiplyReal)
.is_err(),
"{} * {} should fail",
lhs,
rhs
);
}
}
#[test]
fn test_int_multiply() {
let should_pass = vec![
(11, 17, Some(187)),
(-1, -3, Some(3)),
(1, std::i64::MIN, Some(std::i64::MIN)),
];
for (lhs, rhs, expected) in should_pass {
assert_eq!(
expected,
RpnFnScalarEvaluator::new()
.push_param_with_field_type(lhs, FieldTypeTp::LongLong)
.push_param_with_field_type(rhs, FieldTypeTp::LongLong)
.evaluate(ScalarFuncSig::MultiplyInt)
.unwrap()
);
}
let should_fail = vec![(std::i64::MAX, 2), (std::i64::MIN, -1)];
for (lhs, rhs) in should_fail {
assert!(
RpnFnScalarEvaluator::new()
.push_param_with_field_type(lhs, FieldTypeTp::LongLong)
.push_param_with_field_type(rhs, FieldTypeTp::LongLong)
.evaluate::<Int>(ScalarFuncSig::MultiplyInt)
.is_err(),
"{} * {} should fail",
lhs,
rhs
);
}
}
#[test]
fn test_int_uint_multiply() {
let should_pass = vec![(std::i64::MAX, 1, Some(std::i64::MAX)), (3, 7, Some(21))];
for (lhs, rhs, expected) in should_pass {
assert_eq!(
expected,
RpnFnScalarEvaluator::new()
.push_param_with_field_type(lhs, FieldTypeTp::LongLong)
.push_param_with_field_type(
rhs,
FieldTypeBuilder::new()
.tp(FieldTypeTp::LongLong)
.flag(FieldTypeFlag::UNSIGNED)
)
.evaluate(ScalarFuncSig::MultiplyInt)
.unwrap()
);
}
let should_fail = vec![(-2, 1), (std::i64::MIN, 2)];
for (lhs, rhs) in should_fail {
assert!(
RpnFnScalarEvaluator::new()
.push_param_with_field_type(lhs, FieldTypeTp::LongLong)
.push_param_with_field_type(
rhs,
FieldTypeBuilder::new()
.tp(FieldTypeTp::LongLong)
.flag(FieldTypeFlag::UNSIGNED)
)
.evaluate::<Int>(ScalarFuncSig::MultiplyInt)
.is_err(),
"{} * {} should fail",
lhs,
rhs
);
}
}
#[test]
fn test_uint_uint_multiply() {
let should_pass = vec![
(7, 11, Some(77)),
(1, 2, Some(2)),
(std::u64::MAX as i64, 1, Some(std::u64::MAX as i64)),
];
for (lhs, rhs, expected) in should_pass {
assert_eq!(
expected,
RpnFnScalarEvaluator::new()
.push_param_with_field_type(
lhs,
FieldTypeBuilder::new()
.tp(FieldTypeTp::LongLong)
.flag(FieldTypeFlag::UNSIGNED)
)
.push_param_with_field_type(
rhs,
FieldTypeBuilder::new()
.tp(FieldTypeTp::LongLong)
.flag(FieldTypeFlag::UNSIGNED)
)
.evaluate(ScalarFuncSig::MultiplyIntUnsigned)
.unwrap()
);
}
let should_fail = vec![(std::u64::MAX as i64, 2)];
for (lhs, rhs) in should_fail {
assert!(
RpnFnScalarEvaluator::new()
.push_param_with_field_type(
lhs,
FieldTypeBuilder::new()
.tp(FieldTypeTp::LongLong)
.flag(FieldTypeFlag::UNSIGNED)
)
.push_param_with_field_type(
rhs,
FieldTypeBuilder::new()
.tp(FieldTypeTp::LongLong)
.flag(FieldTypeFlag::UNSIGNED)
)
.evaluate::<Int>(ScalarFuncSig::MultiplyIntUnsigned)
.is_err(),
"{} * {} should fail",
lhs,
rhs
);
}
}
#[test]
fn test_decimal_divide() {
let cases = vec![
(Some("2.2"), Some("1.1"), Some("2.0")),
(Some("2.33"), Some("-0.01"), Some("-233")),
(Some("2.33"), Some("0.01"), Some("233")),
(None, Some("2"), None),
(Some("123"), None, None),
];
for (lhs, rhs, expected) in cases {
let actual = RpnFnScalarEvaluator::new()
.push_param(lhs.map(|s| Decimal::from_str(s).unwrap()))
.push_param(rhs.map(|s| Decimal::from_str(s).unwrap()))
.evaluate(ScalarFuncSig::DivideDecimal)
.unwrap();
let expected = expected.map(|s| Decimal::from_str(s).unwrap());
assert_eq!(actual, expected, "lhs={:?}, rhs={:?}", lhs, rhs);
}
}
#[test]
fn test_real_divide() {
let normal = vec![
(Some(2.2), Some(1.1), Real::new(2.0).ok()),
(Some(2.33), Some(-0.01), Real::new(-233.0).ok()),
(Some(2.33), Some(0.01), Real::new(233.0).ok()),
(None, Some(2.0), None),
(Some(123.0), None, None),
];
for (lhs, rhs, expected) in normal {
let actual = RpnFnScalarEvaluator::new()
.push_param(lhs)
.push_param(rhs)
.evaluate(ScalarFuncSig::DivideReal)
.unwrap();
assert_eq!(actual, expected, "lhs={:?}, rhs={:?}", lhs, rhs);
}
let overflow = vec![(std::f64::MAX, 0.0001)];
for (lhs, rhs) in overflow {
assert!(RpnFnScalarEvaluator::new()
.push_param(lhs)
.push_param(rhs)
.evaluate::<Real>(ScalarFuncSig::DivideReal)
.is_err())
}
}
#[test]
fn test_divide_by_zero() {
let cases: Vec<(ScalarFuncSig, FieldTypeTp, ScalarValue, ScalarValue)> = vec![
(
ScalarFuncSig::DivideDecimal,
FieldTypeTp::NewDecimal,
Decimal::from_str("2.33").unwrap().into(),
Decimal::from_str("0.0").unwrap().into(),
),
(
ScalarFuncSig::DivideDecimal,
FieldTypeTp::NewDecimal,
Decimal::from_str("2.33").unwrap().into(),
Decimal::from_str("-0.0").unwrap().into(),
),
(
ScalarFuncSig::DivideReal,
FieldTypeTp::Double,
2.33.into(),
0.0.into(),
),
];
        // Each case: (Flag, SqlMode, is_ok(bool), has_warning(bool))
let modes = vec![
// Warning
(Flag::empty(), SqlMode::empty(), true, true),
// Error
(
Flag::IN_UPDATE_OR_DELETE_STMT,
SqlMode::ERROR_FOR_DIVISION_BY_ZERO | SqlMode::STRICT_ALL_TABLES,
false,
false,
),
// Ok
(
Flag::IN_UPDATE_OR_DELETE_STMT,
SqlMode::STRICT_ALL_TABLES,
true,
false,
),
// Warning
(
Flag::IN_UPDATE_OR_DELETE_STMT | Flag::DIVIDED_BY_ZERO_AS_WARNING,
SqlMode::ERROR_FOR_DIVISION_BY_ZERO | SqlMode::STRICT_ALL_TABLES,
true,
true,
),
];
for (sig, ret_field_type, lhs, rhs) in &cases {
for &(flag, sql_mode, is_ok, has_warning) in &modes {
// Construct an `EvalContext`
let mut config = EvalConfig::new();
config.set_flag(flag).set_sql_mode(sql_mode);
let (result, mut ctx) = RpnFnScalarEvaluator::new()
.context(EvalContext::new(std::sync::Arc::new(config)))
.push_param(lhs.to_owned())
.push_param(rhs.to_owned())
.evaluate_raw(*ret_field_type, *sig);
if is_ok {
assert!(result.unwrap().is_none());
} else {
assert!(result.is_err());
}
if has_warning {
assert_eq!(
ctx.take_warnings().warnings[0].get_code(),
ERR_DIVISION_BY_ZERO
);
} else {
assert!(ctx.take_warnings().warnings.is_empty());
}
}
}
}
}
impls.rs |
use soft_ascii_string::{ SoftAsciiString, SoftAsciiChar, SoftAsciiStr };
use vec1::Vec1;
use grammar::encoded_word::{ MAX_ECW_LEN, ECW_SEP_OVERHEAD };
use ::encoder::EncodingWriter;
use super::{ EncodedWordWriter, EncodedWordEncoding as Encoding };
pub struct VecWriter<'a> {
    data: Vec1<SoftAsciiString>,
charset: &'a SoftAsciiStr,
encoding: Encoding
}
impl<'a> VecWriter<'a> {
pub fn new(charset: &'a SoftAsciiStr, encoding: Encoding) -> Self {
let data = Vec1::new( SoftAsciiString::new() );
VecWriter { data, charset, encoding }
}
pub fn data( &self ) -> &[SoftAsciiString] {
&*self.data
}
}
impl<'a> Into<Vec1<SoftAsciiString>> for VecWriter<'a> {
fn into(self) -> Vec1<SoftAsciiString> {
self.data
}
}
impl<'a> EncodedWordWriter for VecWriter<'a> {
fn encoding( &self ) -> Encoding {
self.encoding
}
fn write_char( &mut self, ch: SoftAsciiChar ) {
self.data.last_mut().push( ch );
}
fn write_charset( &mut self ) {
self.data.last_mut().extend( self.charset.chars() )
}
fn write_ecw_seperator( &mut self ) {
self.data.push( SoftAsciiString::new() )
}
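    // Payload budget per encoded word: the separator overhead and the charset
    // name (plus one byte, presumably for the encoding flag) are taken out of
    // the maximum encoded-word length defined by the grammar module.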
fn max_payload_len( &self ) -> usize {
MAX_ECW_LEN - ECW_SEP_OVERHEAD - self.charset.len() - 1
}
}
pub struct WriterWrapper<'a, 'b: 'a>{
charset: &'a SoftAsciiStr,
encoding: Encoding,
encoder_handle: &'a mut EncodingWriter<'b>
}
impl<'a, 'b: 'a> WriterWrapper<'a, 'b> {
    pub fn new_with_charset(charset: &'a SoftAsciiStr,
encoding: Encoding,
encoder: &'a mut EncodingWriter<'b> ) -> Self
{
WriterWrapper { charset, encoding, encoder_handle: encoder }
}
pub fn new(encoding: Encoding,
encoder: &'a mut EncodingWriter<'b> ) -> Self
{
Self::new_with_charset(SoftAsciiStr::from_unchecked("utf8"), encoding, encoder)
}
}
impl<'a, 'b: 'a> EncodedWordWriter for WriterWrapper<'a, 'b> {
fn encoding( &self ) -> Encoding {
self.encoding
}
fn write_charset( &mut self ) {
//TODO fix
let _ = self.encoder_handle.write_str( self.charset );
}
fn write_ecw_seperator( &mut self ) {
self.encoder_handle.write_fws();
}
fn write_char( &mut self, ch: SoftAsciiChar ) {
//TODO fix
let _ = self.encoder_handle.write_char( ch );
}
fn max_payload_len( &self ) -> usize {
MAX_ECW_LEN - ECW_SEP_OVERHEAD - self.charset.len() - 1
}
}
FirstPersonEngines.ts |
import { CollectionCache, CollectionKey } from "../../../common";
export namespace FirstPersonEngines {
export const KEY = new CollectionKey("first_person_engines");
  export class Entry {
private readonly collectionCache: CollectionCache;
readonly key: string;
readonly reloadTime: number;
readonly autoTarget: boolean;
readonly cameraOffsetX: number;
readonly cameraOffsetY: number;
readonly cameraOffsetZ: number;
readonly nearClippingPlane: number;
readonly trackProjectileDistance: number;
readonly halfAccuracyArc: number;
readonly halfHorizontalFireArc: number;
readonly halfVerticalFireArcElevation: number;
readonly turnDelay: number;
readonly halfVerticalFireArcDeclination: number;
readonly allowDirectControl: boolean;
readonly showTrajectory: boolean;
constructor(collectionCache: CollectionCache, values: any) {
this.collectionCache = collectionCache;
this.key = values["key"];
this.reloadTime = values["reload_time"];
this.autoTarget = !!values["auto_target"];
this.cameraOffsetX = values["camera_offset_x"];
this.cameraOffsetY = values["camera_offset_y"];
this.cameraOffsetZ = values["camera_offset_z"];
this.nearClippingPlane = values["near_clipping_plane"];
this.trackProjectileDistance = values["track_projectile_distance"];
this.halfAccuracyArc = values["half_accuracy_arc"];
this.halfHorizontalFireArc = values["half_horizontal_fire_arc"];
this.halfVerticalFireArcElevation = values["half_vertical_fire_arc_elevation"];
this.turnDelay = values["turn_delay"];
this.halfVerticalFireArcDeclination = values["half_vertical_fire_arc_declination"];
this.allowDirectControl = !!values["allow_direct_control"];
this.showTrajectory = !!values["show_trajectory"];
}
}
}
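// Usage sketch (the collection cache and the raw row object are assumed to
// come from the surrounding table loader, which is not part of this file):
//   const entry = new FirstPersonEngines.Entry(cache, row);
//   entry.autoTarget; // raw value coerced to boolean via !!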
export default FirstPersonEngines;
cache_sticker.go |
package core
import (
"github.com/DisgoOrg/disgo/discord"
)
type (
StickerFindFunc func(sticker *Sticker) bool
StickerCache interface {
Get(guildID discord.Snowflake, stickerID discord.Snowflake) *Sticker
GetCopy(guildID discord.Snowflake, stickerID discord.Snowflake) *Sticker
Set(sticker *Sticker) *Sticker
Remove(guildID discord.Snowflake, stickerID discord.Snowflake)
Cache() map[discord.Snowflake]map[discord.Snowflake]*Sticker
All() map[discord.Snowflake][]*Sticker
GuildCache(guildID discord.Snowflake) map[discord.Snowflake]*Sticker
GuildAll(guildID discord.Snowflake) []*Sticker
FindFirst(stickerFindFunc StickerFindFunc) *Sticker
FindAll(stickerFindFunc StickerFindFunc) []*Sticker
}
stickerCacheImpl struct {
cacheFlags CacheFlags
stickers map[discord.Snowflake]map[discord.Snowflake]*Sticker
}
)
func NewStickerCache(cacheFlags CacheFlags) StickerCache {
return &stickerCacheImpl{
cacheFlags: cacheFlags,
stickers: map[discord.Snowflake]map[discord.Snowflake]*Sticker{},
}
}
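// Minimal usage sketch (the snowflake IDs and sticker value are placeholders):
//
//	cache := NewStickerCache(CacheFlagStickers)
//	cache.Set(sticker)                 // stored under *sticker.GuildID
//	s := cache.Get(guildID, stickerID) // nil if the guild or sticker is unknown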
func (c *stickerCacheImpl) Get(guildID discord.Snowflake, stickerID discord.Snowflake) *Sticker {
if _, ok := c.stickers[guildID]; !ok {
return nil
}
return c.stickers[guildID][stickerID]
}
func (c *stickerCacheImpl) GetCopy(guildID discord.Snowflake, stickerID discord.Snowflake) *Sticker {
if sticker := c.Get(guildID, stickerID); sticker != nil {
st := *sticker
return &st
}
return nil
}
func (c *stickerCacheImpl) Set(sticker *Sticker) *Sticker {
if sticker.GuildID == nil {
return sticker
}
if c.cacheFlags.Missing(CacheFlagStickers) {
return sticker
}
if _, ok := c.stickers[*sticker.GuildID]; !ok {
c.stickers[*sticker.GuildID] = map[discord.Snowflake]*Sticker{}
}
st, ok := c.stickers[*sticker.GuildID][sticker.ID]
    if ok {
        *st = *sticker
        return st
    }
c.stickers[*sticker.GuildID][sticker.ID] = sticker
return sticker
}
func (c *stickerCacheImpl) Remove(guildID discord.Snowflake, stickerID discord.Snowflake) {
if _, ok := c.stickers[guildID]; !ok {
return
}
delete(c.stickers[guildID], stickerID)
}
func (c *stickerCacheImpl) Cache() map[discord.Snowflake]map[discord.Snowflake]*Sticker {
return c.stickers
}
func (c *stickerCacheImpl) All() map[discord.Snowflake][]*Sticker {
stickers := make(map[discord.Snowflake][]*Sticker, len(c.stickers))
for guildID, guildStickers := range c.stickers {
        stickers[guildID] = make([]*Sticker, 0, len(guildStickers))
        for _, guildSticker := range guildStickers {
            stickers[guildID] = append(stickers[guildID], guildSticker)
        }
}
return stickers
}
func (c *stickerCacheImpl) GuildCache(guildID discord.Snowflake) map[discord.Snowflake]*Sticker {
if _, ok := c.stickers[guildID]; !ok {
return nil
}
return c.stickers[guildID]
}
func (c *stickerCacheImpl) GuildAll(guildID discord.Snowflake) []*Sticker {
if _, ok := c.stickers[guildID]; !ok {
return nil
}
    stickers := make([]*Sticker, 0, len(c.stickers[guildID]))
    for _, sticker := range c.stickers[guildID] {
        stickers = append(stickers, sticker)
    }
return stickers
}
func (c *stickerCacheImpl) FindFirst(stickerFindFunc StickerFindFunc) *Sticker {
for _, guildStickers := range c.stickers {
for _, sticker := range guildStickers {
if stickerFindFunc(sticker) {
return sticker
}
}
}
return nil
}
func (c *stickerCacheImpl) FindAll(stickerFindFunc StickerFindFunc) []*Sticker {
var stickers []*Sticker
for _, guildStickers := range c.stickers {
for _, sticker := range guildStickers {
if stickerFindFunc(sticker) {
stickers = append(stickers, sticker)
}
}
}
return stickers
}
selection.js |
import {Pos} from "../model"
import {ProseMirrorError} from "../util/error"
import {contains, browser} from "../dom"
import {posFromDOM, pathToDOM, DOMFromPos, coordsAtPos} from "./dompos"
// ;; Error type used to signal selection-related problems.
export class SelectionError extends ProseMirrorError {}
export class SelectionState {
constructor(pm, range) {
this.pm = pm
this.range = range
this.lastNonNodePos = null
this.polling = null
this.lastAnchorNode = this.lastHeadNode = this.lastAnchorOffset = this.lastHeadOffset = null
this.lastNode = null
pm.content.addEventListener("focus", () => this.receivedFocus())
this.poller = this.poller.bind(this)
}
setAndSignal(range, clearLast) {
this.set(range, clearLast)
// :: () #path=ProseMirror#events#selectionChange
// Indicates that the editor's selection has changed.
this.pm.signal("selectionChange")
}
set(range, clearLast) {
this.range = range
if (!range.node) this.lastNonNodePos = null
if (clearLast !== false) this.lastAnchorNode = null
}
poller() {
if (hasFocus(this.pm)) {
if (!this.pm.operation) this.readFromDOM()
this.polling = setTimeout(this.poller, 100)
} else {
this.polling = null
}
}
startPolling() {
clearTimeout(this.polling)
this.polling = setTimeout(this.poller, 50)
}
fastPoll() {
this.startPolling()
}
stopPolling() {
clearTimeout(this.polling)
this.polling = null
}
domChanged() {
let sel = window.getSelection()
return sel.anchorNode != this.lastAnchorNode || sel.anchorOffset != this.lastAnchorOffset ||
sel.focusNode != this.lastHeadNode || sel.focusOffset != this.lastHeadOffset
}
storeDOMState() {
let sel = window.getSelection()
this.lastAnchorNode = sel.anchorNode; this.lastAnchorOffset = sel.anchorOffset
this.lastHeadNode = sel.focusNode; this.lastHeadOffset = sel.focusOffset
}
readFromDOM() {
if (this.pm.input.composing || !hasFocus(this.pm) || !this.domChanged()) return false
let sel = window.getSelection(), doc = this.pm.doc
let anchor = posFromDOM(this.pm, sel.anchorNode, sel.anchorOffset)
let head = sel.isCollapsed ? anchor : posFromDOM(this.pm, sel.focusNode, sel.focusOffset)
let newRange = findSelectionNear(doc, head, this.range.head && this.range.head.cmp(head) < 0 ? -1 : 1)
if (newRange instanceof TextSelection && doc.path(anchor.path).isTextblock)
newRange = new TextSelection(anchor, newRange.head)
this.setAndSignal(newRange)
if (newRange instanceof NodeSelection || newRange.head.cmp(head) || newRange.anchor.cmp(anchor)) {
this.toDOM()
} else {
this.clearNode()
this.storeDOMState()
}
return true
}
toDOM(takeFocus) {
if (!hasFocus(this.pm)) {
if (!takeFocus) return
// See https://bugzilla.mozilla.org/show_bug.cgi?id=921444
else if (browser.gecko) this.pm.content.focus()
}
if (this.range instanceof NodeSelection)
this.nodeToDOM()
else
this.rangeToDOM()
}
nodeToDOM() {
let dom = pathToDOM(this.pm.content, this.range.from.toPath())
if (dom != this.lastNode) {
this.clearNode()
dom.classList.add("ProseMirror-selectednode")
this.pm.content.classList.add("ProseMirror-nodeselection")
this.lastNode = dom
}
let range = document.createRange(), sel = window.getSelection()
range.selectNode(dom)
sel.removeAllRanges()
sel.addRange(range)
this.storeDOMState()
}
rangeToDOM() {
this.clearNode()
let anchor = DOMFromPos(this.pm.content, this.range.anchor)
let head = DOMFromPos(this.pm.content, this.range.head)
let sel = window.getSelection(), range = document.createRange()
if (sel.extend) {
range.setEnd(anchor.node, anchor.offset)
range.collapse(false)
} else {
if (this.range.anchor.cmp(this.range.head) > 0) { let tmp = anchor; anchor = head; head = tmp }
range.setEnd(head.node, head.offset)
range.setStart(anchor.node, anchor.offset)
}
sel.removeAllRanges()
sel.addRange(range)
if (sel.extend)
sel.extend(head.node, head.offset)
this.storeDOMState()
}
clearNode() {
if (this.lastNode) {
this.lastNode.classList.remove("ProseMirror-selectednode")
this.pm.content.classList.remove("ProseMirror-nodeselection")
this.lastNode = null
return true
}
}
receivedFocus() {
if (this.polling == null) this.startPolling()
}
}
// ;; An editor selection. Can be one of two selection types:
// `TextSelection` and `NodeSelection`. Both have the properties
// listed here, but also contain more information (such as the
// selected [node](#NodeSelection.node) or the
// [head](#TextSelection.head) and [anchor](#TextSelection.anchor)).
export class Selection {
// :: Pos #path=Selection.prototype.from
// The start of the selection.
// :: Pos #path=Selection.prototype.to
// The end of the selection.
// :: bool #path=Selection.empty
  // True if the selection is an empty text selection (head and anchor
// are the same).
// :: (other: Selection) → bool #path=Selection.eq
// Test whether the selection is the same as another selection.
// :: (doc: Node, mapping: Mappable) → Selection #path=Selection.map
// Map this selection through a [mappable](#Mappable) thing. `doc`
// should be the new document, to which we are mapping.
}
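// For example, remapping the current range after a document change could be
// written as follows (a sketch; `transform` is assumed to carry the new `doc`
// and to act as a Mappable):
//   let newRange = pm.selection.range.map(transform.doc, transform)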
// ;; A text selection represents a classical editor
// selection, with a head (the moving side) and anchor (immobile
// side), both of which point into textblock nodes. It can be empty (a
// regular cursor position).
export class TextSelection extends Selection {
// :: (Pos, ?Pos)
// Construct a text selection. When `head` is not given, it defaults
// to `anchor`.
constructor(anchor, head) {
super()
// :: Pos
// The selection's immobile side (does not move when pressing
// shift-arrow).
this.anchor = anchor
// :: Pos
// The selection's mobile side (the side that moves when pressing
// shift-arrow).
this.head = head || anchor
}
get inverted() { return this.anchor.cmp(this.head) > 0 }
get from() { return this.inverted ? this.head : this.anchor }
get to() { return this.inverted ? this.anchor : this.head }
get empty() { return this.anchor.cmp(this.head) == 0 }
eq(other) {
return other instanceof TextSelection && !other.head.cmp(this.head) && !other.anchor.cmp(this.anchor)
}
map(doc, mapping) {
let head = mapping.map(this.head).pos
if (!doc.path(head.path).isTextblock)
return findSelectionNear(doc, head)
let anchor = mapping.map(this.anchor).pos
return new TextSelection(doc.path(anchor.path).isTextblock ? anchor : head, head)
}
}
// ;; A node selection is a selection that points at a
// single node. All nodes marked [selectable](#NodeType.selectable)
// can be the target of a node selection. In such an object, `from`
// and `to` point directly before and after the selected node.
export class NodeSelection extends Selection {
// :: (Pos, Pos, Node)
// Create a node selection. Does not verify the validity of its
// arguments. Use `ProseMirror.setNodeSelection` for an easier,
// error-checking way to create a node selection.
constructor(from, to, node) {
super()
this.from = from
this.to = to
// :: Node The selected node.
this.node = node
}
get empty() { return false }
eq(other) {
return other instanceof NodeSelection && !this.from.cmp(other.from)
}
map(doc, mapping) {
let from = mapping.map(this.from, 1).pos
let to = mapping.map(this.to, -1).pos
if (Pos.samePath(from.path, to.path) && from.offset == to.offset - 1) {
let node = doc.nodeAfter(from)
if (node.type.selectable) return new NodeSelection(from, to, node)
}
return findSelectionNear(doc, from)
}
}
export function rangeFromDOMLoose(pm) {
if (!hasFocus(pm)) return null
let sel = window.getSelection()
return new TextSelection(posFromDOM(pm, sel.anchorNode, sel.anchorOffset, true),
posFromDOM(pm, sel.focusNode, sel.focusOffset, true))
}
export function hasFocus(pm) {
let sel = window.getSelection()
return sel.rangeCount && contains(pm.content, sel.anchorNode)
}
function findSelectionIn(doc, path, offset, dir, text) {
let node = doc.path(path)
if (node.isTextblock) return new TextSelection(new Pos(path, offset))
for (let i = offset + (dir > 0 ? 0 : -1); dir > 0 ? i < node.size : i >= 0; i += dir) {
let child = node.child(i)
if (!text && child.type.contains == null && child.type.selectable)
return new NodeSelection(new Pos(path, i), new Pos(path, i + 1), child)
path.push(i)
let inside = findSelectionIn(doc, path, dir < 0 ? child.size : 0, dir, text)
if (inside) return inside
path.pop()
}
}
// FIXME we'll need some awareness of bidi motion when determining block start and end
export function findSelectionFrom(doc, pos, dir, text) {
for (let path = pos.path.slice(), offset = pos.offset;;) {
let found = findSelectionIn(doc, path, offset, dir, text)
if (found) return found
if (!path.length) break
offset = path.pop() + (dir > 0 ? 1 : 0)
}
}
export function findSelectionNear(doc, pos, bias = 1, text) {
let result = findSelectionFrom(doc, pos, bias, text) || findSelectionFrom(doc, pos, -bias, text)
if (!result) SelectionError("Searching for selection in invalid document " + doc)
return result
}
export function findSelectionAtStart(node, path = [], text) {
return findSelectionIn(node, path.slice(), 0, 1, text)
}
export function findSelectionAtEnd(node, path = [], text) {
return findSelectionIn(node, path.slice(), node.size, -1, text)
}
export function verticalMotionLeavesTextblock(pm, pos, dir) {
let dom = pathToDOM(pm.content, pos.path)
let coords = coordsAtPos(pm, pos)
for (let child = dom.firstChild; child; child = child.nextSibling) {
if (child.nodeType != 1) continue
let boxes = child.getClientRects()
for (let i = 0; i < boxes.length; i++) {
let box = boxes[i]
if (dir < 0 ? box.bottom < coords.top : box.top > coords.bottom)
return false
}
}
return true
}
gt_kai.py |
# Gym-TORCS-Kai Environment for Reinforcement Learning in TORCS
# original author : Naoto Yoshida
# (https://github.com/ugo-nama-kun/gym_torcs)
# modified version author : Daiko Kishikawa
#
# This environment is under modification. (2019.12)
#
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import sys
sys.path.append("./gym_torcs_kai")
import snakeoil3_gym as snakeoil3
import os
import time
class TorcsKaiEnv(gym.Env):
# the speed limit starts when the number of steps exceeds this
terminal_judge_start = 500
# episode terminates when the car is running slower than this limit
termination_limit_progress = 5
# whether to initialize when resetting the environment
initial_reset = True
def __init__(self, throttle=False, gear_change=False):
print("=== Hello, this is Gym-TORCS-Kai. ===")
############################ PARAMETERS OF DRIVING ############################
""" throttle (bool) : usage of the throttle control in TORCS. """
""" gear_change (bool) : usage of the gear control in TORCS. """
""" obsdim (int) : the number of observation (state input) dimensions."""
# Currently, three types of dimensions are supported: "2", "31", "79".
# "2" : the minimum number of dimensions required for driving.
# "31" : the number of dimensions required for a single agent to drive normally.
# "79" : the number of dimensions using all available inputs.
""" maximum_distance (float) : the maximum distance when finish driving. """
""" default_speed (float) : the target speed for acceleration/deceleration. """
self.throttle = throttle
self.gear_change = gear_change
self.obsdim = 31
self.maximum_distance = 1908.32
self.default_speed = 100
##################################################################################
print("--> throttle : ", self.throttle)
print("--> gear : ", self.gear_change)
print("--> dim. of observ. : ", self.obsdim)
print("--> max. dist. : ", self.maximum_distance, " m")
print("--> targ. speed : ", self.default_speed, "km/h")
# Initialization of the driving in TORCS.
self.initial_run = True
# variable for calculating Y-axis acceleration
self.speedY = 0
self.time = 0
# variable for recording the current number of steps
self.time_step = 0
# the range of reward function
self.reward_range = (-10, 10)
self.testmode = False
# lists for recording vehicle status
self.Yaclist = []
self.poshis = []
self.anglehis = []
self.sphis = []
# launch TORCS system
os.system("pkill torcs")
time.sleep(0.5)
if self.obsdim == 79:
os.system("torcs &")
else:
os.system("torcs -nofuel -nodamage -nolaptime &")
time.sleep(0.5)
os.system("sh ./gym_torcs_kai/autostart.sh")
time.sleep(0.5)
"""
# Modify here if you use multiple tracks in the environment
self.client = snakeoil3.Client(p=3101, vision=False) # Open new UDP in vtorcs
self.client.MAX_STEPS = np.inf
client = self.client
client.get_servers_input() # Get the initial input from torcs
obs = client.S.d # Get the current full-observation from torcs
"""
# definitions of action space ranges
if throttle is False:
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(1,))
else:
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(2,))
# definitions of observation space ranges
if self.obsdim == 79:
high = np.array([np.pi, # angle
np.inf, # curLapTime
np.inf, # damage
np.inf, # distFromStart
np.inf, # distRaced
# focus (5 dim.)
200, 200, 200, 200, 200,
np.inf, # fuel
6, # gear
np.inf, # lastLapTime
# opponents (36 dim.)
200, 200, 200, 200, 200, 200,
200, 200, 200, 200, 200, 200,
200, 200, 200, 200, 200, 200,
200, 200, 200, 200, 200, 200,
200, 200, 200, 200, 200, 200,
200, 200, 200, 200, 200, 200,
np.inf, # racePos
np.inf, # rpm
np.inf, # speedX
np.inf, # speedY
np.inf, # speedZ
# track (19 dim.)
200, 200, 200, 200, 200,
200, 200, 200, 200, 200,
200, 200, 200, 200, 200,
200, 200, 200, 200,
np.inf, # trackPos
# wheelSpinVel (4 dim.)
np.inf, np.inf, np.inf, np.inf,
np.inf, # z
])
low = np.array([-np.pi, # angle
0, # curLapTime
0, # damage
0, # distFromStart
0, # distRaced
# focus (5 dim.)
0, 0, 0, 0, 0,
0, # fuel
-1, # gear
0, # lastLapTime
# opponents (36 dim.)
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
1, # racePos
0, # rpm
-np.inf, # speedX
-np.inf, # speedY
-np.inf, # speedZ
# track (19 dim.)
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0,
-np.inf, # trackPos
# wheelSpinVel (4 dim.)
0, 0, 0, 0,
-np.inf, # z
])
elif self.obsdim == 2:
high = np.array([np.pi, # angle
np.inf]) # trackPos
low = np.array([-np.pi, # angle
-np.inf]) # trackPos
elif self.obsdim == 31:
high = np.array([np.pi, # angle
6, # gear
np.inf, # rpm
np.inf, # speedX
np.inf, # speedY
np.inf, # speedZ
# track (19 dim.)
200, 200, 200, 200, 200,
200, 200, 200, 200, 200,
200, 200, 200, 200, 200,
200, 200, 200, 200,
np.inf, # trackPos
# wheelSpinVel (4 dim.)
np.inf, np.inf, np.inf, np.inf,
np.inf, # z
])
low = np.array([-np.pi, # angle
-1, # gear
0, # rpm
-np.inf, # speedX
-np.inf, # speedY
-np.inf, # speedZ
# track (19 dim.)
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0,
-np.inf, # trackPos
# wheelSpinVel (4 dim.)
0, 0, 0, 0,
-np.inf, # z
])
else:
low = None
high = None
self.observation_space = spaces.Box(low=low, high=high)
    # For evaluation episodes, set "test mode" so that logs are not displayed.
def testset(self, test):
self.testmode = test
# Set learning parameter
def set_params(self, throttle, gear, dim, max_dist, targ_speed):
#params: [throttle, gear, dim, max_dist, targ_speed]
self.throttle = throttle
self.gear_change = gear
self.obsdim = dim
self.maximum_distance = max_dist
self.default_speed = targ_speed
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
# "step" function
def step(self, u):
# convert thisAction to the actual torcs actionstr
client = self.client
this_action = self.agent_to_torcs(u)
# apply actions in TORCS
action_torcs = client.R.d
# steering control from the agent
action_torcs["steer"] = this_action["steer"] # in [-1, 1]
# simple automatic throttle control by Snakeoil
if self.throttle is False:
target_speed = self.default_speed
if client.S.d["speedX"] < target_speed - (client.R.d["steer"] * 50):
if client.R.d["accel"] + 0.1 <= 1:
client.R.d["accel"] += 0.1
else:
if client.R.d["accel"] - 0.1 >= 0:
client.R.d["accel"] -= 0.1
if client.S.d["speedX"] < 10:
if (client.S.d["speedX"] + 0.1) != 0:
                    client.R.d["accel"] += 1 / (client.S.d["speedX"] + 0.1)
            # traction control system
if (client.S.d["wheelSpinVel"][2] + client.S.d["wheelSpinVel"][3]) - (
client.S.d["wheelSpinVel"][0] + client.S.d["wheelSpinVel"][1]
) > 5:
action_torcs["accel"] -= 0.2
else:
action_torcs["accel"] = this_action["accel"]
# gear control from agent
if self.gear_change is True:
action_torcs["gear"] = this_action["gear"]
else:
# automatic gear control
action_torcs["gear"] = 1
if client.S.d["speedX"] > 50:
action_torcs["gear"] = 2
if client.S.d["speedX"] > 80:
action_torcs["gear"] = 3
if client.S.d["speedX"] > 110:
action_torcs["gear"] = 4
if client.S.d["speedX"] > 140:
action_torcs["gear"] = 5
if client.S.d["speedX"] > 170:
action_torcs["gear"] = 6
# one-step dynamics update #################################
# apply actions into TORCS
client.respond_to_server()
# get the response from TORCS
client.get_servers_input()
# get the current full-observation from TORCS
obs = client.S.d
# make an observation from a raw observation vector from TORCS
self.observation = self.make_observaton(obs)
# calculation of progress
progress = np.array(obs["speedX"]) * np.cos(obs["angle"])
# Designed Reward Function #######################################
# This reward function enables agents to learn stable high-speed driving
# with low Y-axis acceleration.
# This reward function was designed after trial and error by me.
if (obs["curLapTime"] - self.time) > 0:
Yac = (obs["speedY"] - self.speedY) / (obs["curLapTime"] - self.time)
else:
Yac = 0
self.speedY = obs["speedY"]
self.time = obs["curLapTime"]
self.Yaclist.append(Yac)
self.poshis.append(obs["trackPos"])
self.anglehis.append(obs["angle"])
self.sphis.append(obs["speedX"])
# reward for the low Y-axis acceleration
eta_Yac = 1
r_Yac = 1 / ((Yac / eta_Yac) ** 2 + 1)
# reward for the small angle : 0 ~ 1
eta_angle = 0.01
r_angle = 1 / ((obs["angle"] / eta_angle) ** 2 + 1)
# reward for the small position from center : 0 ~ 1
eta_pos = 0.01
r_trackPos = 1 / ((obs["trackPos"] / eta_pos) ** 2 + 1)
# reward for the high X-axis speed : 0 ~ 1
maxspeed = 100
if obs["speedX"] >= 0:
r_speed = min(obs["speedX"] / maxspeed, 1)
else:
r_speed = 0
# reward function: -1 ~ 1
reward = 0.2 * r_angle + 0.2 * r_trackPos + 0.3 * r_speed + 0.3 * r_Yac
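        # Worked example with illustrative numbers: for angle = 0.01 rad,
        # trackPos = 0.01, speedX = 50 km/h and Yac = 0, r_angle and
        # r_trackPos are each 1 / (1**2 + 1) = 0.5, r_speed = 0.5 and
        # r_Yac = 1, so reward = 0.2*0.5 + 0.2*0.5 + 0.3*0.5 + 0.3*1.0 = 0.65.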
Yac_threshold = 3.530394 # 0.1G
if np.abs(Yac) > Yac_threshold:
reward = -min(np.abs(Yac) / 250, 1)
# Termination judgement #########################
track = np.array(obs["track"])
# episode terminates when the car is out of track
if track.min() < 0:
reward = -10
client.R.d["meta"] = True
# episode terminates if the progress of agent is little
if self.terminal_judge_start < self.time_step:
if progress < self.termination_limit_progress:
reward = -10
client.R.d["meta"] = True
# episode terminates if the agent runs backward
if np.cos(obs["angle"]) < 0 or obs["distRaced"] < 0:
reward = -10
client.R.d["meta"] = True
# episode terminates when the agent reaches the maximum distance
if obs["distRaced"] >= self.maximum_distance:
reward = 10
client.R.d["meta"] = True
if client.R.d["meta"] is True: # send a reset signal
poshis = np.array(self.poshis)
anglehis = np.array(self.anglehis)
sphis = np.array(self.sphis)
Yachis = np.array(self.Yaclist)
# For training episodes, display information about the vehicle in the finished driving
if self.testmode == False:
print("---------------------------------------------------------")
print("---> raced: ", obs["distRaced"], " m <---")
print("--- maxYac: ", np.max(Yachis), " km/h/s ---")
print("--- minYac: ", np.min(Yachis), " km/h/s ---")
if abs(np.max(Yachis)) >= abs(np.min(Yachis)):
absmaxYac = abs(np.max(Yachis))
else:
absmaxYac = abs(np.min(Yachis))
print("--- absmaxYac: ", absmaxYac, " km/h/s ---")
print("--- meanYac: ", np.mean(Yachis), " km/h/s +- ", np.std(Yachis), "---")
print("--- medianYac: ", np.median(Yachis), " km/h/s ---")
print("--- trackPos_mean: ", np.mean(poshis), " +- ", np.std(poshis), " ---")
print("--- angle_mean : ", np.mean(anglehis), " rad +- ", np.std(anglehis), " ---")
print("--- speedX_mean: ", np.mean(sphis), " km/h +- ", np.std(sphis), " ---")
print("---------------------------------------------------------")
self.initial_run = False
client.respond_to_server()
self.time_step += 1
return self.get_obs(), reward, client.R.d["meta"], {}
def reset(self, relaunch=False):
self.time_step = 0
# If not true, send a reset signal to TORCS when the reset function is called
if self.initial_reset is not True:
self.client.R.d["meta"] = True
self.client.respond_to_server()
## TENTATIVE. Restarting TORCS for every episode will cause the memory leak bug!
if relaunch is True:
self.reset_torcs()
# Modify here if you use multiple tracks in the environment
# Open new UDP in vtorcs
self.client = snakeoil3.Client(p=3101, vision=False)
self.client.MAX_STEPS = np.inf
client = self.client
# get the initial input from TORCS
client.get_servers_input()
# get the current full observation from TORCS
obs = client.S.d
self.observation = self.make_observaton(obs)
# reset variables and lists
self.speedY = obs["speedY"]
self.time = obs["curLapTime"]
self.Yaclist = []
self.poshis = []
self.anglehis = []
self.sphis = []
self.initial_reset = False
return self.get_obs()
def close(self):
os.system("pkill torcs")
def render(self, mode="human"):
# TORCS has a monitor of driving, so this method omitted.
pass
####################################### making observation ############################################
def get_obs(self):
return self.observation
def reset_torcs(self):
os.system("pkill torcs")
time.sleep(0.5)
if self.obsdim == 79:
os.system("torcs &")
elif self.obsdim == 2:
os.system("torcs -nofuel -nodamage -nolaptime &")
else:
os.system("torcs -nofuel -nodamage -nolaptime &")
time.sleep(0.5)
os.system("sh ./gym_torcs_kai/autostart.sh")
time.sleep(0.5)
def agent_to_torcs(self, u):
torcs_action = {"steer": u[0]}
if self.throttle is True: # throttle action is enabled
torcs_action.update({"accel": u[1]})
if self.gear_change is True: # gear change action is enabled
torcs_action.update({"gear": u[2]})
return torcs_action
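        # e.g. agent_to_torcs([0.1, 0.5]) returns {"steer": 0.1, "accel": 0.5}
        # when throttle control is enabled and gear control is not.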
def make_observaton(self, raw_obs):
if self.obsdim == 79:
obs1 = np.array(
[
raw_obs["angle"],
raw_obs["curLapTime"],
raw_obs["damage"],
raw_obs["distFromStart"],
raw_obs["distRaced"],
]
)
focus = raw_obs["focus"]
obs2 = np.array([raw_obs["fuel"], raw_obs["gear"], raw_obs["lastLapTime"]])
opponents = raw_obs["opponents"]
obs3 = np.array(
[
raw_obs["racePos"],
raw_obs["rpm"],
raw_obs["speedX"],
raw_obs["speedY"],
raw_obs["speedZ"],
]
)
track = raw_obs["track"]
trackPos = np.array([raw_obs["trackPos"]])
wheelSpinVel = raw_obs["wheelSpinVel"]
z = np.array(raw_obs["z"])
observ = np.hstack(
[obs1, focus, obs2, opponents, obs3, track, trackPos, wheelSpinVel, z]
)
return observ
elif self.obsdim == 2:
return np.array([raw_obs["angle"], raw_obs["trackPos"]])
elif self.obsdim == 31:
obs1 = np.array(
[
raw_obs["angle"],
raw_obs["gear"],
raw_obs["rpm"],
raw_obs["speedX"],
raw_obs["speedY"],
raw_obs["speedZ"],
]
)
trackPos = np.array([raw_obs["trackPos"]])
z = np.array(raw_obs["z"])
observ = np.hstack(
[obs1, raw_obs["track"], trackPos, raw_obs["wheelSpinVel"], z]
)
return observ
else:
            return None
registry.go |
package plugins
import (
"bytes"
"fmt"
"io"
"net/http"
"net/url"
"path/filepath"
"regexp"
"sort"
"strings"
"sync"
"syscall"
"time"
"context"
log "github.com/sirupsen/logrus"
"github.com/ugorji/go/codec"
"golang.org/x/net/context/ctxhttp"
"github.com/weaveworks/common/backoff"
"github.com/weaveworks/common/fs"
"github.com/weaveworks/scope/common/xfer"
"github.com/weaveworks/scope/probe/controls"
"github.com/weaveworks/scope/report"
)
// Exposed for testing
var (
transport = makeUnixRoundTripper
maxResponseBytes int64 = 50 * 1024 * 1024
errResponseTooLarge = fmt.Errorf("response must be shorter than 50MB")
validPluginName = regexp.MustCompile("^[A-Za-z0-9]+([-][A-Za-z0-9]+)*$")
)
const (
pluginTimeout = 500 * time.Millisecond
scanningInterval = 5 * time.Second
)
// ReportPublisher is an interface for publishing reports immediately
type ReportPublisher interface {
Publish(rpt report.Report)
}
// Registry maintains a list of available plugins by name.
type Registry struct {
rootPath string
apiVersion string
handshakeMetadata map[string]string
pluginsBySocket map[string]*Plugin
lock sync.RWMutex
context context.Context
cancel context.CancelFunc
controlsByPlugin map[string]report.StringSet
pluginsByID map[string]*Plugin
handlerRegistry *controls.HandlerRegistry
publisher ReportPublisher
}
// NewRegistry creates a new registry which watches the given dir root for new
// plugins, and adds them.
func NewRegistry(rootPath, apiVersion string, handshakeMetadata map[string]string, handlerRegistry *controls.HandlerRegistry, publisher ReportPublisher) (*Registry, error) {
ctx, cancel := context.WithCancel(context.Background())
r := &Registry{
rootPath: rootPath,
apiVersion: apiVersion,
handshakeMetadata: handshakeMetadata,
pluginsBySocket: map[string]*Plugin{},
context: ctx,
cancel: cancel,
controlsByPlugin: map[string]report.StringSet{},
pluginsByID: map[string]*Plugin{},
handlerRegistry: handlerRegistry,
publisher: publisher,
}
if err := r.scan(); err != nil {
r.Close()
return nil, err
}
go r.loop()
return r, nil
}
// loop periodically rescans for plugins
func (r *Registry) loop() {
ticker := time.NewTicker(scanningInterval)
defer ticker.Stop()
for {
select {
case <-r.context.Done():
return
case <-ticker.C:
log.Debugf("plugins: scanning...")
if err := r.scan(); err != nil {
log.Warningf("plugins: error: %v", err)
}
}
}
}
// Rescan the plugins directory, load new plugins, and remove missing plugins
func (r *Registry) scan() error {
sockets, err := r.sockets(r.rootPath)
if err != nil {
return err
}
r.lock.Lock()
defer r.lock.Unlock()
plugins := map[string]*Plugin{}
pluginsByID := map[string]*Plugin{}
// add (or keep) plugins which were found
for _, path := range sockets {
if plugin, ok := r.pluginsBySocket[path]; ok {
plugins[path] = plugin
pluginsByID[plugin.PluginSpec.ID] = plugin
continue
}
tr, err := transport(path, pluginTimeout)
if err != nil {
log.Warningf("plugins: error loading plugin %s: %v", path, err)
continue
}
client := &http.Client{Transport: tr, Timeout: pluginTimeout}
plugin, err := NewPlugin(r.context, path, client, r.apiVersion, r.handshakeMetadata)
if err != nil {
log.Warningf("plugins: error loading plugin %s: %v", path, err)
continue
}
plugins[path] = plugin
pluginsByID[plugin.PluginSpec.ID] = plugin
log.Infof("plugins: added plugin %s", path)
}
// remove plugins which weren't found
pluginsToClose := map[string]*Plugin{}
for path, plugin := range r.pluginsBySocket {
if _, ok := plugins[path]; !ok {
pluginsToClose[plugin.PluginSpec.ID] = plugin
log.Infof("plugins: removed plugin %s", plugin.socket)
}
}
r.closePlugins(pluginsToClose)
r.pluginsBySocket = plugins
r.pluginsByID = pluginsByID
return nil
}
// sockets recursively finds all unix sockets under the path provided
func (r *Registry) sockets(path string) ([]string, error) {
var (
result []string
statT syscall.Stat_t
)
// TODO: use of fs.Stat (which is syscall.Stat) here makes this linux specific.
if err := fs.Stat(path, &statT); err != nil {
return nil, err
}
switch statT.Mode & syscall.S_IFMT {
case syscall.S_IFDIR:
files, err := fs.ReadDir(path)
if err != nil {
return nil, err
}
for _, file := range files {
fpath := filepath.Join(path, file.Name())
s, err := r.sockets(fpath)
if err != nil {
log.Warningf("plugins: error loading path %s: %v", fpath, err)
}
result = append(result, s...)
}
case syscall.S_IFSOCK:
result = append(result, path)
}
return result, nil
}
// forEach walks through all the plugins running f for each one.
func (r *Registry) forEach(lock sync.Locker, f func(p *Plugin)) {
lock.Lock()
defer lock.Unlock()
paths := []string{}
for path := range r.pluginsBySocket {
paths = append(paths, path)
}
sort.Strings(paths)
for _, path := range paths {
f(r.pluginsBySocket[path])
}
}
// ForEach walks through all the plugins running f for each one.
func (r *Registry) ForEach(f func(p *Plugin)) {
r.forEach(r.lock.RLocker(), f)
}
// Implementers walks the available plugins fulfilling the given interface
func (r *Registry) Implementers(iface string, f func(p *Plugin)) {
r.ForEach(func(p *Plugin) {
for _, piface := range p.Interfaces {
if piface == iface {
f(p)
}
}
})
}
// Name implements the Reporter interface
func (r *Registry) Name() string { return "plugins" }
// Report implements the Reporter interface
func (r *Registry) Report() (report.Report, error) {
rpt := report.MakeReport()
// All plugins are assumed to (and must) implement reporter
r.forEach(&r.lock, func(plugin *Plugin) {
pluginReport, err := plugin.Report()
if err != nil {
log.Errorf("plugins: %s: /report error: %v", plugin.socket, err)
}
if plugin.Implements("controller") {
r.updateAndRegisterControlsInReport(&pluginReport)
}
rpt.UnsafeMerge(pluginReport)
})
return rpt, nil
}
func (r *Registry) updateAndRegisterControlsInReport(rpt *report.Report) {
key := rpt.Plugins.Keys()[0]
spec, _ := rpt.Plugins.Lookup(key)
pluginID := spec.ID
var newPluginControls []string
rpt.WalkTopologies(func(topology *report.Topology) {
newPluginControls = append(newPluginControls, r.updateAndGetControlsInTopology(pluginID, topology)...)
})
r.updatePluginControls(pluginID, report.MakeStringSet(newPluginControls...))
}
func (r *Registry) updateAndGetControlsInTopology(pluginID string, topology *report.Topology) []string {
var pluginControls []string
newControls := report.Controls{}
for controlID, control := range topology.Controls {
fakeID := fakeControlID(pluginID, controlID)
log.Debugf("plugins: replacing control %s with %s", controlID, fakeID)
control.ID = fakeID
newControls.AddControl(control)
pluginControls = append(pluginControls, controlID)
}
newNodes := report.Nodes{}
for name, node := range topology.Nodes {
log.Debugf("plugins: checking node controls in node %s of %s", name, topology.Label)
newNode := node.WithID(name)
newLatestControls := []string{}
for _, controlID := range node.ActiveControls() {
log.Debugf("plugins: got node control %s", controlID)
newControlID := ""
            if _, found := topology.Controls[controlID]; !found {
                log.Debugf("plugins: node control %s does not exist in topology controls", controlID)
                newControlID = controlID
            } else {
newControlID = fakeControlID(pluginID, controlID)
log.Debugf("plugins: will replace node control %s with %s", controlID, newControlID)
}
newLatestControls = append(newLatestControls, newControlID)
}
newNode = newNode.WithLatestActiveControls(newLatestControls...)
newNodes[newNode.ID] = newNode
}
topology.Controls = newControls
topology.Nodes = newNodes
return pluginControls
}
func (r *Registry) updatePluginControls(pluginID string, newPluginControls report.StringSet) {
oldFakePluginControls := r.fakePluginControls(pluginID)
newFakePluginControls := map[string]xfer.ControlHandlerFunc{}
for _, controlID := range newPluginControls {
newFakePluginControls[fakeControlID(pluginID, controlID)] = r.pluginControlHandler
}
r.handlerRegistry.Batch(oldFakePluginControls, newFakePluginControls)
r.controlsByPlugin[pluginID] = newPluginControls
}
// PluginResponse is an extension of xfer.Response that allows plugins
// to send the shortcut reports
type PluginResponse struct {
xfer.Response
ShortcutReport *report.Report `json:"shortcutReport,omitempty"`
}
func (r *Registry) pluginControlHandler(req xfer.Request) xfer.Response {
pluginID, controlID := realPluginAndControlID(req.Control)
req.Control = controlID
r.lock.RLock()
defer r.lock.RUnlock()
if plugin, found := r.pluginsByID[pluginID]; found {
response := plugin.Control(req)
if response.ShortcutReport != nil {
r.updateAndRegisterControlsInReport(response.ShortcutReport)
response.ShortcutReport.Shortcut = true
r.publisher.Publish(*response.ShortcutReport)
}
return response.Response
}
return xfer.ResponseErrorf("plugin %s not found", pluginID)
}
func realPluginAndControlID(fakeID string) (string, string) {
parts := strings.SplitN(fakeID, "~", 2)
if len(parts) != 2 {
return "", fakeID
}
return parts[0], parts[1]
}
// Close shuts down the registry. It can still be used after this, but will be
// out of date.
func (r *Registry) Close() {
r.cancel()
r.lock.Lock()
defer r.lock.Unlock()
r.closePlugins(r.pluginsByID)
}
func (r *Registry) closePlugins(plugins map[string]*Plugin) {
var toRemove []string
for pluginID, plugin := range plugins {
toRemove = append(toRemove, r.fakePluginControls(pluginID)...)
delete(r.controlsByPlugin, pluginID)
plugin.Close()
}
r.handlerRegistry.Batch(toRemove, nil)
}
func (r *Registry) fakePluginControls(pluginID string) []string {
oldPluginControls := r.controlsByPlugin[pluginID]
var oldFakePluginControls []string
for _, controlID := range oldPluginControls {
oldFakePluginControls = append(oldFakePluginControls, fakeControlID(pluginID, controlID))
}
return oldFakePluginControls
}
func fakeControlID(pluginID, controlID string) string {
return fmt.Sprintf("%s~%s", pluginID, controlID)
}
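// e.g. fakeControlID("my-plugin", "restart") yields "my-plugin~restart", which
// realPluginAndControlID later splits back into its two halves.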
// Plugin is the implementation of a plugin. It is responsible for doing the
// plugin handshake, gathering reports, etc.
type Plugin struct {
xfer.PluginSpec
context context.Context
socket string
expectedAPIVersion string
handshakeMetadata url.Values
client *http.Client
cancel context.CancelFunc
backoff backoff.Interface
}
// NewPlugin loads and initializes a new plugin. If client is nil,
// http.DefaultClient will be used.
func NewPlugin(ctx context.Context, socket string, client *http.Client, expectedAPIVersion string, handshakeMetadata map[string]string) (*Plugin, error) {
id := strings.TrimSuffix(filepath.Base(socket), filepath.Ext(socket))
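    // e.g. a socket at ".../plugins/my-plugin.sock" (hypothetical path) yields
    // the plugin id "my-plugin".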
if !validPluginName.MatchString(id) {
return nil, fmt.Errorf("invalid plugin id %q", id)
}
params := url.Values{}
for k, v := range handshakeMetadata {
params.Add(k, v)
}
ctx, cancel := context.WithCancel(ctx)
plugin := &Plugin{
PluginSpec: xfer.PluginSpec{ID: id, Label: id},
context: ctx,
socket: socket,
expectedAPIVersion: expectedAPIVersion,
handshakeMetadata: params,
client: client,
cancel: cancel,
}
return plugin, nil
}
// Report gets the latest report from the plugin
func (p *Plugin) Report() (result report.Report, err error) {
result = report.MakeReport()
defer func() {
p.setStatus(err)
result.Plugins = result.Plugins.Add(p.PluginSpec)
if err != nil {
result = report.MakeReport()
result.Plugins = xfer.MakePluginSpecs(p.PluginSpec)
}
}()
if err := p.get("/report", p.handshakeMetadata, &result); err != nil {
return result, err
}
if result.Plugins.Size() != 1 {
return result, fmt.Errorf("report must contain exactly one plugin (found %d)", result.Plugins.Size())
}
key := result.Plugins.Keys()[0]
spec, _ := result.Plugins.Lookup(key)
if spec.ID != p.PluginSpec.ID {
return result, fmt.Errorf("plugin must not change its id (is %q, should be %q)", spec.ID, p.PluginSpec.ID)
}
p.PluginSpec = spec
switch {
case spec.APIVersion != p.expectedAPIVersion:
err = fmt.Errorf("incorrect API version: expected %q, got %q", p.expectedAPIVersion, spec.APIVersion)
case spec.Label == "":
err = fmt.Errorf("spec must contain a label")
case !p.Implements("reporter"):
err = fmt.Errorf("spec must implement the \"reporter\" interface")
}
return result, err
}
// Control sends a control message to a plugin
func (p *Plugin) Control(request xfer.Request) (res PluginResponse) {
var err error
defer func() {
p.setStatus(err)
if err != nil {
res = PluginResponse{Response: xfer.ResponseError(err)}
}
}()
if p.Implements("controller") {
err = p.post("/control", p.handshakeMetadata, request, &res)
} else {
err = fmt.Errorf("the %s plugin does not implement the controller interface", p.PluginSpec.Label)
}
return res
}
// Implements checks if the plugin implements the given interface
func (p *Plugin) Implements(iface string) bool {
for _, i := range p.PluginSpec.Interfaces {
if i == iface {
return true
}
}
return false
}
func (p *Plugin) setStatus(err error) {
if err == nil {
p.Status = "ok"
} else {
p.Status = fmt.Sprintf("error: %v", err)
}
}
func (p *Plugin) get(path string, params url.Values, result interface{}) error {
// Context here lets us either timeout req. or cancel it in Plugin.Close
ctx, cancel := context.WithTimeout(p.context, pluginTimeout)
defer cancel()
resp, err := ctxhttp.Get(ctx, p.client, fmt.Sprintf("http://plugin%s?%s", path, params.Encode()))
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("plugin returned non-200 status code: %s", resp.Status)
}
return getResult(resp.Body, result)
}
func (p *Plugin) post(path string, params url.Values, data interface{}, result interface{}) error {
// Context here lets us either timeout req. or cancel it in Plugin.Close
ctx, cancel := context.WithTimeout(p.context, pluginTimeout)
defer cancel()
buf := &bytes.Buffer{}
if err := codec.NewEncoder(buf, &codec.JsonHandle{}).Encode(data); err != nil {
return fmt.Errorf("encoding error: %s", err)
}
resp, err := ctxhttp.Post(ctx, p.client, fmt.Sprintf("http://plugin%s?%s", path, params.Encode()), "application/json", buf)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("plugin returned non-200 status code: %s", resp.Status)
}
return getResult(resp.Body, result)
}
func getResult(body io.ReadCloser, result interface{}) error {
err := codec.NewDecoder(MaxBytesReader(body, maxResponseBytes, errResponseTooLarge), &codec.JsonHandle{}).Decode(&result)
if err == errResponseTooLarge {
return err
}
if err != nil {
return fmt.Errorf("decoding error: %s", err)
}
return nil
}
// Close closes the client
func (p *Plugin) Close() {
if p.backoff != nil {
p.backoff.Stop()
}
p.cancel()
}
value.go |
/*
Copyright 2020 Qiniu Cloud (qiniu.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cl
import (
"reflect"
"github.com/qiniu/goplus/ast/astutil"
"github.com/qiniu/goplus/exec.spec"
"github.com/qiniu/x/log"
)
type iKind = astutil.ConstKind
// iValue represents a Go+ value(s).
// - *goFunc
// - *goValue
// - *nonValue
// - *constVal
// - *funcResult
type iValue interface {
Type() reflect.Type
Kind() iKind
Value(i int) iValue
NumValues() int
}
func isBool(v iValue) bool {
return v.NumValues() == 1 && v.Type() == exec.TyBool
}
// -----------------------------------------------------------------------------
type goValue struct {
t reflect.Type
}
func (p *goValue) Kind() iKind {
return kindOf(p.t)
}
func (p *goValue) Type() reflect.Type {
return p.t
}
func (p *goValue) NumValues() int {
return 1
}
func (p *goValue) Value(i int) iValue {
return p
}
// -----------------------------------------------------------------------------
type nonValue struct {
v interface{} // *exec.GoPackage, goInstr, iType, etc.
}
func (p *nonValue) Kind() iKind {
return reflect.Invalid
}
func (p *nonValue) Type() reflect.Type {
return nil
}
func (p *nonValue) NumValues() int {
return 0
}
func (p *nonValue) Value(i int) iValue {
return p
}
// -----------------------------------------------------------------------------
type wrapValue struct {
x iValue
}
func (p *wrapValue) Type() reflect.Type {
if p.x.NumValues() != 2 {
panic("don't call me")
}
return p.x.Value(0).Type()
}
func (p *wrapValue) Kind() iKind {
if p.x.NumValues() != 2 {
panic("don't call me")
}
return p.x.Value(0).Kind()
}
func (p *wrapValue) NumValues() int {
return p.x.NumValues() - 1
}
func (p *wrapValue) Value(i int) iValue {
return p.x.Value(i)
}
// -----------------------------------------------------------------------------
type funcResults struct {
tfn reflect.Type
}
func (p *funcResults) Kind() iKind {
panic("don't call me")
}
func (p *funcResults) Type() reflect.Type {
panic("don't call me")
}
func (p *funcResults) NumValues() int {
return p.tfn.NumOut()
}
func (p *funcResults) Value(i int) iValue {
return &goValue{t: p.tfn.Out(i)}
}
func newFuncResults(tfn reflect.Type) iValue {
if tfn.NumOut() == 1 {
return &goValue{t: tfn.Out(0)}
}
return &funcResults{tfn: tfn}
}
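// e.g. a function typed func() (int, error) produces a funcResults with
// NumValues() == 2, while a single-result function collapses to a goValue.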
// -----------------------------------------------------------------------------
type qlFunc funcDecl
func newQlFunc(f *funcDecl) *qlFunc {
return (*qlFunc)(f)
}
func (p *qlFunc) FuncInfo() exec.FuncInfo {
return ((*funcDecl)(p)).Get()
}
func (p *qlFunc) Kind() iKind {
return reflect.Func
}
func (p *qlFunc) Type() reflect.Type {
return ((*funcDecl)(p)).Type()
}
func (p *qlFunc) NumValues() int {
return 1
}
func (p *qlFunc) Value(i int) iValue {
return p
}
func (p *qlFunc) Results() iValue {
return newFuncResults(p.Type())
}
func (p *qlFunc) Proto() iFuncType {
return p.Type()
}
// -----------------------------------------------------------------------------
type goFunc struct {
t reflect.Type
addr uint32
kind exec.SymbolKind
isMethod int // 0 - global func, 1 - method
}
func newGoFunc(addr uint32, kind exec.SymbolKind, isMethod int, ctx *blockCtx) *goFunc {
var t reflect.Type
switch kind {
case exec.SymbolFunc:
t = ctx.GetGoFuncType(exec.GoFuncAddr(addr))
case exec.SymbolFuncv:
t = ctx.GetGoFuncvType(exec.GoFuncvAddr(addr))
default:
log.Panicln("getGoFunc: unknown -", kind, addr)
}
return &goFunc{t: t, addr: addr, kind: kind, isMethod: isMethod}
}
func (p *goFunc) Kind() iKind {
return reflect.Func
}
func (p *goFunc) Type() reflect.Type {
return p.t
}
func (p *goFunc) NumValues() int {
return 1
}
func (p *goFunc) Value(i int) iValue {
return p
}
func (p *goFunc) Results() iValue {
return newFuncResults(p.t)
}
func (p *goFunc) Proto() iFuncType {
return p.t
}
// -----------------------------------------------------------------------------
// isConstBound checks a const is bound or not.
func isConstBound(kind astutil.ConstKind) bool {
return astutil.IsConstBound(kind)
}
type constVal struct {
v interface{}
kind iKind
reserve exec.Reserved
}
func newConstVal(v interface{}, kind iKind) *constVal {
return &constVal{v: v, kind: kind, reserve: exec.InvalidReserved}
}
func (p *constVal) Kind() iKind {
return p.kind
}
func (p *constVal) Type() reflect.Type {
if isConstBound(p.kind) {
return exec.TypeFromKind(p.kind)
}
panic("don't call constVal.TypeOf: unbounded")
}
func (p *constVal) NumValues() int {
return 1
}
func (p *constVal) Value(i int) iValue {
return p
}
func (p *constVal) boundKind() reflect.Kind {
if isConstBound(p.kind) {
return p.kind
}
switch p.kind {
case astutil.ConstUnboundInt:
if _, ok := p.v.(int64); ok {
return reflect.Int
}
return reflect.Uint
case astutil.ConstUnboundFloat:
return reflect.Float64
case astutil.ConstUnboundComplex:
return reflect.Complex128
}
log.Panicln("boundKind: unexpected type kind -", p.kind)
return reflect.Invalid
}
func (p *constVal) boundType() reflect.Type {
return exec.TypeFromKind(p.boundKind())
}
func boundType(in iValue) reflect.Type {
if v, ok := in.(*constVal); ok {
return v.boundType()
}
return in.Type()
}
func (p *constVal) bound(t reflect.Type, b exec.Builder) {
kind := t.Kind()
if p.reserve == exec.InvalidReserved { // bounded
if p.kind != kind {
if t == exec.TyEmptyInterface {
return
}
log.Panicln("function call with invalid argument type: requires", t, ", but got", p.kind)
}
return
}
v := boundConst(p.v, t)
p.v, p.kind = v, kind
p.reserve.Push(b, v)
}
func unaryOp(op exec.Operator, x *constVal) *constVal {
i := op.GetInfo()
xkind := x.kind
var kindReal astutil.ConstKind
if isConstBound(xkind) {
kindReal = xkind
} else {
kindReal = realKindOf(xkind)
}
if (i.InFirst & (1 << kindReal)) == 0 {
log.Panicln("unaryOp failed: invalid argument type.")
}
t := exec.TypeFromKind(kindReal)
vx := boundConst(x.v, t)
v := CallBuiltinOp(kindReal, op, vx)
return &constVal{kind: xkind, v: v, reserve: -1}
}
func binaryOp(op exec.Operator, x, y *constVal) *constVal {
i := op.GetInfo()
xkind := x.kind
ykind := y.kind
var kind, kindReal astutil.ConstKind
if isConstBound(xkind) {
kind, kindReal = xkind, xkind
} else if isConstBound(ykind) {
kind, kindReal = ykind, ykind
} else if xkind < ykind {
kind, kindReal = ykind, realKindOf(ykind)
} else {
kind, kindReal = xkind, realKindOf(xkind)
}
if (i.InFirst & (1 << kindReal)) == 0 {
if kindReal != exec.BigInt && op != exec.OpQuo {
log.Panicln("binaryOp failed: invalid first argument type -", i, kindReal)
}
kind = exec.BigRat
} else if i.Out != exec.SameAsFirst {
kind = i.Out
}
t := exec.TypeFromKind(kindReal)
vx := boundConst(x.v, t)
vy := boundConst(y.v, t)
v := CallBuiltinOp(kindReal, op, vx, vy)
return &constVal{kind: kind, v: v, reserve: -1}
}
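// Illustrative sketch (not part of the original source; the operator and kind
// names are assumptions based on the surrounding code): binaryOp promotes two
// untyped constants to a common real kind before folding, e.g.
//
//	x := newConstVal(int64(1), astutil.ConstUnboundInt)
//	y := newConstVal(2.5, astutil.ConstUnboundFloat)
//	sum := binaryOp(exec.OpAdd, x, y) // folded at Float64; result stays an unbound const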
func kindOf(t reflect.Type) exec.Kind {
kind := t.Kind()
if kind == reflect.Ptr {
switch t {
case exec.TyBigRat:
return exec.BigRat
case exec.TyBigInt:
return exec.BigInt
case exec.TyBigFloat:
return exec.BigFloat
}
}
return kind
}
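// Illustrative sketch (assumed behavior, not original code): kindOf collapses
// the big-number pointer types into the pseudo kinds of the exec package and
// leaves ordinary kinds untouched:
//
//	kindOf(exec.TyBigInt)            // exec.BigInt
//	kindOf(reflect.TypeOf(int64(0))) // reflect.Int64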
func boundConst(v interface{}, t reflect.Type) interface{} |
func constIsConvertible(v interface{}, t reflect.Type) bool {
styp := reflect.TypeOf(v)
skind := styp.Kind()
switch kind := t.Kind(); kind {
case reflect.String:
return skind == reflect.String
case reflect.Complex128, reflect.Complex64:
return skind >= reflect.Int && skind <= reflect.Complex128
}
return styp.ConvertibleTo(t)
}
func realKindOf(kind astutil.ConstKind) reflect.Kind {
switch kind {
case astutil.ConstUnboundInt:
return reflect.Int64
case astutil.ConstUnboundFloat:
return reflect.Float64
case astutil.ConstUnboundComplex:
return reflect.Complex128
default:
return kind
}
}
func boundElementType(elts []interface{}, base, max, step int) reflect.Type {
var tBound reflect.Type
var kindUnbound iKind
for i := base; i < max; i += step {
e := elts[i].(iValue)
if e.NumValues() != 1 {
log.Panicln("boundElementType: unexpected - multiple return values.")
}
kind := e.Kind()
if !isConstBound(kind) { // unbound
if kindUnbound < kind {
kindUnbound = kind
}
} else {
if t := e.Type(); tBound != t {
if tBound != nil { // mismatched type
return nil
}
tBound = t
}
}
}
if tBound != nil {
for i := base; i < max; i += step {
if e, ok := elts[i].(*constVal); ok {
if !constIsConvertible(e.v, tBound) { // mismatched type
return nil
}
}
}
return tBound
}
var kindBound iKind
for i := base; i < max; i += step {
if e, ok := elts[i].(*constVal); ok && e.kind == kindUnbound {
kind := e.boundKind()
if kind != kindBound {
if kindBound != 0 { // mismatched type
return nil
}
kindBound = kind
}
}
}
return exec.TypeFromKind(kindBound)
}
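// Illustrative sketch (an assumption, not original code): for a composite
// literal such as []float64{1, 2.5}, boundElementType scans the elements;
// bound (typed) elements must all share one type, and unbound constants must
// be convertible to it, otherwise nil signals a mismatched literal:
//
//	elts := []interface{}{
//		newConstVal(int64(1), astutil.ConstUnboundInt),
//		newConstVal(2.5, astutil.ConstUnboundFloat),
//	}
//	t := boundElementType(elts, 0, len(elts), 1) // reflect.Float64, presumably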
// -----------------------------------------------------------------------------
| {
kind := kindOf(t)
if v == nil {
if kind >= reflect.Chan && kind <= reflect.Slice {
return nil
}
log.Panicln("boundConst: can't convert nil into", t)
}
sval := reflect.ValueOf(v)
st := sval.Type()
if t == st {
return v
}
if kind == reflect.Complex128 || kind == reflect.Complex64 {
if skind := sval.Kind(); skind >= reflect.Int && skind <= reflect.Float64 {
fval := sval.Convert(exec.TyFloat64).Float()
return complex(fval, 0)
}
} else if kind >= exec.BigInt {
val := reflect.New(t.Elem())
skind := kindOf(st)
switch {
case skind >= reflect.Int && skind <= reflect.Int64:
sval = sval.Convert(exec.TyInt64)
val.MethodByName("SetInt64").Call([]reflect.Value{sval})
case skind >= reflect.Uint && skind <= reflect.Uintptr:
sval = sval.Convert(exec.TyUint64)
val.MethodByName("SetUint64").Call([]reflect.Value{sval})
case skind >= reflect.Float32 && skind <= reflect.Float64:
sval = sval.Convert(exec.TyFloat64)
val.MethodByName("SetFloat64").Call([]reflect.Value{sval})
case skind == exec.BigInt:
val.MethodByName("SetInt").Call([]reflect.Value{sval})
case skind == exec.BigFloat:
val.MethodByName("SetRat").Call([]reflect.Value{sval})
default:
log.Panicln("boundConst: convert type failed -", skind)
}
return val.Interface()
}
return sval.Convert(t).Interface()
} |
gc_worker.rs | // Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use std::cmp::Ordering;
use std::convert::TryFrom;
use std::fmt::{self, Display, Formatter};
use std::mem;
use std::sync::mpsc;
use std::sync::{atomic, Arc, Mutex};
use std::thread::{self, Builder as ThreadBuilder, JoinHandle};
use std::time::{Duration, Instant};
use engine::rocks::util::get_cf_handle;
use engine::rocks::DB;
use engine::util::delete_all_in_range_cf;
use engine::{CF_DEFAULT, CF_LOCK, CF_WRITE};
use engine_rocks::RocksIOLimiter;
use engine_traits::IOLimiter;
use futures::Future;
use kvproto::kvrpcpb::Context;
use kvproto::metapb;
use log_wrappers::DisplayValue;
use raft::StateRole;
use crate::raftstore::store::keys;
use crate::raftstore::store::msg::StoreMsg;
use crate::raftstore::store::util::find_peer;
use crate::server::transport::ServerRaftStoreRouter;
use crate::storage::kv::{
Engine, Error as EngineError, ErrorInner as EngineErrorInner, RegionInfoProvider, ScanMode,
Statistics,
};
use crate::storage::metrics::*;
use crate::storage::mvcc::{MvccReader, MvccTxn, TimeStamp};
use crate::storage::{Callback, Error, ErrorInner, Key, Result};
use pd_client::PdClient;
use tikv_util::config::ReadableSize;
use tikv_util::time::{duration_to_sec, SlowTimer};
use tikv_util::worker::{self, Builder as WorkerBuilder, Runnable, ScheduleError, Worker};
/// After the GC scan of a key, output a message to the log if there are at least this many
/// versions of the key.
const GC_LOG_FOUND_VERSION_THRESHOLD: usize = 30;
/// After the GC delete versions of a key, output a message to the log if at least this many
/// versions are deleted.
const GC_LOG_DELETED_VERSION_THRESHOLD: usize = 30;
pub const GC_MAX_PENDING_TASKS: usize = 2;
const GC_SNAPSHOT_TIMEOUT_SECS: u64 = 10;
const GC_TASK_SLOW_SECONDS: u64 = 30;
const POLL_SAFE_POINT_INTERVAL_SECS: u64 = 60;
const BEGIN_KEY: &[u8] = b"";
const PROCESS_TYPE_GC: &str = "gc";
const PROCESS_TYPE_SCAN: &str = "scan";
const DEFAULT_GC_RATIO_THRESHOLD: f64 = 1.1;
pub const DEFAULT_GC_BATCH_KEYS: usize = 512;
// No limit
const DEFAULT_GC_MAX_WRITE_BYTES_PER_SEC: u64 = 0;
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(default)]
#[serde(deny_unknown_fields)]
#[serde(rename_all = "kebab-case")]
pub struct GCConfig {
pub ratio_threshold: f64,
pub batch_keys: usize,
pub max_write_bytes_per_sec: ReadableSize,
}
impl Default for GCConfig {
fn default() -> GCConfig {
GCConfig {
ratio_threshold: DEFAULT_GC_RATIO_THRESHOLD,
batch_keys: DEFAULT_GC_BATCH_KEYS,
max_write_bytes_per_sec: ReadableSize(DEFAULT_GC_MAX_WRITE_BYTES_PER_SEC),
}
}
}
impl GCConfig {
pub fn validate(&self) -> std::result::Result<(), Box<dyn std::error::Error>> {
if self.batch_keys == 0 {
return Err(("gc.batch_keys should not be 0.").into());
}
Ok(())
}
}
/// Provides safe point.
/// TODO: Give it a better name?
pub trait GCSafePointProvider: Send + 'static {
fn get_safe_point(&self) -> Result<TimeStamp>;
}
impl<T: PdClient + 'static> GCSafePointProvider for Arc<T> {
fn get_safe_point(&self) -> Result<TimeStamp> {
let future = self.get_gc_safe_point();
future
.wait()
.map(Into::into)
.map_err(|e| box_err!("failed to get safe point from PD: {:?}", e))
}
}
enum GCTask {
GC {
ctx: Context,
safe_point: TimeStamp,
callback: Callback<()>,
},
UnsafeDestroyRange {
ctx: Context,
start_key: Key,
end_key: Key,
callback: Callback<()>,
},
}
impl GCTask {
pub fn take_callback(&mut self) -> Callback<()> {
let callback = match self {
GCTask::GC {
ref mut callback, ..
} => callback,
GCTask::UnsafeDestroyRange {
ref mut callback, ..
} => callback,
};
mem::replace(callback, Box::new(|_| {}))
}
pub fn get_label(&self) -> &'static str {
match self {
GCTask::GC { .. } => "gc",
GCTask::UnsafeDestroyRange { .. } => "unsafe_destroy_range",
}
}
}
impl Display for GCTask {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
GCTask::GC {
ctx, safe_point, ..
} => {
let epoch = format!("{:?}", ctx.region_epoch.as_ref());
f.debug_struct("GC")
.field("region_id", &ctx.get_region_id())
.field("region_epoch", &epoch)
.field("safe_point", safe_point)
.finish()
}
GCTask::UnsafeDestroyRange {
start_key, end_key, ..
} => f
.debug_struct("UnsafeDestroyRange")
.field("start_key", &format!("{}", start_key))
.field("end_key", &format!("{}", end_key))
.finish(),
}
}
}
/// Used to perform GC operations on the engine.
struct GCRunner<E: Engine> {
engine: E,
local_storage: Option<Arc<DB>>,
raft_store_router: Option<ServerRaftStoreRouter>,
/// Used to limit the write flow of GC.
limiter: Arc<Mutex<Option<RocksIOLimiter>>>,
cfg: GCConfig,
stats: Statistics,
}
impl<E: Engine> GCRunner<E> {
pub fn new(
engine: E,
local_storage: Option<Arc<DB>>,
raft_store_router: Option<ServerRaftStoreRouter>,
limiter: Arc<Mutex<Option<RocksIOLimiter>>>,
cfg: GCConfig,
) -> Self {
Self {
engine,
local_storage,
raft_store_router,
limiter,
cfg,
stats: Statistics::default(),
}
}
fn get_snapshot(&self, ctx: &mut Context) -> Result<E::Snap> {
let timeout = Duration::from_secs(GC_SNAPSHOT_TIMEOUT_SECS);
match wait_op!(|cb| self.engine.async_snapshot(ctx, cb), timeout) {
Some((cb_ctx, Ok(snapshot))) => {
if let Some(term) = cb_ctx.term {
ctx.set_term(term);
}
Ok(snapshot)
}
Some((_, Err(e))) => Err(e),
None => Err(EngineError::from(EngineErrorInner::Timeout(timeout))),
}
.map_err(Error::from)
}
/// Scans keys in the region. Returns scanned keys if any, and a key indicating the scan progress.
fn scan_keys(
&mut self,
ctx: &mut Context,
safe_point: TimeStamp,
from: Option<Key>,
limit: usize,
) -> Result<(Vec<Key>, Option<Key>)> {
let snapshot = self.get_snapshot(ctx)?;
let mut reader = MvccReader::new(
snapshot,
Some(ScanMode::Forward),
!ctx.get_not_fill_cache(),
ctx.get_isolation_level(),
);
let is_range_start = from.is_none();
// GC of a range starts with from == None. As an optimization, skip the GC
// when need_gc reports there is nothing to collect, instead of scanning all data first.
let skip_gc = is_range_start && !reader.need_gc(safe_point, self.cfg.ratio_threshold);
let res = if skip_gc {
KV_GC_SKIPPED_COUNTER.inc();
Ok((vec![], None))
} else {
reader
.scan_keys(from, limit)
.map_err(Error::from)
.and_then(|(keys, next)| {
if keys.is_empty() {
assert!(next.is_none());
if is_range_start {
KV_GC_EMPTY_RANGE_COUNTER.inc();
}
}
Ok((keys, next))
})
};
self.stats.add(reader.get_statistics());
res
}
/// Cleans up outdated data.
fn gc_keys(
&mut self,
ctx: &mut Context,
safe_point: TimeStamp,
keys: Vec<Key>,
mut next_scan_key: Option<Key>,
) -> Result<Option<Key>> {
let snapshot = self.get_snapshot(ctx)?;
let mut txn = MvccTxn::for_scan(
snapshot,
Some(ScanMode::Forward),
TimeStamp::zero(),
!ctx.get_not_fill_cache(),
)
.unwrap();
for k in keys {
let gc_info = txn.gc(k.clone(), safe_point)?;
if gc_info.found_versions >= GC_LOG_FOUND_VERSION_THRESHOLD {
debug!(
"GC found plenty versions for a key";
"region_id" => ctx.get_region_id(),
"versions" => gc_info.found_versions,
"key" => %k
);
}
// TODO: we may delete only part of the versions in a batch, which may not exceed
// the logging threshold `GC_LOG_DELETED_VERSION_THRESHOLD`.
if gc_info.deleted_versions as usize >= GC_LOG_DELETED_VERSION_THRESHOLD {
debug!(
"GC deleted plenty versions for a key";
"region_id" => ctx.get_region_id(),
"versions" => gc_info.deleted_versions,
"key" => %k
);
}
if !gc_info.is_completed {
next_scan_key = Some(k);
break;
}
}
self.stats.add(&txn.take_statistics());
let write_size = txn.write_size();
let modifies = txn.into_modifies();
if !modifies.is_empty() {
if let Some(limiter) = &*self.limiter.lock().unwrap() {
limiter.request(write_size as i64);
}
self.engine.write(ctx, modifies)?;
}
Ok(next_scan_key)
}
fn gc(&mut self, ctx: &mut Context, safe_point: TimeStamp) -> Result<()> {
debug!(
"start doing GC";
"region_id" => ctx.get_region_id(),
"safe_point" => safe_point
);
let mut next_key = None;
loop {
// Scans at most `GCConfig.batch_keys` keys
let (keys, next) = self
.scan_keys(ctx, safe_point, next_key, self.cfg.batch_keys)
.map_err(|e| {
warn!("gc scan_keys failed"; "region_id" => ctx.get_region_id(), "safe_point" => safe_point, "err" => ?e);
e
})?;
if keys.is_empty() {
break;
}
// Does the GC operation on all scanned keys
next_key = self.gc_keys(ctx, safe_point, keys, next).map_err(|e| {
warn!("gc gc_keys failed"; "region_id" => ctx.get_region_id(), "safe_point" => safe_point, "err" => ?e);
e
})?;
if next_key.is_none() {
break;
}
}
debug!(
"gc has finished";
"region_id" => ctx.get_region_id(),
"safe_point" => safe_point
);
Ok(())
}
fn unsafe_destroy_range(&self, _: &Context, start_key: &Key, end_key: &Key) -> Result<()> {
info!(
"unsafe destroy range started";
"start_key" => %start_key, "end_key" => %end_key
);
// TODO: Refine usage of errors
let local_storage = self.local_storage.as_ref().ok_or_else(|| {
let e: Error = box_err!("unsafe destroy range not supported: local_storage not set");
warn!("unsafe destroy range failed"; "err" => ?e);
e
})?;
// Convert keys to RocksDB layer form
// TODO: Logic coupled with raftstore's implementation. Maybe better design is to do it in
// somewhere of the same layer with apply_worker.
let start_data_key = keys::data_key(start_key.as_encoded());
let end_data_key = keys::data_end_key(end_key.as_encoded());
let cfs = &[CF_LOCK, CF_DEFAULT, CF_WRITE];
// First, call delete_files_in_range to free as much disk space as possible
let delete_files_start_time = Instant::now();
for cf in cfs {
let cf_handle = get_cf_handle(local_storage, cf).unwrap();
local_storage
.delete_files_in_range_cf(cf_handle, &start_data_key, &end_data_key, false)
.map_err(|e| {
let e: Error = box_err!(e);
warn!(
"unsafe destroy range failed at delete_files_in_range_cf"; "err" => ?e
);
e
})?;
}
info!(
"unsafe destroy range finished deleting files in range";
"start_key" => %start_key, "end_key" => %end_key, "cost_time" => ?delete_files_start_time.elapsed()
);
// Then, delete all remaining keys in the range.
let cleanup_all_start_time = Instant::now();
for cf in cfs {
// TODO: set use_delete_range with config here.
delete_all_in_range_cf(local_storage, cf, &start_data_key, &end_data_key, false)
.map_err(|e| {
let e: Error = box_err!(e);
warn!(
"unsafe destroy range failed at delete_all_in_range_cf"; "err" => ?e
);
e
})?;
}
let cleanup_all_time_cost = cleanup_all_start_time.elapsed();
if let Some(router) = self.raft_store_router.as_ref() {
router
.send_store(StoreMsg::ClearRegionSizeInRange {
start_key: start_key.as_encoded().to_vec(),
end_key: end_key.as_encoded().to_vec(),
})
.unwrap_or_else(|e| {
// Warn and ignore it.
warn!(
"unsafe destroy range: failed sending ClearRegionSizeInRange";
"err" => ?e
);
});
} else {
warn!("unsafe destroy range: can't clear region size information: raft_store_router not set");
}
info!(
"unsafe destroy range finished cleaning up all";
"start_key" => %start_key, "end_key" => %end_key, "cost_time" => ?cleanup_all_time_cost,
);
Ok(())
}
fn handle_gc_worker_task(&mut self, mut task: GCTask) {
let label = task.get_label();
GC_GCTASK_COUNTER_VEC.with_label_values(&[label]).inc();
let timer = SlowTimer::from_secs(GC_TASK_SLOW_SECONDS);
let result = match &mut task {
GCTask::GC {
ctx, safe_point, ..
} => self.gc(ctx, *safe_point),
GCTask::UnsafeDestroyRange {
ctx,
start_key,
end_key,
..
} => self.unsafe_destroy_range(ctx, start_key, end_key),
};
GC_TASK_DURATION_HISTOGRAM_VEC
.with_label_values(&[label])
.observe(duration_to_sec(timer.elapsed()));
slow_log!(timer, "{}", task);
if result.is_err() {
GC_GCTASK_FAIL_COUNTER_VEC.with_label_values(&[label]).inc();
}
(task.take_callback())(result);
}
}
impl<E: Engine> Runnable<GCTask> for GCRunner<E> {
#[inline]
fn run(&mut self, task: GCTask) {
self.handle_gc_worker_task(task);
}
// The default implementation of `run_batch` prints a warning to the log when it takes over
// 1 second to handle a task. That's not appropriate here, so override it to remove the log.
#[inline]
fn run_batch(&mut self, tasks: &mut Vec<GCTask>) {
for task in tasks.drain(..) {
self.run(task);
}
}
fn on_tick(&mut self) {
let stats = mem::replace(&mut self.stats, Statistics::default());
for (cf, details) in stats.details().iter() {
for (tag, count) in details.iter() {
GC_KEYS_COUNTER_VEC
.with_label_values(&[cf, *tag])
.inc_by(*count as i64);
}
}
}
}
/// When we failed to schedule a `GCTask` to `GCRunner`, use this to handle the `ScheduleError`.
fn handle_gc_task_schedule_error(e: ScheduleError<GCTask>) -> Result<()> {
match e {
ScheduleError::Full(mut task) => {
GC_TOO_BUSY_COUNTER.inc();
(task.take_callback())(Err(Error::from(ErrorInner::GCWorkerTooBusy)));
Ok(())
}
_ => Err(box_err!("failed to schedule gc task: {:?}", e)),
}
}
/// Schedules a `GCTask` to the `GCRunner`.
fn schedule_gc(
scheduler: &worker::Scheduler<GCTask>,
ctx: Context,
safe_point: TimeStamp,
callback: Callback<()>,
) -> Result<()> {
scheduler
.schedule(GCTask::GC {
ctx,
safe_point,
callback,
})
.or_else(handle_gc_task_schedule_error)
}
/// Does GC synchronously.
fn gc(scheduler: &worker::Scheduler<GCTask>, ctx: Context, safe_point: TimeStamp) -> Result<()> {
wait_op!(|callback| schedule_gc(scheduler, ctx, safe_point, callback)).unwrap_or_else(|| {
error!("failed to receive result of gc");
Err(box_err!("gc_worker: failed to receive result of gc"))
})
}
/// The configurations of automatic GC.
pub struct AutoGCConfig<S: GCSafePointProvider, R: RegionInfoProvider> {
pub safe_point_provider: S,
pub region_info_provider: R,
/// Used to find which peer of a region is on this TiKV, so that we can compose a `Context`.
pub self_store_id: u64,
pub poll_safe_point_interval: Duration,
/// If this is set, safe_point will be checked before doing GC on every region while working.
/// Otherwise safe_point will only be checked when `poll_safe_point_interval` has passed since
/// the last check.
pub always_check_safe_point: bool,
/// This will be called when a round of GC has finished and goes back to idle state.
/// This field is for test purpose.
pub post_a_round_of_gc: Option<Box<dyn Fn() + Send>>,
}
impl<S: GCSafePointProvider, R: RegionInfoProvider> AutoGCConfig<S, R> {
/// Creates a new config.
pub fn new(safe_point_provider: S, region_info_provider: R, self_store_id: u64) -> Self {
Self {
safe_point_provider,
region_info_provider,
self_store_id,
poll_safe_point_interval: Duration::from_secs(POLL_SAFE_POINT_INTERVAL_SECS),
always_check_safe_point: false,
post_a_round_of_gc: None,
}
}
/// Creates a config for test purpose. The interval to poll safe point is as short as 0.1s and
/// during GC it never skips checking safe point.
pub fn new_test_cfg(
safe_point_provider: S,
region_info_provider: R,
self_store_id: u64,
) -> Self {
Self {
safe_point_provider,
region_info_provider,
self_store_id,
poll_safe_point_interval: Duration::from_millis(100),
always_check_safe_point: true,
post_a_round_of_gc: None,
}
}
}
/// The only error that will break `GCManager`'s process is that the `GCManager` is interrupted by
/// others, maybe due to TiKV shutting down.
#[derive(Debug)]
enum GCManagerError {
Stopped,
}
type GCManagerResult<T> = std::result::Result<T, GCManagerError>;
/// Used to check if `GCManager` should be stopped.
///
/// When `GCManager` is running, it might take very long time to GC a round. It should be able to
/// break at any time so that we can shut down TiKV in time.
struct GCManagerContext {
/// Used to receive the stop signal. The sender side is held in `GCManagerHandle`.
/// If this field is `None`, the `GCManagerContext` will never stop.
stop_signal_receiver: Option<mpsc::Receiver<()>>,
/// Whether a stop signal has been received.
is_stopped: bool,
}
impl GCManagerContext {
pub fn new() -> Self {
Self {
stop_signal_receiver: None,
is_stopped: false,
}
}
/// Sets the receiver used to receive the stop signal. `GCManagerContext` will be
/// considered stopped as soon as a message is received from the receiver.
pub fn set_stop_signal_receiver(&mut self, rx: mpsc::Receiver<()>) {
self.stop_signal_receiver = Some(rx);
}
/// Sleeps for a while. If a stop message is received, returns immediately with
/// `GCManagerError::Stopped`.
fn sleep_or_stop(&mut self, timeout: Duration) -> GCManagerResult<()> {
if self.is_stopped {
return Err(GCManagerError::Stopped);
}
match self.stop_signal_receiver.as_ref() {
Some(rx) => match rx.recv_timeout(timeout) {
Ok(_) => {
self.is_stopped = true;
Err(GCManagerError::Stopped)
}
Err(mpsc::RecvTimeoutError::Timeout) => Ok(()),
Err(mpsc::RecvTimeoutError::Disconnected) => {
panic!("stop_signal_receiver unexpectedly disconnected")
}
},
None => {
thread::sleep(timeout);
Ok(())
}
}
}
/// Checks if a stop message has been fired. Returns `GCManagerError::Stopped` if there's such
/// a message.
fn check_stopped(&mut self) -> GCManagerResult<()> {
if self.is_stopped |
match self.stop_signal_receiver.as_ref() {
Some(rx) => match rx.try_recv() {
Ok(_) => {
self.is_stopped = true;
Err(GCManagerError::Stopped)
}
Err(mpsc::TryRecvError::Empty) => Ok(()),
Err(mpsc::TryRecvError::Disconnected) => {
error!("stop_signal_receiver unexpectedly disconnected, gc_manager will stop");
Err(GCManagerError::Stopped)
}
},
None => Ok(()),
}
}
}
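// Illustrative sketch (the loop is an assumption, not part of this file): a
// long-running worker cooperates with the stop signal like this:
//
//     let mut ctx = GCManagerContext::new();
//     loop {
//         ctx.check_stopped()?;                       // bail out promptly on shutdown
//         // ... do one slice of GC work ...
//         ctx.sleep_or_stop(Duration::from_secs(1))?; // interruptible sleep
//     }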
/// Composes a `kvproto::Context` from the given `region` and `peer`.
fn make_context(mut region: metapb::Region, peer: metapb::Peer) -> Context {
let mut ctx = Context::default();
ctx.set_region_id(region.get_id());
ctx.set_region_epoch(region.take_region_epoch());
ctx.set_peer(peer);
ctx.set_not_fill_cache(true);
ctx
}
/// Used to represent the state of `GCManager`.
#[derive(PartialEq)]
enum GCManagerState {
None,
Init,
Idle,
Working,
}
impl GCManagerState {
pub fn tag(&self) -> &str {
match self {
GCManagerState::None => "",
GCManagerState::Init => "initializing",
GCManagerState::Idle => "idle",
GCManagerState::Working => "working",
}
}
}
#[inline]
fn set_status_metrics(state: GCManagerState) {
for s in &[
GCManagerState::Init,
GCManagerState::Idle,
GCManagerState::Working,
] {
AUTO_GC_STATUS_GAUGE_VEC
.with_label_values(&[s.tag()])
.set(if state == *s { 1 } else { 0 });
}
}
/// Wraps `JoinHandle` of `GCManager` and helps to stop the `GCManager` synchronously.
struct GCManagerHandle {
join_handle: JoinHandle<()>,
stop_signal_sender: mpsc::Sender<()>,
}
impl GCManagerHandle {
/// Stops the `GCManager`.
pub fn stop(self) -> Result<()> {
let res: Result<()> = self
.stop_signal_sender
.send(())
.map_err(|e| box_err!("failed to send stop signal to gc worker thread: {:?}", e));
if res.is_err() {
return res;
}
self.join_handle
.join()
.map_err(|e| box_err!("failed to join gc worker thread: {:?}", e))
}
}
/// Controls how GC runs automatically on the TiKV.
/// It polls safe point periodically, and when the safe point is updated, `GCManager` will start to
/// scan all regions (whose leader is on this TiKV), and does GC on all those regions.
struct GCManager<S: GCSafePointProvider, R: RegionInfoProvider> {
cfg: AutoGCConfig<S, R>,
/// The current safe point. `GCManager` will try to update it periodically. When `safe_point` is
/// updated, `GCManager` will start to do GC on all regions.
safe_point: TimeStamp,
safe_point_last_check_time: Instant,
/// Used to schedule `GCTask`s.
worker_scheduler: worker::Scheduler<GCTask>,
/// Holds the running status. It will tell us if `GCManager` should stop working and exit.
gc_manager_ctx: GCManagerContext,
}
impl<S: GCSafePointProvider, R: RegionInfoProvider> GCManager<S, R> {
pub fn new(
cfg: AutoGCConfig<S, R>,
worker_scheduler: worker::Scheduler<GCTask>,
) -> GCManager<S, R> {
GCManager {
cfg,
safe_point: TimeStamp::zero(),
safe_point_last_check_time: Instant::now(),
worker_scheduler,
gc_manager_ctx: GCManagerContext::new(),
}
}
/// Starts working in another thread. This function moves the `GCManager` and returns a handle
/// to it.
fn start(mut self) -> Result<GCManagerHandle> {
set_status_metrics(GCManagerState::Init);
self.initialize();
let (tx, rx) = mpsc::channel();
self.gc_manager_ctx.set_stop_signal_receiver(rx);
let res: Result<_> = ThreadBuilder::new()
.name(thd_name!("gc-manager"))
.spawn(move || {
self.run();
})
.map_err(|e| box_err!("failed to start gc manager: {:?}", e));
res.map(|join_handle| GCManagerHandle {
join_handle,
stop_signal_sender: tx,
})
}
/// Polls safe point and does GC in a loop, again and again, until interrupted by invoking
/// `GCManagerHandle::stop`.
fn run(&mut self) {
debug!("gc-manager is started");
self.run_impl().unwrap_err();
set_status_metrics(GCManagerState::None);
debug!("gc-manager is stopped");
}
fn run_impl(&mut self) -> GCManagerResult<()> {
loop {
AUTO_GC_PROCESSED_REGIONS_GAUGE_VEC
.with_label_values(&[PROCESS_TYPE_GC])
.set(0);
AUTO_GC_PROCESSED_REGIONS_GAUGE_VEC
.with_label_values(&[PROCESS_TYPE_SCAN])
.set(0);
set_status_metrics(GCManagerState::Idle);
self.wait_for_next_safe_point()?;
set_status_metrics(GCManagerState::Working);
self.gc_a_round()?;
if let Some(on_finished) = self.cfg.post_a_round_of_gc.as_ref() {
on_finished();
}
}
}
/// Sets the initial state of the `GCManager`.
/// The only task of initializing is to simply get the current safe point as the initial value
/// of `safe_point`. TiKV won't do any GC automatically until `safe_point` is first updated to
/// a value greater than the initial one.
fn initialize(&mut self) {
debug!("gc-manager is initializing");
self.safe_point = TimeStamp::zero();
self.try_update_safe_point();
debug!("gc-manager started"; "safe_point" => self.safe_point);
}
/// Waits until the safe_point updates. Returns the new safe point.
fn wait_for_next_safe_point(&mut self) -> GCManagerResult<TimeStamp> {
loop {
if self.try_update_safe_point() {
return Ok(self.safe_point);
}
self.gc_manager_ctx
.sleep_or_stop(self.cfg.poll_safe_point_interval)?;
}
}
/// Tries to update the safe point. Returns true if safe point has been updated to a greater
/// value. Returns false if safe point didn't change or we encountered an error.
fn try_update_safe_point(&mut self) -> bool {
self.safe_point_last_check_time = Instant::now();
let safe_point = match self.cfg.safe_point_provider.get_safe_point() {
Ok(res) => res,
// Return false directly so we will check it a while later.
Err(e) => {
error!("failed to get safe point from pd"; "err" => ?e);
return false;
}
};
match safe_point.cmp(&self.safe_point) {
Ordering::Less => {
panic!(
"got new safe point {} which is less than current safe point {}. \
there must be something wrong.",
safe_point, self.safe_point
);
}
Ordering::Equal => false,
Ordering::Greater => {
debug!("gc_worker: update safe point"; "safe_point" => safe_point);
self.safe_point = safe_point;
AUTO_GC_SAFE_POINT_GAUGE.set(safe_point.into_inner() as i64);
true
}
}
}
/// Scans all regions on the TiKV whose leader is this TiKV, and does GC on all of them.
/// Regions are scanned and GC-ed in lexicographical order.
///
/// While the `gc_a_round` function is running, it periodically checks whether safe_point is
/// updated before the function `gc_a_round` finishes. If so, *rewinding* will occur. For
/// example, when we have just started doing GC, our progress is like this: ('^' means our
/// current progress)
///
/// ```text
/// | region 1 | region 2 | region 3| region 4 | region 5 | region 6 |
/// ^
/// ```
///
/// And after a while, our GC progress is like this:
///
/// ```text
/// | region 1 | region 2 | region 3| region 4 | region 5 | region 6 |
/// ----------------------^
/// ```
///
/// At this time we found that safe point was updated, so rewinding will happen. First we
/// continue working to the end: ('#' indicates the position where the safe point updated)
///
/// ```text
/// | region 1 | region 2 | region 3| region 4 | region 5 | region 6 |
/// ----------------------#------------------------------------------^
/// ```
///
/// Then regions 1-2 were GC-ed with the old safe point and regions 3-6 were GC-ed with the
/// new one. Then, we *rewind* to the very beginning and continue GC to the position where safe
/// point updates:
///
/// ```text
/// | region 1 | region 2 | region 3| region 4 | region 5 | region 6 |
/// ----------------------#------------------------------------------^
/// ----------------------^
/// ```
///
/// Then GC finishes.
/// If safe point updates again at some time, it will still try to GC all regions with the
/// latest safe point. If safe point always updates before `gc_a_round` finishes, `gc_a_round`
/// may never stop, but it doesn't matter.
fn gc_a_round(&mut self) -> GCManagerResult<()> {
let mut need_rewind = false;
// Represents where we should stop doing GC. `None` means the very end of the TiKV.
let mut end = None;
// Represents where we have GC-ed to. `None` means the very end of the TiKV.
let mut progress = Some(Key::from_encoded(BEGIN_KEY.to_vec()));
// Records how many region we have GC-ed.
let mut processed_regions = 0;
info!(
"gc_worker: start auto gc"; "safe_point" => self.safe_point
);
// The following loop iterates all regions whose leader is on this TiKV and does GC on them.
// At the same time, check whether safe_point is updated periodically. If it's updated,
// rewinding will happen.
loop {
self.gc_manager_ctx.check_stopped()?;
// Check the current GC progress and determine if we are going to rewind or we have
// finished the round of GC.
if need_rewind {
if progress.is_none() {
// We have worked to the end and we need to rewind. Restart from beginning.
progress = Some(Key::from_encoded(BEGIN_KEY.to_vec()));
need_rewind = false;
info!(
"gc_worker: auto gc rewinds"; "processed_regions" => processed_regions
);
processed_regions = 0;
// Set the metric to zero to show that rewinding has happened.
AUTO_GC_PROCESSED_REGIONS_GAUGE_VEC
.with_label_values(&[PROCESS_TYPE_GC])
.set(0);
AUTO_GC_PROCESSED_REGIONS_GAUGE_VEC
.with_label_values(&[PROCESS_TYPE_SCAN])
.set(0);
}
} else {
// We are not going to rewind, so we will stop if `progress` reaches `end`.
let finished = match (progress.as_ref(), end.as_ref()) {
(None, _) => true,
(Some(p), Some(e)) => p >= e,
_ => false,
};
if finished {
// We have worked to the end of the TiKV or our progress has reached `end`, and we
// don't need to rewind. In this case, the round of GC has finished.
info!(
"gc_worker: finished auto gc"; "processed_regions" => processed_regions
);
return Ok(());
}
}
assert!(progress.is_some());
// Before doing GC, check whether safe_point is updated periodically to determine if
// rewinding is needed.
self.check_if_need_rewind(&progress, &mut need_rewind, &mut end);
progress = self.gc_next_region(progress.unwrap(), &mut processed_regions)?;
}
}
/// Checks whether we need to rewind in this round of GC. Only used in `gc_a_round`.
fn check_if_need_rewind(
&mut self,
progress: &Option<Key>,
need_rewind: &mut bool,
end: &mut Option<Key>,
) {
if self.safe_point_last_check_time.elapsed() < self.cfg.poll_safe_point_interval
&& !self.cfg.always_check_safe_point
{
// Skip this check.
return;
}
if !self.try_update_safe_point() {
// Safe point not updated. Skip it.
return;
}
if progress.as_ref().unwrap().as_encoded().is_empty() {
// An empty `progress` means we are at the very beginning. We don't need to
// rewind; we just continue GC to the end.
*need_rewind = false;
*end = None;
info!(
"gc_worker: auto gc will go to the end"; "safe_point" => self.safe_point
);
} else {
*need_rewind = true;
*end = progress.clone();
info!(
"gc_worker: auto gc will go to rewind"; "safe_point" => self.safe_point,
"next_rewind_key" => %(end.as_ref().unwrap())
);
}
}
/// Does GC on the next region after `from_key`. Returns the end key of the region it processed.
/// If we have processed to the end of all regions, returns `None`.
fn gc_next_region(
&mut self,
from_key: Key,
processed_regions: &mut usize,
) -> GCManagerResult<Option<Key>> {
// Get the information of the next region to do GC.
let (ctx, next_key) = self.get_next_gc_context(from_key);
if ctx.is_none() {
// No more regions.
return Ok(None);
}
let ctx = ctx.unwrap();
// Do GC.
// Ignore the error and continue, since it's useless to retry this.
// TODO: Find a better way to handle errors. Maybe we should retry.
debug!(
"trying gc"; "region_id" => ctx.get_region_id(), "region_epoch" => ?ctx.region_epoch.as_ref(),
"end_key" => next_key.as_ref().map(DisplayValue)
);
if let Err(e) = gc(&self.worker_scheduler, ctx.clone(), self.safe_point) {
error!(
"failed gc"; "region_id" => ctx.get_region_id(), "region_epoch" => ?ctx.region_epoch.as_ref(),
"end_key" => next_key.as_ref().map(DisplayValue),
"err" => ?e
);
}
*processed_regions += 1;
AUTO_GC_PROCESSED_REGIONS_GAUGE_VEC
.with_label_values(&[PROCESS_TYPE_GC])
.inc();
Ok(next_key)
}
/// Gets the next region whose end_key is greater than the given key and whose leader is the
/// current TiKV, so we can do GC on it.
/// Returns the context to call GC with and the end_key of the region. The returned end_key
/// will be `None` if the region's end_key is empty.
fn get_next_gc_context(&mut self, key: Key) -> (Option<Context>, Option<Key>) {
let (tx, rx) = mpsc::channel();
let store_id = self.cfg.self_store_id;
let res = self.cfg.region_info_provider.seek_region(
key.as_encoded(),
Box::new(move |iter| {
let mut scanned_regions = 0;
for info in iter {
scanned_regions += 1;
if info.role == StateRole::Leader {
if find_peer(&info.region, store_id).is_some() {
let _ = tx.send((Some(info.region.clone()), scanned_regions));
return;
}
}
}
let _ = tx.send((None, scanned_regions));
}),
);
if let Err(e) = res {
error!(
"gc_worker: failed to get next region information"; "err" => ?e
);
return (None, None);
};
let seek_region_res = rx.recv().map(|(region, scanned_regions)| {
AUTO_GC_PROCESSED_REGIONS_GAUGE_VEC
.with_label_values(&[PROCESS_TYPE_SCAN])
.add(scanned_regions);
region
});
match seek_region_res {
Ok(Some(mut region)) => {
let peer = find_peer(®ion, store_id).unwrap().clone();
let end_key = region.take_end_key();
let next_key = if end_key.is_empty() {
None
} else {
Some(Key::from_encoded(end_key))
};
(Some(make_context(region, peer)), next_key)
}
Ok(None) => (None, None),
Err(e) => {
error!("failed to get next region information"; "err" => ?e);
(None, None)
}
}
}
}
/// Used to schedule GC operations.
pub struct GCWorker<E: Engine> {
engine: E,
/// `local_storage` represent the underlying RocksDB of the `engine`.
local_storage: Option<Arc<DB>>,
/// `raft_store_router` is used to signal raftstore to clean up region size information.
raft_store_router: Option<ServerRaftStoreRouter>,
cfg: Option<GCConfig>,
limiter: Arc<Mutex<Option<RocksIOLimiter>>>,
/// How many strong references. The worker will be stopped
/// once there are no more references.
refs: Arc<atomic::AtomicUsize>,
worker: Arc<Mutex<Worker<GCTask>>>,
worker_scheduler: worker::Scheduler<GCTask>,
gc_manager_handle: Arc<Mutex<Option<GCManagerHandle>>>,
}
impl<E: Engine> Clone for GCWorker<E> {
#[inline]
fn clone(&self) -> Self {
self.refs.fetch_add(1, atomic::Ordering::SeqCst);
Self {
engine: self.engine.clone(),
local_storage: self.local_storage.clone(),
raft_store_router: self.raft_store_router.clone(),
cfg: self.cfg.clone(),
limiter: self.limiter.clone(),
refs: self.refs.clone(),
worker: self.worker.clone(),
worker_scheduler: self.worker_scheduler.clone(),
gc_manager_handle: self.gc_manager_handle.clone(),
}
}
}
impl<E: Engine> Drop for GCWorker<E> {
#[inline]
fn drop(&mut self) {
let refs = self.refs.fetch_sub(1, atomic::Ordering::SeqCst);
if refs != 1 {
return;
}
let r = self.stop();
if let Err(e) = r {
error!("Failed to stop gc_worker"; "err" => ?e);
}
}
}
impl<E: Engine> GCWorker<E> {
pub fn new(
engine: E,
local_storage: Option<Arc<DB>>,
raft_store_router: Option<ServerRaftStoreRouter>,
cfg: GCConfig,
) -> GCWorker<E> {
let worker = Arc::new(Mutex::new(
WorkerBuilder::new("gc-worker")
.pending_capacity(GC_MAX_PENDING_TASKS)
.create(),
));
let worker_scheduler = worker.lock().unwrap().scheduler();
let limiter = if cfg.max_write_bytes_per_sec.0 > 0 {
let bps = i64::try_from(cfg.max_write_bytes_per_sec.0)
.expect("snap_max_write_bytes_per_sec > i64::max_value");
Some(IOLimiter::new(bps))
} else {
None
};
GCWorker {
engine,
local_storage,
raft_store_router,
cfg: Some(cfg),
limiter: Arc::new(Mutex::new(limiter)),
refs: Arc::new(atomic::AtomicUsize::new(1)),
worker,
worker_scheduler,
gc_manager_handle: Arc::new(Mutex::new(None)),
}
}
pub fn start_auto_gc<S: GCSafePointProvider, R: RegionInfoProvider>(
&self,
cfg: AutoGCConfig<S, R>,
) -> Result<()> {
let mut handle = self.gc_manager_handle.lock().unwrap();
assert!(handle.is_none());
let new_handle = GCManager::new(cfg, self.worker_scheduler.clone()).start()?;
*handle = Some(new_handle);
Ok(())
}
pub fn start(&mut self) -> Result<()> {
let runner = GCRunner::new(
self.engine.clone(),
self.local_storage.take(),
self.raft_store_router.take(),
self.limiter.clone(),
self.cfg.take().unwrap(),
);
self.worker
.lock()
.unwrap()
.start(runner)
.map_err(|e| box_err!("failed to start gc_worker, err: {:?}", e))
}
pub fn stop(&self) -> Result<()> {
// Stop GCManager.
if let Some(h) = self.gc_manager_handle.lock().unwrap().take() {
h.stop()?;
}
// Stop self.
if let Some(h) = self.worker.lock().unwrap().stop() {
if let Err(e) = h.join() {
return Err(box_err!("failed to join gc_worker handle, err: {:?}", e));
}
}
Ok(())
}
pub fn async_gc(
&self,
ctx: Context,
safe_point: TimeStamp,
callback: Callback<()>,
) -> Result<()> {
KV_COMMAND_COUNTER_VEC_STATIC.gc.inc();
self.worker_scheduler
.schedule(GCTask::GC {
ctx,
safe_point,
callback,
})
.or_else(handle_gc_task_schedule_error)
}
/// Cleans up all keys in a range and quickly frees the disk space. The range might span over
/// multiple regions, and the `ctx` doesn't indicate a region. The request will be done directly
/// on RocksDB, bypassing the Raft layer. The user must promise that, after calling
/// `destroy_range`, the range will never be accessed again. However, `destroy_range` is allowed
/// to be called multiple times on a single range.
pub fn async_unsafe_destroy_range(
&self,
ctx: Context,
start_key: Key,
end_key: Key,
callback: Callback<()>,
) -> Result<()> {
KV_COMMAND_COUNTER_VEC_STATIC.unsafe_destroy_range.inc();
self.worker_scheduler
.schedule(GCTask::UnsafeDestroyRange {
ctx,
start_key,
end_key,
callback,
})
.or_else(handle_gc_task_schedule_error)
}
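// Illustrative usage sketch (mirrors the tests below; the concrete keys are
// assumptions):
//
//     wait_op!(|cb| gc_worker.async_unsafe_destroy_range(
//         Context::default(),
//         Key::from_raw(b"begin"),
//         Key::from_raw(b"end"),
//         cb
//     ))
//     .unwrap()
//     .unwrap();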
pub fn change_io_limit(&self, limit: i64) -> Result<()> {
let mut limiter = self.limiter.lock().unwrap();
if limit == 0 {
limiter.take();
} else {
limiter
.get_or_insert_with(|| RocksIOLimiter::new(limit))
.set_bytes_per_second(limit as i64);
}
info!("GC io limit changed"; "max_write_bytes_per_sec" => limit);
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::raftstore::coprocessor::{RegionInfo, SeekRegionCallback};
use crate::raftstore::store::util::new_peer;
use crate::storage::kv::Result as EngineResult;
use crate::storage::lock_manager::DummyLockManager;
use crate::storage::{Mutation, Options, Storage, TestEngineBuilder, TestStorageBuilder};
use futures::Future;
use kvproto::metapb;
use std::collections::BTreeMap;
use std::sync::mpsc::{channel, Receiver, Sender};
struct MockSafePointProvider {
rx: Receiver<TimeStamp>,
}
impl GCSafePointProvider for MockSafePointProvider {
fn get_safe_point(&self) -> Result<TimeStamp> {
// An error will be ignored by `GCManager`, which is equivalent to the safe_point
// not being updated.
self.rx.try_recv().map_err(|e| box_err!(e))
}
}
#[derive(Clone)]
struct MockRegionInfoProvider {
// start_key -> (region_id, end_key)
regions: BTreeMap<Vec<u8>, RegionInfo>,
}
impl RegionInfoProvider for MockRegionInfoProvider {
fn seek_region(&self, from: &[u8], callback: SeekRegionCallback) -> EngineResult<()> {
let from = from.to_vec();
callback(&mut self.regions.range(from..).map(|(_, v)| v));
Ok(())
}
}
struct MockGCRunner {
tx: Sender<GCTask>,
}
impl Runnable<GCTask> for MockGCRunner {
fn run(&mut self, mut t: GCTask) {
let cb = t.take_callback();
self.tx.send(t).unwrap();
cb(Ok(()));
}
}
/// A set of utilities that helps testing `GCManager`.
/// The safe_point polling interval is set to 100 ms.
struct GCManagerTestUtil {
gc_manager: Option<GCManager<MockSafePointProvider, MockRegionInfoProvider>>,
worker: Worker<GCTask>,
safe_point_sender: Sender<TimeStamp>,
gc_task_receiver: Receiver<GCTask>,
}
impl GCManagerTestUtil {
pub fn new(regions: BTreeMap<Vec<u8>, RegionInfo>) -> Self {
let mut worker = WorkerBuilder::new("test-gc-worker").create();
let (gc_task_sender, gc_task_receiver) = channel();
worker.start(MockGCRunner { tx: gc_task_sender }).unwrap();
let (safe_point_sender, safe_point_receiver) = channel();
let mut cfg = AutoGCConfig::new(
MockSafePointProvider {
rx: safe_point_receiver,
},
MockRegionInfoProvider { regions },
1,
);
cfg.poll_safe_point_interval = Duration::from_millis(100);
cfg.always_check_safe_point = true;
let gc_manager = GCManager::new(cfg, worker.scheduler());
Self {
gc_manager: Some(gc_manager),
worker,
safe_point_sender,
gc_task_receiver,
}
}
/// Collect `GCTask`s that `GCManager` tried to execute.
pub fn collect_scheduled_tasks(&self) -> Vec<GCTask> {
self.gc_task_receiver.try_iter().collect()
}
pub fn add_next_safe_point(&self, safe_point: impl Into<TimeStamp>) {
self.safe_point_sender.send(safe_point.into()).unwrap();
}
pub fn stop(&mut self) {
self.worker.stop().unwrap().join().unwrap();
}
}
/// Run a round of auto GC and check if it correctly GC regions as expected.
///
/// Param `regions` is a `Vec` of tuples which is `(start_key, end_key, region_id)`
///
/// The first value in param `safe_points` will be used to initialize the GCManager, and the
/// remaining values will be checked each time before GC-ing a region. If the length of
/// `safe_points` is less than the number of executed GC tasks, the last value will be used for
/// the extra GC tasks.
///
/// Param `expected_gc_tasks` is a `Vec` of tuples which is `(region_id, safe_point)`.
fn test_auto_gc(
regions: Vec<(Vec<u8>, Vec<u8>, u64)>,
safe_points: Vec<impl Into<TimeStamp> + Copy>,
expected_gc_tasks: Vec<(u64, impl Into<TimeStamp>)>,
) {
let regions: BTreeMap<_, _> = regions
.into_iter()
.map(|(start_key, end_key, id)| {
let mut r = metapb::Region::default();
r.set_id(id);
r.set_start_key(start_key.clone());
r.set_end_key(end_key);
r.mut_peers().push(new_peer(1, 1));
let info = RegionInfo::new(r, StateRole::Leader);
(start_key, info)
})
.collect();
let mut test_util = GCManagerTestUtil::new(regions);
for safe_point in &safe_points {
test_util.add_next_safe_point(*safe_point);
}
test_util.gc_manager.as_mut().unwrap().initialize();
test_util.gc_manager.as_mut().unwrap().gc_a_round().unwrap();
test_util.stop();
let gc_tasks: Vec<_> = test_util
.collect_scheduled_tasks()
.iter()
.map(|task| match task {
GCTask::GC {
ctx, safe_point, ..
} => (ctx.get_region_id(), *safe_point),
_ => unreachable!(),
})
.collect();
// Following code asserts gc_tasks == expected_gc_tasks.
assert_eq!(gc_tasks.len(), expected_gc_tasks.len());
let all_passed = gc_tasks.into_iter().zip(expected_gc_tasks.into_iter()).all(
|((region, safe_point), (expect_region, expect_safe_point))| {
region == expect_region && safe_point == expect_safe_point.into()
},
);
assert!(all_passed);
}
#[test]
fn test_make_context() {
let mut peer = metapb::Peer::default();
peer.set_id(233);
peer.set_store_id(2333);
let mut epoch = metapb::RegionEpoch::default();
epoch.set_conf_ver(123);
epoch.set_version(456);
let mut region = metapb::Region::default();
region.set_region_epoch(epoch.clone());
region.set_id(789);
let ctx = make_context(region.clone(), peer.clone());
assert_eq!(ctx.get_region_id(), region.get_id());
assert_eq!(ctx.get_peer(), &peer);
assert_eq!(ctx.get_region_epoch(), &epoch);
}
#[test]
fn test_update_safe_point() {
let mut test_util = GCManagerTestUtil::new(BTreeMap::new());
let mut gc_manager = test_util.gc_manager.take().unwrap();
assert_eq!(gc_manager.safe_point, TimeStamp::zero());
test_util.add_next_safe_point(233);
assert!(gc_manager.try_update_safe_point());
assert_eq!(gc_manager.safe_point, 233.into());
let (tx, rx) = channel();
ThreadBuilder::new()
.spawn(move || {
let safe_point = gc_manager.wait_for_next_safe_point().unwrap();
tx.send(safe_point).unwrap();
})
.unwrap();
test_util.add_next_safe_point(233);
test_util.add_next_safe_point(233);
test_util.add_next_safe_point(234);
assert_eq!(rx.recv().unwrap(), 234.into());
test_util.stop();
}
#[test]
fn test_gc_manager_initialize() {
let mut test_util = GCManagerTestUtil::new(BTreeMap::new());
let mut gc_manager = test_util.gc_manager.take().unwrap();
assert_eq!(gc_manager.safe_point, TimeStamp::zero());
test_util.add_next_safe_point(0);
test_util.add_next_safe_point(5);
gc_manager.initialize();
assert_eq!(gc_manager.safe_point, TimeStamp::zero());
assert!(gc_manager.try_update_safe_point());
assert_eq!(gc_manager.safe_point, 5.into());
}
#[test]
fn test_auto_gc_a_round_without_rewind() {
// First region starts with empty and last region ends with empty.
let regions = vec![
(b"".to_vec(), b"1".to_vec(), 1),
(b"1".to_vec(), b"2".to_vec(), 2),
(b"3".to_vec(), b"4".to_vec(), 3),
(b"7".to_vec(), b"".to_vec(), 4),
];
test_auto_gc(
regions,
vec![233],
vec![(1, 233), (2, 233), (3, 233), (4, 233)],
);
// First region doesn't start with empty and last region doesn't end with empty.
let regions = vec![
(b"0".to_vec(), b"1".to_vec(), 1),
(b"1".to_vec(), b"2".to_vec(), 2),
(b"3".to_vec(), b"4".to_vec(), 3),
(b"7".to_vec(), b"8".to_vec(), 4),
];
test_auto_gc(
regions,
vec![233],
vec![(1, 233), (2, 233), (3, 233), (4, 233)],
);
}
#[test]
fn test_auto_gc_rewinding() {
for regions in vec![
// First region starts with empty and last region ends with empty.
vec![
(b"".to_vec(), b"1".to_vec(), 1),
(b"1".to_vec(), b"2".to_vec(), 2),
(b"3".to_vec(), b"4".to_vec(), 3),
(b"7".to_vec(), b"".to_vec(), 4),
],
// First region doesn't start with empty and last region doesn't end with empty.
vec![
(b"0".to_vec(), b"1".to_vec(), 1),
(b"1".to_vec(), b"2".to_vec(), 2),
(b"3".to_vec(), b"4".to_vec(), 3),
(b"7".to_vec(), b"8".to_vec(), 4),
],
] {
test_auto_gc(
regions.clone(),
vec![233, 234],
vec![(1, 234), (2, 234), (3, 234), (4, 234)],
);
test_auto_gc(
regions.clone(),
vec![233, 233, 234],
vec![(1, 233), (2, 234), (3, 234), (4, 234), (1, 234)],
);
test_auto_gc(
regions.clone(),
vec![233, 233, 233, 233, 234],
vec![
(1, 233),
(2, 233),
(3, 233),
(4, 234),
(1, 234),
(2, 234),
(3, 234),
],
);
test_auto_gc(
regions.clone(),
vec![233, 233, 233, 234, 235],
vec![
(1, 233),
(2, 233),
(3, 234),
(4, 235),
(1, 235),
(2, 235),
(3, 235),
],
);
let mut safe_points = vec![233, 233, 233, 234, 234, 234, 235];
// The logic of `gc_a_round` wastes a loop when the last region's end_key is not null, so it
// will check safe point one more time before GC-ing the first region after rewinding.
if !regions.last().unwrap().1.is_empty() {
safe_points.insert(5, 234);
}
test_auto_gc(
regions.clone(),
safe_points,
vec![
(1, 233),
(2, 233),
(3, 234),
(4, 234),
(1, 234),
(2, 235),
(3, 235),
(4, 235),
(1, 235),
],
);
}
}
/// Asserts the data in `storage` is the same as `expected_data`. Keys in `expected_data` should
/// be in encoded form, without ts.
fn check_data<E: Engine>(
storage: &Storage<E, DummyLockManager>,
expected_data: &BTreeMap<Vec<u8>, Vec<u8>>,
) {
let scan_res = storage
.async_scan(
Context::default(),
Key::from_encoded_slice(b""),
None,
expected_data.len() + 1,
1.into(),
Options::default(),
)
.wait()
.unwrap();
let all_equal = scan_res
.into_iter()
.map(|res| res.unwrap())
.zip(expected_data.iter())
.all(|((k1, v1), (k2, v2))| &k1 == k2 && &v1 == v2);
assert!(all_equal);
}
fn test_destroy_range_impl(
init_keys: &[Vec<u8>],
start_ts: impl Into<TimeStamp>,
commit_ts: impl Into<TimeStamp>,
start_key: &[u8],
end_key: &[u8],
) -> Result<()> {
// Return Result from this function so we can use the `wait_op` macro here.
let engine = TestEngineBuilder::new().build().unwrap();
let storage = TestStorageBuilder::from_engine(engine.clone())
.build()
.unwrap();
let db = engine.get_rocksdb();
let mut gc_worker = GCWorker::new(engine, Some(db), None, GCConfig::default());
gc_worker.start().unwrap();
// Convert keys to key value pairs, where the value is "value-{key}".
let data: BTreeMap<_, _> = init_keys
.iter()
.map(|key| {
let mut value = b"value-".to_vec();
value.extend_from_slice(key);
(Key::from_raw(key).into_encoded(), value)
})
.collect();
// Generate `Mutation`s from these keys.
let mutations: Vec<_> = init_keys
.iter()
.map(|key| {
let mut value = b"value-".to_vec();
value.extend_from_slice(key);
Mutation::Put((Key::from_raw(key), value))
})
.collect();
let primary = init_keys[0].clone();
let start_ts = start_ts.into();
// Write these data to the storage.
wait_op!(|cb| storage.async_prewrite(
Context::default(),
mutations,
primary,
start_ts,
Options::default(),
cb
))
.unwrap()
.unwrap();
// Commit.
let keys: Vec<_> = init_keys.iter().map(|k| Key::from_raw(k)).collect();
wait_op!(|cb| storage.async_commit(
Context::default(),
keys,
start_ts,
commit_ts.into(),
cb
))
.unwrap()
.unwrap();
// Assert the data is successfully written to the storage.
check_data(&storage, &data);
let start_key = Key::from_raw(start_key);
let end_key = Key::from_raw(end_key);
// Calculate expected data set after deleting the range.
let data: BTreeMap<_, _> = data
.into_iter()
.filter(|(k, _)| k < start_key.as_encoded() || k >= end_key.as_encoded())
.collect();
// Invoke unsafe destroy range.
wait_op!(|cb| gc_worker.async_unsafe_destroy_range(
Context::default(),
start_key,
end_key,
cb
))
.unwrap()
.unwrap();
// Check remaining data is as expected.
check_data(&storage, &data);
Ok(())
}
#[test]
fn test_destroy_range() {
test_destroy_range_impl(
&[
b"key1".to_vec(),
b"key2".to_vec(),
b"key3".to_vec(),
b"key4".to_vec(),
b"key5".to_vec(),
],
5,
10,
b"key2",
b"key4",
)
.unwrap();
test_destroy_range_impl(
&[b"key1".to_vec(), b"key9".to_vec()],
5,
10,
b"key3",
b"key7",
)
.unwrap();
test_destroy_range_impl(
&[
b"key3".to_vec(),
b"key4".to_vec(),
b"key5".to_vec(),
b"key6".to_vec(),
b"key7".to_vec(),
],
5,
10,
b"key1",
b"key9",
)
.unwrap();
test_destroy_range_impl(
&[
b"key1".to_vec(),
b"key2".to_vec(),
b"key3".to_vec(),
b"key4".to_vec(),
b"key5".to_vec(),
],
5,
10,
b"key2\x00",
b"key4",
)
.unwrap();
test_destroy_range_impl(
&[
b"key1".to_vec(),
b"key1\x00".to_vec(),
b"key1\x00\x00".to_vec(),
b"key1\x00\x00\x00".to_vec(),
],
5,
10,
b"key1\x00",
b"key1\x00\x00",
)
.unwrap();
test_destroy_range_impl(
&[
b"key1".to_vec(),
b"key1\x00".to_vec(),
b"key1\x00\x00".to_vec(),
b"key1\x00\x00\x00".to_vec(),
],
5,
10,
b"key1\x00",
b"key1\x00",
)
.unwrap();
}
#[test]
fn test_gc_config_validate() {
let cfg = GCConfig::default();
cfg.validate().unwrap();
let mut invalid_cfg = GCConfig::default();
invalid_cfg.batch_keys = 0;
assert!(invalid_cfg.validate().is_err());
}
#[test]
fn test_change_io_limit() {
let engine = TestEngineBuilder::new().build().unwrap();
let mut gc_worker = GCWorker::new(engine, None, None, GCConfig::default());
gc_worker.start().unwrap();
assert!(gc_worker.limiter.lock().unwrap().is_none());
// Enable IO limit
gc_worker.change_io_limit(1024).unwrap();
assert_eq!(
gc_worker
.limiter
.lock()
.unwrap()
.as_ref()
.unwrap()
.get_bytes_per_second(),
1024
);
// Change io limit
gc_worker.change_io_limit(2048).unwrap();
assert_eq!(
gc_worker
.limiter
.lock()
.unwrap()
.as_ref()
.unwrap()
.get_bytes_per_second(),
2048,
);
// Disable io limit
gc_worker.change_io_limit(0).unwrap();
assert!(gc_worker.limiter.lock().unwrap().is_none());
}
}
| {
return Err(GCManagerError::Stopped);
} |
delete.rs | // Copyright 2022 RisingLight Project Authors. Licensed under Apache-2.0.
use super::*;
use crate::binder::{BoundDelete, BoundTableRef};
use crate::optimizer::plan_nodes::{LogicalDelete, LogicalFilter};
impl LogicalPlaner {
pub fn | (&self, stmt: BoundDelete) -> Result<PlanRef, LogicalPlanError> {
if let BoundTableRef::BaseTableRef { ref ref_id, .. } = stmt.from_table {
if let Some(expr) = stmt.where_clause {
let child = self.plan_table_ref(&stmt.from_table, true, false)?;
Ok(Arc::new(LogicalDelete::new(
*ref_id,
Arc::new(LogicalFilter::new(expr, child)),
)))
} else {
panic!("delete whole table is not supported yet")
}
} else {
panic!("unsupported table")
}
}
}
| plan_delete |
network.py | # future
from __future__ import annotations
# stdlib
import os
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
# third party
import ascii_magic
from nacl.signing import SigningKey
from nacl.signing import VerifyKey
from pydantic import BaseSettings
# relative
from ...lib.python import String
from ...logger import error
from ..common.message import SignedImmediateSyftMessageWithReply
from ..common.message import SignedMessage
from ..common.message import SyftMessage
from ..common.uid import UID
from ..io.location import Location
from ..io.location import SpecificLocation
from .common.node import Node
from .common.node_manager.association_request_manager import AssociationRequestManager
from .common.node_manager.node_manager import NodeManager
from .common.node_manager.node_route_manager import NodeRouteManager
from .common.node_manager.role_manager import RoleManager
from .common.node_manager.user_manager import UserManager
from .common.node_service.association_request.association_request_service import (
AssociationRequestService,
)
from .common.node_service.association_request.association_request_service import (
AssociationRequestWithoutReplyService,
)
from .common.node_service.network_search.network_search_service import (
NetworkSearchService,
)
from .common.node_service.node_setup.node_setup_messages import (
CreateInitialSetUpMessage,
)
from .common.node_service.node_setup.node_setup_service import NodeSetupService
from .common.node_service.peer_discovery.peer_discovery_service import (
PeerDiscoveryService,
)
from .common.node_service.ping.ping_service import PingService
from .common.node_service.request_receiver.request_receiver_messages import (
RequestMessage,
)
from .common.node_service.role_manager.role_manager_service import RoleManagerService
from .common.node_service.user_manager.user_manager_service import UserManagerService
from .common.node_service.vpn.vpn_service import VPNConnectService
from .common.node_service.vpn.vpn_service import VPNJoinSelfService
from .common.node_service.vpn.vpn_service import VPNJoinService
from .common.node_service.vpn.vpn_service import VPNRegisterService
from .common.node_service.vpn.vpn_service import VPNStatusService
from .domain import Domain
from .domain_client import DomainClient
from .network_client import NetworkClient
class Network(Node):
network: SpecificLocation
child_type = Domain
client_type = NetworkClient
child_type_client_type = DomainClient
def __init__(
self,
name: Optional[str],
network: SpecificLocation = SpecificLocation(),
domain: Optional[Location] = None,
device: Optional[Location] = None,
vm: Optional[Location] = None,
signing_key: Optional[SigningKey] = None,
verify_key: Optional[VerifyKey] = None,
root_key: Optional[VerifyKey] = None,
db_engine: Any = None,
settings: Optional[BaseSettings] = None,
):
super().__init__(
name=name,
network=network,
domain=domain,
device=device,
vm=vm,
signing_key=signing_key,
verify_key=verify_key,
db_engine=db_engine,
settings=settings,
)
# share settings with the FastAPI application level
self.settings = settings
# specific location with name
self.network = SpecificLocation(name=self.name)
self.root_key = root_key
# Database Management Instances
self.users = UserManager(db_engine)
self.roles = RoleManager(db_engine)
self.node = NodeManager(db_engine)
self.node_route = NodeRouteManager(db_engine)
self.association_requests = AssociationRequestManager(db_engine)
# Grid Network Services
self.immediate_services_with_reply.append(AssociationRequestService)
self.immediate_services_with_reply.append(NodeSetupService)
self.immediate_services_with_reply.append(RoleManagerService)
self.immediate_services_with_reply.append(UserManagerService)
self.immediate_services_with_reply.append(VPNConnectService)
self.immediate_services_with_reply.append(VPNJoinService)
self.immediate_services_with_reply.append(VPNRegisterService)
self.immediate_services_with_reply.append(VPNStatusService)
self.immediate_services_with_reply.append(VPNJoinSelfService)
self.immediate_services_with_reply.append(PingService)
self.immediate_services_with_reply.append(NetworkSearchService)
self.immediate_services_with_reply.append(PeerDiscoveryService)
self.immediate_services_without_reply.append(
AssociationRequestWithoutReplyService
)
self.requests: List[RequestMessage] = list()
# available_device_types = set()
# TODO: add available compute types
# default_device = None
# TODO: add default compute type
self._register_services()
self.request_handlers: List[Dict[Union[str, String], Any]] = []
self.handled_requests: Dict[Any, float] = {}
self.post_init()
def initial_setup( # nosec
self,
first_superuser_name: str = "Jane Doe",
first_superuser_email: str = "[email protected]",
first_superuser_password: str = "changethis",
first_superuser_budget: float = 5.55,
domain_name: str = "BigHospital",
) -> Network:
# Build Syft Message
msg: SignedImmediateSyftMessageWithReply = CreateInitialSetUpMessage(
address=self.address,
name=first_superuser_name,
email=first_superuser_email,
password=first_superuser_password,
domain_name=domain_name,
budget=first_superuser_budget,
reply_to=self.address,
).sign(signing_key=self.signing_key)
# Process syft message
_ = self.recv_immediate_msg_with_reply(msg=msg).message
return self
def post_init(self) -> None:
super().post_init()
self.set_node_uid()
def loud_print(self) -> None:
try:
install_path = os.path.abspath(
os.path.join(os.path.realpath(__file__), "../../../img/")
)
ascii_magic.to_terminal(
ascii_magic.from_image_file(
img_path=install_path + "/pygrid.png", columns=83
)
)
print(
r"""
|\ | _ |_ _ _ |
| \| (- |_ \)/ (_) | |(
"""
)
except Exception:
print("NETOWRK NODE (print fail backup)")
@property
def icon(self) -> str:
return "🔗"
@property
def id( | lf) -> UID:
return self.network.id
def message_is_for_me(self, msg: Union[SyftMessage, SignedMessage]) -> bool:
# this needs to be defensive by checking network_id NOT network.id or it breaks
try:
return msg.address.network_id == self.id and msg.address.domain is None
except Exception as e:
error(f"Error checking if {msg.pprint} is for me on {self.pprint}. {e}")
return False
| se |
tableofcontents.go | package main
import (
"net/url"
"strings"
"github.com/maxence-charriere/go-app/v7/pkg/app"
)
type tableOfContents struct {
app.Compo
Ilinks []string
selected string
}
func newTableOfContents() *tableOfContents {
return &tableOfContents{}
}
func (t *tableOfContents) Links(v ...string) *tableOfContents {
t.Ilinks = v
return t
}
func (t *tableOfContents) OnNav(ctx app.Context, u *url.URL) {
t.selected = "#" + u.Fragment
t.Update()
}
func (t *tableOfContents) Render() app.UI {
return app.Aside().
Class("pane").
Class("index").
Body(
app.H1().Text("Index"),
app.Section().Body(
app.Range(t.Ilinks).Slice(func(i int) app.UI {
link := t.Ilinks[i]
return &tableOfContentLink{
Title: link,
Focus: t.selected == githubIndex(link),
}
}),
),
app.Section().Body(
&tableOfContentLink{
Title: "Report issue",
Focus: t.selected == "#report-issue",
},
&tableOfContentLink{
Title: "Support go-app",
Focus: t.selected == "#support-go-app",
},
),
)
}
type tableOfContentLink struct {
app.Compo
Title string
Focus bool
}
func (l *tableOfContentLink) Render() app.UI {
focus := ""
if l.Focus {
focus = "focus"
}
return app.A().
Class(focus).
Href(githubIndex(l.Title)).
Text(l.Title)
}
func | (s string) string {
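	// Normalize the title into a GitHub-style anchor: lowercase, trimmed,
	// punctuation stripped, spaces and dots turned into hyphens.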
s = strings.ToLower(s)
s = strings.TrimSpace(s)
s = strings.ReplaceAll(s, "?", "")
s = strings.ReplaceAll(s, "(", "")
s = strings.ReplaceAll(s, ")", "")
s = strings.ReplaceAll(s, " ", "-")
s = strings.ReplaceAll(s, ".", "-")
return "#" + s
}
| githubIndex |
logger.rs | use crate::event::{Event, MessageEvent, MetaEvent};
use async_trait::async_trait;
use colored::*;
use tracing::{event, Level};
/// Message Event Logger
pub fn message_logger(event: &MessageEvent) |
/// Meta Event Logger
pub fn meta_logger(event: &MetaEvent) {
if &event.meta_event_type == "heartbeat" {
        event!(Level::TRACE, "Receive HeartBeat")
}
}
#[derive(Debug, Clone)]
pub struct Logger;
impl Logger {
async fn event_recv(self, mut event_receiver: crate::EventReceiver) {
while let Ok(event) = event_receiver.recv().await {
match &event {
Event::Message(m) => message_logger(m),
Event::Meta(m) => meta_logger(m),
_ => {}
}
}
}
}
#[async_trait]
impl crate::Plugin for Logger {
fn run(&self, event_receiver: crate::EventReceiver, _: crate::BotGetter) {
let l = self.clone();
tokio::spawn(l.event_recv(event_receiver));
}
fn plugin_name(&self) -> &'static str {
"Logger"
}
async fn load_config(&mut self, _: toml::Value) {}
}
| {
match &event {
MessageEvent::Private(p) => {
let mut user_id = p.user_id.to_string();
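            // Left-pad the ID to a fixed width of 10 so log columns stay aligned.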
while user_id.len() < 10 {
user_id.insert(0, ' ');
}
event!(
Level::INFO,
"{} [{}] -> {} from {}({})",
user_id.green(),
p.self_id.to_string().red(),
p.raw_message,
p.sender.nickname.to_string().blue(),
p.user_id.to_string().green(),
)
}
MessageEvent::Group(g) => {
let mut group_id = g.group_id.to_string();
while group_id.len() < 10 {
group_id.insert(0, ' ');
}
event!(
Level::INFO,
"{} [{}] -> {} from {}({})",
group_id.magenta(),
g.self_id.to_string().red(),
g.raw_message,
g.sender.nickname.to_string().blue(),
g.user_id.to_string().green(),
)
}
}
} |
paren.rs | pub fn rule(
build_ctx: &crate::builder::BuildCtx,
node: &rnix::SyntaxNode,
) -> std::collections::LinkedList<crate::builder::Step> | {
let mut steps = std::collections::LinkedList::new();
let mut children = crate::children::Children::new(build_ctx, node);
let layout = if children.has_comments() {
&crate::config::Layout::Tall
} else {
build_ctx.config.layout()
};
// (
let child = children.get_next().unwrap();
steps.push_back(crate::builder::Step::Format(child.element));
match layout {
crate::config::Layout::Tall => {
steps.push_back(crate::builder::Step::Indent);
}
crate::config::Layout::Wide => {}
}
// /**/
children.drain_comments(|text| {
steps.push_back(crate::builder::Step::NewLine);
steps.push_back(crate::builder::Step::Pad);
steps.push_back(crate::builder::Step::Comment(text));
});
// expr
let child = children.get_next().unwrap();
match layout {
crate::config::Layout::Tall => {
steps.push_back(crate::builder::Step::NewLine);
steps.push_back(crate::builder::Step::Pad);
steps.push_back(crate::builder::Step::FormatWider(child.element));
}
crate::config::Layout::Wide => {
steps.push_back(crate::builder::Step::Format(child.element));
}
}
// /**/
children.drain_comments(|text| {
steps.push_back(crate::builder::Step::NewLine);
steps.push_back(crate::builder::Step::Pad);
steps.push_back(crate::builder::Step::Comment(text));
});
// )
let child = children.get_next().unwrap();
match layout {
crate::config::Layout::Tall => {
steps.push_back(crate::builder::Step::Dedent);
steps.push_back(crate::builder::Step::NewLine);
steps.push_back(crate::builder::Step::Pad);
}
crate::config::Layout::Wide => {}
}
steps.push_back(crate::builder::Step::Format(child.element));
steps
} |
|
auxv_amd64.go | package auxv
import (
"encoding/binary"
"io"
)
|
// ReadFrom reads an auxiliary vector value from r.
func (w *Word) ReadFrom(r io.Reader) error {
return binary.Read(r, binary.LittleEndian, w)
} | // Word is the type used by the auxiliary vector for both the key and values
// of the vector's pairs.
type Word uint64 |
geodesic_test_proj.py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import click
import os
#import json
import pandas as pd
import numpy as np
import pyproj
import matplotlib.pyplot as plt
from geodesic import plot_geodesic
@click.command()
@click.argument('xls_filename')
@click.option('--outdir', default='', help="Output directory - default is 'script_directory\out'")
def | (xls_filename, outdir):
basepath = os.path.dirname(__file__)
#basepath = os.path.dirname(os.path.abspath(__file__))
if outdir=='':
outdir = os.path.join(basepath, 'out')
#xls_filename = os.path.join(outdir, "Provence-Oisans2.xlsx")
filename_base, filename_ext = os.path.splitext(os.path.basename(xls_filename))
d_df = {}
d_df = pd.read_excel(xls_filename, sheetname=None)
max_x, max_y = d_df['ref']['PosX'].max(), d_df['ref']['PosY'].max()
print "max_x=%s, max_y=%s" % (max_x, max_y)
p = pyproj.Proj(
proj='utm',
zone=32,
ellps='WGS84'
)
d_df['measures']['PosX2'] = np.nan
d_df['measures']['PosY2'] = np.nan
for i, point in d_df['measures'].iterrows():
xy2 = p(point['Lat'], point['Lon'])
d_df['measures']['PosX2'][i] = xy2[0]
d_df['measures']['PosY2'][i] = xy2[1]
#print(xy2)
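    # Reprojection error: Euclidean distance between the recorded XY position
    # and the UTM-projected Lat/Lon for each measurement.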
d_df['measures']['Eps'] = np.sqrt(
(d_df['measures']['PosX2'] - d_df['measures']['PosX'])**2 + \
(d_df['measures']['PosY2'] - d_df['measures']['PosY'])**2
)
print(d_df)
print(d_df['measures']['Eps'].mean())
plot_geodesic(outdir, filename_base, d_df['measures'])
plt.show()
#if show:
# plt.show()
if __name__ == "__main__":
main()
| main |
display.py | import pygame as pg
import pygame_widgets as pw
from math import sin, cos
SCREEN_WIDTH = 640
SCREEN_HEIGHT = 480
WHITE = (255,255,255)
YELLOW = (220,220,0)
RED = (220,0,0)
GREY = (180,180,180)
BLACK = (0,0,0)
GREEN = (0,200,0)
BUTTON_COLOR = (0,0,220)
BUTTON_HOVER_COLOR = GREEN
BUTTON_PRESS_COLOR = (0,100,0)
def createScreen():
screen = pg.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
screen.fill(GREY)
return screen
def displayCircle(screen, message, yellow, red):
x = SCREEN_WIDTH / 2
y = SCREEN_HEIGHT / 2
radius = SCREEN_HEIGHT / 4
if (yellow and red):
pg.draw.circle(screen, RED, [x, y], radius, 0, draw_top_right=True, draw_bottom_right=True)
pg.draw.circle(screen, YELLOW, [x, y], radius, 0, draw_top_left=True , draw_bottom_left=True)
elif yellow:
pg.draw.circle(screen, YELLOW, [x, y], radius, 0)
elif red:
pg.draw.circle(screen, RED, [x, y], radius, 0)
font = pg.font.SysFont(None, 40)
text = font.render(message, True, BLACK)
textRect = text.get_rect()
textRect.centerx = screen.get_rect().centerx
textRect.centery = screen.get_rect().centery
screen.blit(text,textRect)
def simulateNeoPixel(screen, neopixel):
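    # Draw each pixel on a ring around the screen center, stepping 15° (pi/12 rad) per pixel.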
size = 10
radius = 100
angle = 0
for color in neopixel.pixels:
x = int((SCREEN_WIDTH / 2) + radius*cos(angle))
y = int((SCREEN_HEIGHT / 2) - radius*sin(angle))
pg.draw.circle(screen, color, [x, y], size, 0)
angle += 3.14159 / 12
def displayStartButton(screen, callback):
width = 200
height = 50
x = (SCREEN_WIDTH - width) / 2
y = SCREEN_HEIGHT * 0.8
button = pw.Button(
screen, x, y, width, height, text='START',
fontSize=50,
textColour=(255,255,255),
inactiveColour=BUTTON_COLOR,
hoverColour=BUTTON_HOVER_COLOR,
pressedColour=BUTTON_PRESS_COLOR,
radius=10,
onClick=callback
)
return button
def displayYesButton(screen, callback):
width = 200
height = 50
x = (SCREEN_WIDTH * 0.45) - width
y = SCREEN_HEIGHT * 0.8
button = pw.Button(
screen, x, y, width, height, text='YES',
fontSize=50,
textColour=(255,255,255),
inactiveColour=BUTTON_COLOR,
hoverColour=BUTTON_HOVER_COLOR,
pressedColour=BUTTON_PRESS_COLOR,
radius=10,
onClick=callback
)
return button
def displayNoButton(screen, callback):
width = 200
height = 50
x = (SCREEN_WIDTH * 0.55)
y = SCREEN_HEIGHT * 0.8
button = pw.Button(
screen, x, y, width, height, text='NO',
fontSize=50,
textColour=(255,255,255),
inactiveColour=BUTTON_COLOR,
hoverColour=BUTTON_HOVER_COLOR,
pressedColour=BUTTON_PRESS_COLOR,
radius=10,
onClick=callback
)
return button
def createRoundButton(screen, callback, x, y, text, color):
width = 40
height = 40
button = pw.Button(
screen, x, y, width, height, text=text,
fontSize=60,
textColour=(255,255,255),
inactiveColour=color,
hoverColour=color,
pressedColour=color,
radius=20,
onClick=callback
)
return button
def displayIncYellowButton(screen, callback):
x = 20
y = SCREEN_HEIGHT * 0.4
return createRoundButton(screen, callback, x, y, "+", YELLOW)
def displayDecYellowButton(screen, callback):
x = 20
y = SCREEN_HEIGHT * 0.5
return createRoundButton(screen, callback, x, y, "-", YELLOW)
def displayIncRedButton(screen, callback):
|
def displayDecRedButton(screen, callback):
x = SCREEN_WIDTH - 40 - 20
y = SCREEN_HEIGHT * 0.5
return createRoundButton(screen, callback, x, y, "-", RED)
def createSkipButton(screen, callback):
width = 100
height = 40
x = (SCREEN_WIDTH - width) * 0.5
y = SCREEN_HEIGHT - 50 - 10
button = pw.Button(
screen, x, y, width, height, text="SKIP",
fontSize=30,
textColour=(255,255,255),
inactiveColour=BUTTON_COLOR,
hoverColour=BUTTON_HOVER_COLOR,
pressedColour=BUTTON_PRESS_COLOR,
radius=20,
onClick=callback
)
return button
def displayScore(screen, yellow, red):
font = pg.font.SysFont(None, 100)
text = font.render(str(yellow), True, YELLOW)
textRect = text.get_rect()
textRect.centerx = SCREEN_WIDTH * 0.17
textRect.centery = screen.get_rect().centery
screen.blit(text,textRect)
text = font.render(str(red), True, RED)
textRect = text.get_rect()
textRect.centerx = SCREEN_WIDTH * (1 - 0.17)
textRect.centery = screen.get_rect().centery
screen.blit(text,textRect)
def displayMusicTitle(screen, title):
font = pg.font.SysFont(None, 30)
text = font.render(str(title), True, BLACK)
textRect = text.get_rect()
textRect.centerx = int(SCREEN_WIDTH * 0.5)
textRect.centery = int(SCREEN_HEIGHT * 0.1)
screen.blit(text,textRect)
| x = SCREEN_WIDTH - 40 - 20
y = SCREEN_HEIGHT * 0.4
return createRoundButton(screen, callback, x, y, "+", RED) |
test_singleton_container.py | from flying_ioc import IocManager
class TSingleton1:
def __init__(self):
pass
|
ioc.set_class(name='singleton1', cls=TSingleton1, singleton=True)
assert ioc.singleton1 is ioc.singleton1
ioc.print_stats() |
def test_singleton_container():
ioc = IocManager(stats=True) |
hyperparams.py | import os
B = 2 # batch size
MB = 1 # batch size for metric learning
max_clusters = 2
commitment_cost = 0.25
is_refine_net = False
is_init_cluter_with_instance = False
top_grasp_only = False
H = 240 # height
W = 320 # width
# BY = 200*2 # bird height (y axis, [-40, 40])
# BX = 176*2 # bird width (x axis, [0, 70.4])
# BZ = 20 # bird depth (z axis, [-3.0, 1.0])
# MH = 200*2
# MW = 176*2
# MD = 20
Z = 128
Y = 64
X = 128
PH = int(128/4)
PW = int(384/4)
fix_crop = False
# ZY = 32
# ZX = 32
# ZZ = 16
N = 50 # number of boxes produced by the rcnn (not all are good)
K = 1 # number of boxes to actually use
S = 2 # seq length
T = 256 # height & width of birdview map
V = 100000 # num velodyne points
sensor_S = 10 # sensor length for sequence
#----------- loading -----------#
loadname = None
emb2D_init = ""
feat_init = ""
obj_init = ""
box_init = ""
ort_init = ""
inp_init = ""
traj_init = ""
occ_init = ""
view_init = ""
vis_init = ""
flow_init = ""
ego_init = ""
total_init = True
touch_feat_init = "" # path to initialize the touch featnet
touch_forward_init = "" # path to initialize the context net
reset_iter = False
#--------- training mode ----------#
do_compute_cluster_center = False
do_freeze_emb2D = False
do_freeze_feat = False
do_freeze_obj = False
do_freeze_box = False
do_freeze_ort = False
do_freeze_inp = False
do_freeze_traj = False
do_freeze_occ = False
do_freeze_view = False
do_freeze_vis = False
do_freeze_flow = False
do_freeze_ego = False
do_freeze_touch_feat = False
do_freeze_touch_forward = False
do_resume = False
do_profile = False
# by default, only backprop on "train" iters
backprop_on_train = True
backprop_on_val = False
backprop_on_test = False
# eval mode: save npys
do_eval_map = False
do_eval_recall = False # keep a buffer and eval recall within it
do_save_embs = False
do_save_ego = False
#----------- augs -----------#
# do_aug2D = False
# do_aug3D = False
do_aug_color = False
do_time_flip = False
do_horz_flip = False
do_synth_rt = False
do_synth_nomotion = False
do_piecewise_rt = False
do_sparsify_pointcloud = 0 # choose a number here, for # pts to use
#----------- net design -----------#
# run nothing
do_emb2D = False
do_emb3D = False
do_feat = False
do_obj = False
do_box = False
do_ort = False
do_inp = False
do_traj = False
do_occ = False
do_view = False
do_flow = False
do_ego = False
do_vis = False
do_touch_embML = False
do_touch_feat = False
do_touch_occ = False
do_touch_forward = False
do_moc = False
do_metric_learning = False
do_validation = False
do_generate_data = False
do_det = False
deeper_det = False
#----------- general hypers -----------#
lr = 0.0
#----------- emb hypers -----------#
emb_2D_smooth_coeff = 0.0
emb_3D_smooth_coeff = 0.0
emb_2D_ml_coeff = 0.0
emb_3D_ml_coeff = 0.0
emb_2D_l2_coeff = 0.0
emb_3D_l2_coeff = 0.0
emb_2D_mindist = 0.0
emb_3D_mindist = 0.0
emb_2D_num_samples = 0
emb_3D_num_samples = 0
# ..... Added for touch embedding .... #
emb_3D_touch_num_samples = 0
emb_3D_touch_mindist = 0.0
emb_3D_touch_ml_coeff = 0.0
emb_3D_touch_l2_coeff = 0.0
#----------- feat hypers -----------#
feat_coeff = 0.0
feat_rigid_coeff = 0.0
feat_do_vae = False
feat_do_sb = False
feat_do_resnet = False
feat_do_sparse_invar = False
feat_kl_coeff = 0.0
feat_dim = 8
feat_do_flip = False
feat_do_rt = False
#----------- obj hypers -----------#
obj_coeff = 0.0
obj_dim = 8
#----------- box hypers -----------#
box_sup_coeff = 0.0
box_cs_coeff = 0.0
box_dim = 8
#----------- ort hypers -----------#
ort_coeff = 0.0
ort_warp_coeff = 0.0
ort_dim = 8
#----------- inp hypers -----------#
inp_coeff = 0.0
inp_dim = 8
#----------- traj hypers -----------#
traj_coeff = 0.0
traj_dim = 8
#----------- occ hypers -----------#
occ_do_cheap = False
occ_coeff = 0.0
occ_smooth_coeff = 0.0
#----------- view hypers -----------#
view_depth = 64
view_pred_embs = False
view_pred_rgb = False
view_l1_coeff = 0.0
view_ce_coeff = 0.0
view_dl_coeff = 0.0
#----------- vis hypers-------------#
vis_softmax_coeff = 0.0
vis_hard_coeff = 0.0
vis_l1_coeff = 0.0
vis_debug = False
#----------- flow hypers -----------#
flow_warp_coeff = 0.0
flow_cycle_coeff = 0.0
flow_smooth_coeff = 0.0
flow_l1_coeff = 0.0
flow_synth_l1_coeff = 0.0
flow_do_synth_rt = False
flow_patch_size = 4
#----------- ego hypers -----------#
ego_use_gt = False
ego_use_precomputed = False
ego_rtd_coeff = 0.0
ego_rta_coeff = 0.0
ego_traj_coeff = 0.0
ego_warp_coeff = 0.0
# ---------- Place holder for forward prediction hyper if any ----------- #
contextH = 4
contextW = 4
contextD = 4
# ---- metric learning loss ---- #
metric_learning_loss_type = "cluster_id" # success_rate
# --------- moc hypers ------------- #
dict_len = 10000
num_neg_samples = 2000
do_bn = True # Do I have the capability of doing batch normalization
num_pos_samples = 1024 # helpful for doing voxel level moco_learning
# --------- det hypers ------------- #
det_anchor_size = 12.0
det_prob_coeff = 1.0
det_reg_coeff = 1.0
alpha_pos = 1.5
beta_neg = 1.0
det_anchor_size_x = 0
det_anchor_size_y = 0
det_anchor_size_z = 0
#----------- mod -----------#
mod = '""'
############ slower-to-change hyperparams below here ############
## logging
log_freq_train = 100
log_freq_val = 100
log_freq_test = 100
snap_freq = 5000
max_iters = 10000
shuffle_train = True
shuffle_val = True
shuffle_test = True
dataset_name = ""
seqname = ""
trainset = ""
valset = ""
testset = ""
dataset_list_dir = ""
dataset_location = ""
validation_path = ""
validate_after = 1
dataset_format = "py" #can be py or npz
# mode selection
do_zoom = False
do_carla_det = False
do_carla_mot = False
do_carla_flo = False
do_carla_sta = False
do_mujoco_offline = False
do_mujoco_offline_metric = False
do_touch_embed = False
############ rev up the experiment ############
train_mode = "train"
mode = os.environ["MODE"]
print('os.environ mode is %s' % mode)
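# Each MODE overlays experiment-specific hyperparameters by exec'ing its config file.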
if mode=="CARLA_DET":
exec(compile(open('config_files/exp_carla_det.py').read(), 'exp_carla_det.py', 'exec'))
elif mode=="CARLA_MOT":
exec(compile(open('config_files/exp_carla_mot.py').read(), 'exp_carla_mot.py', 'exec'))
elif mode=="CARLA_FLO":
exec(compile(open('config_files/exp_carla_flo.py').read(), 'exp_carla_flo.py', 'exec'))
elif mode=="CARLA_STA":
exec(compile(open('config_files/exp_carla_sta.py').read(), 'exp_carla_sta.py', 'exec'))
elif mode=="MUJOCO_OFFLINE":
exec(open('config_files/exp_mujoco_offline.py').read())
elif mode=="MUJOCO_OFFLINE_METRIC":
exec(open('config_files/exp_mujoco_offline_metric.py').read())
elif mode=="MUJOCO_OFFLINE_METRIC_2D":
exec(open('config_files/exp_mujoco_offline_metric_2d.py').read())
elif mode == "TOUCH_EMB":
exec(compile(open('config_files/exp_touch_emb.py').read(), 'exp_touch_emb.py', 'exec'))
elif mode=="CUSTOM":
exec(compile(open('exp_custom.py').read(), 'exp_custom.py', 'exec'))
else:
assert(False) # what mode is this?
############ make some final adjustments ############
trainset_path = "%s/%s.txt" % (dataset_list_dir, trainset)
valset_path = "%s/%s.txt" % (dataset_list_dir, valset)
testset_path = "%s/%s.txt" % (dataset_list_dir, testset)
data_paths = {}
data_paths['train'] = trainset_path
data_paths['val'] = valset_path
data_paths['test'] = testset_path
set_nums = {}
set_nums['train'] = 0
set_nums['val'] = 1
set_nums['test'] = 2
set_names = ['train', 'val', 'test']
log_freqs = {}
log_freqs['train'] = log_freq_train
log_freqs['val'] = log_freq_val
log_freqs['test'] = log_freq_test
shuffles = {}
shuffles['train'] = shuffle_train
shuffles['val'] = shuffle_val
shuffles['test'] = shuffle_test
############ autogen a name; don't touch any hypers! ############
def | (x):
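    # Compact a number for run names via '%g', keeping only the fractional part
    # when there is one (e.g. 0.25 -> ".25", 12 -> "12").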
s = '%g' % x
if '.' in s:
s = s[s.index('.'):]
return s
name = "%02d_m%dx%dx%d" % (B, Z,Y,X)
if do_view or do_emb2D:
name += "_p%dx%d" % (PH,PW)
if lr > 0.0:
lrn = "%.1e" % lr
# e.g., 5.0e-04
lrn = lrn[0] + lrn[3:5] + lrn[-1]
name += "_%s" % lrn
if do_feat:
name += "_F"
name += "%d" % feat_dim
if feat_do_flip:
name += "l"
if feat_do_rt:
name += "r"
if feat_do_vae:
name += "v"
if feat_do_sb:
name += 'b'
if feat_do_resnet:
name += 'r'
if feat_do_sparse_invar:
name += 'i'
if do_freeze_feat:
name += "f"
else:
feat_losses = [feat_rigid_coeff,
feat_kl_coeff,
]
feat_prefixes = ["r",
"k",
]
for l_, l in enumerate(feat_losses):
if l > 0:
name += "_%s%s" % (feat_prefixes[l_],strnum(l))
if do_touch_feat:
name += "_TF"
name += "%d" % feat_dim
if do_ego:
name += "_G"
if ego_use_gt:
name += "gt"
elif ego_use_precomputed:
name += "pr"
else:
if do_freeze_ego:
name += "f"
else:
ego_losses = [ego_rtd_coeff,
ego_rta_coeff,
ego_traj_coeff,
ego_warp_coeff,
]
ego_prefixes = ["rtd",
"rta",
"t",
"w",
]
for l_, l in enumerate(ego_losses):
if l > 0:
name += "_%s%s" % (ego_prefixes[l_],strnum(l))
if do_obj:
name += "_J"
# name += "%d" % obj_dim
if do_freeze_obj:
name += "f"
else:
# no real hyps here
pass
if do_box:
name += "_B"
# name += "%d" % box_dim
if do_freeze_box:
name += "f"
else:
box_coeffs = [box_sup_coeff,
box_cs_coeff,
# box_smooth_coeff,
]
box_prefixes = ["su",
"cs",
# "s",
]
for l_, l in enumerate(box_coeffs):
if l > 0:
name += "_%s%s" % (box_prefixes[l_],strnum(l))
if do_ort:
name += "_O"
# name += "%d" % ort_dim
if do_freeze_ort:
name += "f"
else:
ort_coeffs = [ort_coeff,
ort_warp_coeff,
# ort_smooth_coeff,
]
ort_prefixes = ["c",
"w",
# "s",
]
for l_, l in enumerate(ort_coeffs):
if l > 0:
name += "_%s%s" % (ort_prefixes[l_],strnum(l))
if do_inp:
name += "_I"
# name += "%d" % inp_dim
if do_freeze_inp:
name += "f"
else:
inp_coeffs = [inp_coeff,
# inp_smooth_coeff,
]
inp_prefixes = ["c",
# "s",
]
for l_, l in enumerate(inp_coeffs):
if l > 0:
name += "_%s%s" % (inp_prefixes[l_],strnum(l))
if do_traj:
name += "_T"
name += "%d" % traj_dim
if do_freeze_traj:
name += "f"
else:
# no real hyps here
pass
if do_occ:
name += "_O"
if occ_do_cheap:
name += "c"
if do_freeze_occ:
name += "f"
else:
occ_coeffs = [occ_coeff,
occ_smooth_coeff,
]
occ_prefixes = ["c",
"s",
]
for l_, l in enumerate(occ_coeffs):
if l > 0:
name += "_%s%s" % (occ_prefixes[l_],strnum(l))
if do_touch_occ:
name += "_TO"
if occ_do_cheap:
name += "c"
if do_freeze_occ:
name += "f"
else:
occ_coeffs = [occ_coeff,
occ_smooth_coeff,
]
occ_prefixes = ["c",
"s",
]
for l_, l in enumerate(occ_coeffs):
if l > 0:
name += "_%s%s" % (occ_prefixes[l_],strnum(l))
if do_view:
name += "_V"
if view_pred_embs:
name += "e"
if view_pred_rgb:
name += "r"
if do_freeze_view:
name += "f"
# sometimes, even if view is frozen, we use the loss
# to train other nets
view_coeffs = [view_depth,
view_l1_coeff,
view_ce_coeff,
view_dl_coeff,
]
view_prefixes = ["d",
"c",
"e",
"s",
]
for l_, l in enumerate(view_coeffs):
if l > 0:
name += "_%s%s" % (view_prefixes[l_],strnum(l))
if do_vis:
name += "_V"
if vis_debug:
name += 'd'
if do_freeze_vis:
name += "f"
else:
vis_coeffs = [vis_softmax_coeff,
vis_hard_coeff,
vis_l1_coeff,
]
vis_prefixes = ["s",
"h",
"c",
]
for l_, l in enumerate(vis_coeffs):
if l > 0:
name += "_%s%s" % (vis_prefixes[l_],strnum(l))
if do_emb2D:
name += "_E2"
if do_freeze_emb2D:
name += "f"
emb_coeffs = [emb_2D_smooth_coeff,
emb_2D_ml_coeff,
emb_2D_l2_coeff,
emb_2D_num_samples,
emb_2D_mindist,
]
emb_prefixes = ["s",
"m",
"e",
"n",
"d",
]
for l_, l in enumerate(emb_coeffs):
if l > 0:
name += "_%s%s" % (emb_prefixes[l_],strnum(l))
if do_emb3D:
name += "_E3"
emb_coeffs = [emb_3D_smooth_coeff,
emb_3D_ml_coeff,
emb_3D_l2_coeff,
emb_3D_num_samples,
emb_3D_mindist,
]
emb_prefixes = ["s",
"m",
"e",
"n",
"d",
]
for l_, l in enumerate(emb_coeffs):
if l > 0:
name += "_%s%s" % (emb_prefixes[l_],strnum(l))
if do_touch_embML:
name += "_touchE3"
emb_coeffs = [emb_3D_smooth_coeff,
emb_3D_ml_coeff,
emb_3D_l2_coeff,
emb_3D_num_samples,
emb_3D_mindist,
]
emb_prefixes = ["s",
"m",
"e",
"n",
"d",
]
for l_, l in enumerate(emb_coeffs):
if l > 0:
name += "_%s%s" % (emb_prefixes[l_],strnum(l))
if do_touch_forward:
name += "_tforward"
# hyperparams if any go here
forward_vars = [contextH,
contextW,
contextD]
forward_prefixes = ['ch', 'cw', 'cd']
for l_, l in enumerate(forward_vars):
if l > 0:
name += "_%s%s" % (forward_prefixes[l_], strnum(l))
if do_moc:
name += "_mocml"
moc_vars = [num_neg_samples,
num_pos_samples,
dict_len,
do_bn,
emb_3D_mindist]
moc_prefixes = ['nns', 'nps', 'dl', 'do_bn', 'md']
for l_, l in enumerate(moc_vars):
if l > 0:
name += "_%s%s" % (moc_prefixes[l_], strnum(l))
if do_flow:
name += "_F"
if do_freeze_flow:
name += "f"
else:
flow_coeffs = [flow_warp_coeff,
flow_cycle_coeff,
flow_smooth_coeff,
flow_l1_coeff,
flow_synth_l1_coeff,
]
flow_prefixes = ["w",
"c",
"s",
"e",
"y",
]
for l_, l in enumerate(flow_coeffs):
if l > 0:
name += "_%s%s" % (flow_prefixes[l_],strnum(l))
##### end model description
# add some training data info
sets_to_run = {}
if trainset:
name = "%s_%s" % (name, trainset)
sets_to_run['train'] = True
else:
sets_to_run['train'] = False
if valset:
name = "%s_%s" % (name, valset)
sets_to_run['val'] = True
else:
sets_to_run['val'] = False
if testset:
name = "%s_%s" % (name, testset)
sets_to_run['test'] = True
else:
sets_to_run['test'] = False
sets_to_backprop = {}
sets_to_backprop['train'] = backprop_on_train
sets_to_backprop['val'] = backprop_on_val
sets_to_backprop['test'] = backprop_on_test
if (do_aug_color or
do_horz_flip or
do_time_flip or
do_synth_rt or
do_piecewise_rt or
do_synth_nomotion or
do_sparsify_pointcloud):
name += "_A"
if do_aug_color:
name += "c"
if do_horz_flip:
name += "h"
if do_time_flip:
name += "t"
if do_synth_rt:
assert(not do_piecewise_rt)
name += "s"
if do_piecewise_rt:
assert(not do_synth_rt)
name += "p"
if do_synth_nomotion:
name += "n"
if do_sparsify_pointcloud:
name += "v"
if (not shuffle_train) or (not shuffle_val) or (not shuffle_test):
name += "_ns"
if do_profile:
name += "_PR"
if mod:
name = "%s_%s" % (name, mod)
if do_resume:
total_init = name
if do_eval_recall:
name += '_ev_re1_evaluation'
if do_validation:
splits = validation_path.split('/')
val_path = splits[-1][:-4]
name += f'val_{val_path}'
print(name)
| strnum |
go1_13_encoding_gob.go | // Code generated by 'goexports encoding/gob'. DO NOT EDIT.
// +build go1.13,!go1.14
package stdlib
import (
"encoding/gob"
"reflect"
)
func | () {
Symbols["encoding/gob"] = map[string]reflect.Value{
// function, constant and variable definitions
"NewDecoder": reflect.ValueOf(gob.NewDecoder),
"NewEncoder": reflect.ValueOf(gob.NewEncoder),
"Register": reflect.ValueOf(gob.Register),
"RegisterName": reflect.ValueOf(gob.RegisterName),
// type definitions
"CommonType": reflect.ValueOf((*gob.CommonType)(nil)),
"Decoder": reflect.ValueOf((*gob.Decoder)(nil)),
"Encoder": reflect.ValueOf((*gob.Encoder)(nil)),
"GobDecoder": reflect.ValueOf((*gob.GobDecoder)(nil)),
"GobEncoder": reflect.ValueOf((*gob.GobEncoder)(nil)),
// interface wrapper definitions
"_GobDecoder": reflect.ValueOf((*_encoding_gob_GobDecoder)(nil)),
"_GobEncoder": reflect.ValueOf((*_encoding_gob_GobEncoder)(nil)),
}
}
// _encoding_gob_GobDecoder is an interface wrapper for GobDecoder type
type _encoding_gob_GobDecoder struct {
WGobDecode func(a0 []byte) error
}
func (W _encoding_gob_GobDecoder) GobDecode(a0 []byte) error { return W.WGobDecode(a0) }
// _encoding_gob_GobEncoder is an interface wrapper for GobEncoder type
type _encoding_gob_GobEncoder struct {
WGobEncode func() ([]byte, error)
}
func (W _encoding_gob_GobEncoder) GobEncode() ([]byte, error) { return W.WGobEncode() }
| init |
0001_initial.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
| dependencies = [
]
operations = [
migrations.CreateModel(
name='Rate',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('rate', models.DecimalField(null=True, verbose_name=b'Exchange rate', max_digits=8, decimal_places=4, blank=True)),
('date', models.DateField(db_index=True)),
('currency', models.CharField(default=b'USD', max_length=3, db_index=True, choices=[(b'CHF', b'CHF'), (b'EUR', b'EUR'), (b'GBP', b'GBP'), (b'USD', b'USD')])),
],
options={
'ordering': ['-date', 'currency'],
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='rate',
unique_together=set([('date', 'currency')]),
),
] |
|
BedTokenMarketDataContext.ts | import { createContext } from 'react'
interface BedTokenMarketDataValues {
prices?: number[][]
hourlyPrices?: number[][]
marketcaps?: number[][]
volumes?: number[][]
latestPrice?: number
latestMarketCap?: number | const BedMarketData = createContext<BedTokenMarketDataValues>({})
export default BedMarketData | latestVolume?: number
}
|
LayoutStateManager-min.js | /*
* Ext JS Library 1.1.1
* Copyright(c) 2006-2007, Ext JS, LLC.
* [email protected]
*
* http://www.extjs.com/license
*/ |
Ext.LayoutStateManager=function(A){this.state={north:{},south:{},east:{},west:{}}};Ext.LayoutStateManager.prototype={init:function(D,G){this.provider=G;var F=G.get(D.id+"-layout-state");if(F){var E=D.isUpdating();if(!E){D.beginUpdate()}for(var A in F){if(typeof F[A]!="function"){var B=F[A];var C=D.getRegion(A);if(C&&B){if(B.size){C.resizeTo(B.size)}if(B.collapsed==true){C.collapse(true)}else{C.expand(null,true)}}}}if(!E){D.endUpdate()}this.state=F}this.layout=D;D.on("regionresized",this.onRegionResized,this);D.on("regioncollapsed",this.onRegionCollapsed,this);D.on("regionexpanded",this.onRegionExpanded,this)},storeState:function(){this.provider.set(this.layout.id+"-layout-state",this.state)},onRegionResized:function(B,A){this.state[B.getPosition()].size=A;this.storeState()},onRegionCollapsed:function(A){this.state[A.getPosition()].collapsed=true;this.storeState()},onRegionExpanded:function(A){this.state[A.getPosition()].collapsed=false;this.storeState()}}; |
|
test_entity_data_api.py | # coding: utf-8
"""
Senzing REST API
This is the Senzing REST API. It describes the REST interface to Senzing API functions available via REST. It leverages the Senzing native API which is documented at [https://docs.senzing.com](https://docs.senzing.com) # noqa: E501
OpenAPI spec version: 1.6.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from api.entity_data_api import EntityDataApi # noqa: E501
from swagger_client.rest import ApiException
class TestEntityDataApi(unittest.TestCase):
"""EntityDataApi unit test stubs"""
def setUp(self):
        self.api = EntityDataApi()  # noqa: E501
def tearDown(self):
pass
def test_add_record(self):
"""Test case for add_record
Load a new record or replace a record in a data source with a specific record ID. # noqa: E501
"""
pass
def test_add_record_with_returned_record_id(self):
"""Test case for add_record_with_returned_record_id
Load a new record in # noqa: E501
"""
pass
def test_get_data_source_record(self):
"""Test case for get_data_source_record
Get an entity record by data source and record ID. # noqa: E501
"""
pass
def test_get_entity_by_entity_id(self):
"""Test case for get_entity_by_entity_id
Get a resolved entity by entity ID. # noqa: E501
"""
pass
def | (self):
"""Test case for get_entity_by_record_id
Get a resolved entity by data source and record ID. # noqa: E501
"""
pass
def test_search_by_attributes(self):
"""Test case for search_by_attributes
Search for entities that would match or relate to the provided entity features. # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| test_get_entity_by_record_id |
OrganizationController.ts | import {
Authorized,
Body,
Delete,
Get,
JsonController,
OnUndefined,
Param,
Patch,
Post,
  QueryParams,
  UseBefore,
UseInterceptor,
} from 'routing-controllers';
import { ResponseSchema } from 'routing-controllers-openapi';
import _ from 'lodash';
import { OrganizationService, CListData } from '../services';
import { Organization } from '../models';
import { OrganizationNotFoundError } from '../errors';
import { ClassTransformerInterceptor, ListResponseInterceptor } from '../interceptors';
import { ErrorResponse, BaseOrganization, OrganizationResponse } from './responses';
import { BaseQuery, FullQuery, UpdateOrganizationBody } from './requests';
import { ParseHelper } from '../services/common';
import { CheckRoleFuncMiddleware } from '../middlewares';
import { UserRoles } from '../middlewares/CheckRoleFuncMiddleware';
@Authorized()
@JsonController('/organizations')
@ResponseSchema(ErrorResponse, {statusCode: 401})
@ResponseSchema(ErrorResponse, {statusCode: 404})
@ResponseSchema(ErrorResponse, {statusCode: 500})
export class | {
constructor(private organizationService: OrganizationService, private parseHelper: ParseHelper) {}
@Get()
@UseInterceptor(ClassTransformerInterceptor(OrganizationResponse))
@UseInterceptor(ListResponseInterceptor)
@OnUndefined(OrganizationNotFoundError)
@ResponseSchema(OrganizationResponse, { isArray: true })
public find(
@QueryParams() query: FullQuery
): Promise<CListData<Organization> | undefined> {
const queryParse = this.parseHelper.fullQueryParam(query);
return this.organizationService.find(queryParse);
}
@Post()
@UseBefore(CheckRoleFuncMiddleware(UserRoles.admin))
@OnUndefined(OrganizationNotFoundError)
@ResponseSchema(OrganizationResponse)
public create(
@Body({ required: true }) body: BaseOrganization
): Promise<Organization | undefined> {
const organization = new Organization();
_.assign(organization, body);
return this.organizationService.create(organization);
}
@Get('/:id')
@UseInterceptor(ClassTransformerInterceptor(OrganizationResponse))
@OnUndefined(OrganizationNotFoundError)
@ResponseSchema(OrganizationResponse)
public async findOne(
@Param('id') id: string,
@QueryParams() query: BaseQuery
): Promise<Organization | undefined> {
const queryParse = this.parseHelper.baseQueryParam(query);
return await this.organizationService.findOne(id, queryParse);
}
@Patch('/:id')
@UseBefore(CheckRoleFuncMiddleware(UserRoles.admin))
@OnUndefined(OrganizationNotFoundError)
@ResponseSchema(OrganizationResponse)
public update(
@Param('id') id: string,
@Body({ required: true }) body: UpdateOrganizationBody
): Promise<Organization | undefined> {
const bodyParse = this.parseHelper.removeUndefinedProperty(body);
return this.organizationService.update(id, bodyParse);
}
@Delete('/:id')
@UseBefore(CheckRoleFuncMiddleware(UserRoles.admin))
@OnUndefined(OrganizationNotFoundError)
public delete(@Param('id') id: string): Promise<Organization | undefined> {
return this.organizationService.delete(id);
}
}
| OrganizationController |
config.module.ts | import { Module } from '@nestjs/common';
import { ConfigService } from './config.service'; | providers: [ConfigService],
})
export class ConfigModule {} |
@Module({
imports: [], |
lab6_p1.py | 'name': '6.1',
'suites': [
{
'cases': [
{
'code': r"""
>>> # It looks like you did not follow the format
>>> # given in the problem. Maybe there's a typo?
>>> 'new_letter' in vars()
True
"""
},
{
'code': r"""
>>> # Your loop is off.
>>> new_letter == 'e .'
True
"""
},
]
}
]
} | test = { |
|
lib.rs | extern crate proc_macro;
#[proc_macro]
pub fn do_crimes(_: proc_macro::TokenStream) -> proc_macro::TokenStream | {
if !std::env::args_os().any(|arg| arg == "--cfg=yolo_rustc_bootstrap") {
let mut args = std::env::args_os();
let status = std::process::Command::new(args.next().unwrap())
.arg("--cfg=yolo_rustc_bootstrap")
.args(args)
.env("RUSTC_BOOTSTRAP", "1")
.status()
.unwrap();
std::process::exit(status.code().unwrap_or(101));
}
Default::default()
} |
|
init.go | /*
Copyright © 2020 NAME HERE <EMAIL ADDRESS>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"bufio"
"fmt"
"log"
"os"
"text/template"
"github.com/spf13/cobra"
)
// initCmd represents the init command
var initCmd = &cobra.Command{
Use: "init",
Short: "Generate the default configuration file in your home directory",
Long: `Generate the default configuration file
The file that will be created:
- $HOME/.dirlink.yaml`,
Run: func(cmd *cobra.Command, args []string) {
if err := runInit(); err != nil { | },
}
func runInit() error {
tmpl, err := template.New("config").Parse(string(MustAsset("cmd/data/templates/.dirlink.yaml")))
if err != nil {
return err
}
path := fmt.Sprintf("%s/.dirlink.yaml", os.Getenv("HOME"))
if fileExists(path) {
log.Printf("File already exists: %s\n", path)
return nil
}
f, err := os.Create(path)
if err != nil {
return err
}
defer f.Close()
w := bufio.NewWriter(f)
if err = tmpl.Execute(w, ""); err != nil {
return err
}
if err = w.Flush(); err != nil {
return err
}
log.Printf("File created at %s\n", path)
return nil
}
func fileExists(path string) bool {
_, err := os.Stat(path)
if err == nil {
return true
}
return os.IsExist(err)
}
func init() {
rootCmd.AddCommand(initCmd)
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// initCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// initCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
|
log.Fatal(err)
}
|
proxy.rs | use std::collections::HashMap;
use std::io::BufReader;
use std::process::Command;
use std::process::Stdio;
use std::thread;
use std::{path::PathBuf, process::Child, sync::Arc};
use alacritty_terminal::term::cell::Cell;
use anyhow::{anyhow, Result};
use crossbeam_channel::Sender;
use crossbeam_utils::sync::WaitGroup;
use druid::{ExtEventSink, WidgetId};
use druid::{Target, WindowId};
use flate2::read::GzDecoder;
use lapce_proxy::dispatch::{FileNodeItem, NewBufferResponse};
use lapce_proxy::plugin::PluginDescription;
use lapce_proxy::terminal::TermId;
use lsp_types::CompletionItem;
use lsp_types::Position;
use lsp_types::ProgressParams;
use lsp_types::PublishDiagnosticsParams;
use lsp_types::WorkDoneProgress;
use parking_lot::{Condvar, Mutex};
use serde::{Deserialize, Deserializer, Serialize};
use serde_json::json;
use serde_json::Value;
use xi_rope::RopeDelta;
use xi_rpc::Callback;
use xi_rpc::Handler;
use xi_rpc::RpcLoop;
use xi_rpc::RpcPeer;
use crate::command::LapceUICommand;
use crate::state::LapceWorkspace;
use crate::state::LapceWorkspaceType;
use crate::terminal::RawTerminal;
use crate::{buffer::BufferId, command::LAPCE_UI_COMMAND};
const VERSION: &str = env!("CARGO_PKG_VERSION");
pub enum TermEvent {
NewTerminal(Arc<Mutex<RawTerminal>>),
UpdateContent(String),
CloseTerminal,
}
#[derive(Clone)]
pub struct LapceProxy {
peer: Arc<Mutex<Option<RpcPeer>>>,
process: Arc<Mutex<Option<Child>>>,
initiated: Arc<Mutex<bool>>,
cond: Arc<Condvar>,
term_tx: Sender<(TermId, TermEvent)>,
pub tab_id: WidgetId,
}
impl LapceProxy {
pub fn new(tab_id: WidgetId, term_tx: Sender<(TermId, TermEvent)>) -> Self {
let proxy = Self {
peer: Arc::new(Mutex::new(None)),
process: Arc::new(Mutex::new(None)),
initiated: Arc::new(Mutex::new(false)),
cond: Arc::new(Condvar::new()),
term_tx,
tab_id,
};
proxy
}
pub fn start(&self, workspace: LapceWorkspace, event_sink: ExtEventSink) {
let proxy = self.clone();
*proxy.initiated.lock() = false;
let tab_id = self.tab_id;
let term_tx = self.term_tx.clone();
thread::spawn(move || {
let mut child = match workspace.kind {
LapceWorkspaceType::Local => Command::new(
std::env::current_exe()
.unwrap()
.parent()
.unwrap()
.join("lapce-proxy"),
)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn(),
LapceWorkspaceType::RemoteSSH(user, host) => {
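                    // Probe the remote host over SSH for a matching lapce-proxy binary;
                    // if it's missing, download the release locally and scp it across.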
let cmd = Command::new("ssh")
.arg(format!("{}@{}", user, host))
.arg("-o")
.arg("ControlMaster=auto")
.arg("-o")
.arg("ControlPath=~/.ssh/cm-%r@%h:%p")
.arg("-o")
.arg("ControlPersist=30m")
.arg("test")
.arg("-e")
.arg(format!("~/.lapce/lapce-proxy-{}", VERSION))
.output()
.unwrap();
println!("ssh check proxy file {:?}", cmd.status);
if !cmd.status.success() {
let url = format!("https://github.com/lapce/lapce/releases/download/v{VERSION}/lapce-proxy-linux.gz");
let mut resp =
reqwest::blocking::get(url).expect("request failed");
let local_path = format!("/tmp/lapce-proxy-{}", VERSION);
let mut out = std::fs::File::create(&local_path)
.expect("failed to create file");
let mut gz = GzDecoder::new(&mut resp);
std::io::copy(&mut gz, &mut out)
.expect("failed to copy content");
Command::new("ssh")
.arg(format!("{}@{}", user, host))
.arg("-o")
.arg("ControlMaster=auto")
.arg("-o")
.arg("ControlPath=~/.ssh/cm-%r@%h:%p")
.arg("-o")
.arg("ControlPersist=30m")
.arg("mkdir")
.arg("~/.lapce/")
.output()
.unwrap();
Command::new("scp")
.arg("-o")
.arg("ControlMaster=auto")
.arg("-o")
.arg("ControlPath=~/.ssh/cm-%r@%h:%p")
.arg("-o")
.arg("ControlPersist=30m")
.arg(&local_path)
.arg(format!(
"{user}@{host}:~/.lapce/lapce-proxy-{VERSION}"
))
.output()
.unwrap();
Command::new("ssh")
.arg(format!("{}@{}", user, host))
.arg("-o")
.arg("ControlMaster=auto")
.arg("-o")
.arg("ControlPath=~/.ssh/cm-%r@%h:%p")
.arg("-o")
.arg("ControlPersist=30m")
.arg("chmod")
.arg("+x")
.arg(format!("~/.lapce/lapce-proxy-{}", VERSION))
.output()
.unwrap();
}
Command::new("ssh")
.arg(format!("{}@{}", user, host))
.arg("-o")
.arg("ControlMaster=auto")
.arg("-o")
.arg("ControlPath=~/.ssh/cm-%r@%h:%p")
.arg("-o")
.arg("ControlPersist=30m")
.arg(format!("~/.lapce/lapce-proxy-{}", VERSION))
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()
}
};
if child.is_err() {
println!("can't start proxy {:?}", child);
return;
}
let mut child = child.unwrap();
let child_stdin = child.stdin.take().unwrap();
let child_stdout = child.stdout.take().unwrap();
let mut looper = RpcLoop::new(child_stdin);
let peer: RpcPeer = Box::new(looper.get_raw_peer());
{
*proxy.peer.lock() = Some(peer);
let mut process = proxy.process.lock();
let mut old_process = process.take();
*process = Some(child);
if let Some(mut old) = old_process {
old.kill();
}
}
proxy.initialize(workspace.path.clone());
{
*proxy.initiated.lock() = true;
proxy.cond.notify_all();
}
let mut handler = ProxyHandlerNew {
tab_id,
term_tx,
event_sink,
};
if let Err(e) =
looper.mainloop(|| BufReader::new(child_stdout), &mut handler)
{
println!("proxy main loop failed {:?}", e);
}
println!("proxy main loop exit");
});
}
fn wait(&self) {
let mut initiated = self.initiated.lock();
if !*initiated {
self.cond.wait(&mut initiated);
}
}
pub fn initialize(&self, workspace: PathBuf) {
self.peer.lock().as_ref().unwrap().send_rpc_notification(
"initialize",
&json!({
"workspace": workspace,
}),
)
}
pub fn terminal_close(&self, term_id: TermId) {
self.peer.lock().as_ref().unwrap().send_rpc_notification(
"terminal_close",
&json!({
"term_id": term_id,
}),
)
}
pub fn terminal_resize(&self, term_id: TermId, width: usize, height: usize) {
self.wait();
self.peer.lock().as_ref().unwrap().send_rpc_notification(
"terminal_resize",
&json!({
"term_id": term_id,
"width": width,
"height": height,
}),
)
}
pub fn terminal_write(&self, term_id: TermId, content: &str) {
self.wait();
self.peer.lock().as_ref().unwrap().send_rpc_notification(
"terminal_write",
&json!({
"term_id": term_id,
"content": content,
}),
)
}
pub fn new_terminal(
&self,
term_id: TermId,
cwd: Option<PathBuf>,
raw: Arc<Mutex<RawTerminal>>,
) {
self.term_tx.send((term_id, TermEvent::NewTerminal(raw)));
self.wait();
self.peer.lock().as_ref().unwrap().send_rpc_notification(
"new_terminal",
&json!({
"term_id": term_id,
"cwd": cwd,
}),
)
}
pub fn install_plugin(&self, plugin: &PluginDescription) {
self.peer
.lock()
.as_ref()
.unwrap()
.send_rpc_notification("install_plugin", &json!({ "plugin": plugin }));
}
pub fn get_buffer_head(
&self,
buffer_id: BufferId,
path: PathBuf,
f: Box<dyn Callback>,
) {
self.wait();
self.peer.lock().as_ref().unwrap().send_rpc_request_async(
"buffer_head",
&json!({ "buffer_id": buffer_id, "path": path, }),
f,
);
}
pub fn new_buffer(
&self,
buffer_id: BufferId,
path: PathBuf,
f: Box<dyn Callback>,
) {
self.wait();
self.peer.lock().as_ref().unwrap().send_rpc_request_async(
"new_buffer",
&json!({ "buffer_id": buffer_id, "path": path }),
f,
);
}
pub fn update(&self, buffer_id: BufferId, delta: &RopeDelta, rev: u64) {
self.peer.lock().as_ref().unwrap().send_rpc_notification(
"update",
&json!({
"buffer_id": buffer_id,
"delta": delta,
"rev": rev,
}),
)
}
pub fn save(&self, rev: u64, buffer_id: BufferId, f: Box<dyn Callback>) {
self.peer.lock().as_ref().unwrap().send_rpc_request_async(
"save",
&json!({
"rev": rev,
"buffer_id": buffer_id,
}),
f,
);
}
pub fn get_completion(
&self,
request_id: usize,
buffer_id: BufferId,
position: Position,
f: Box<dyn Callback>,
) {
self.peer.lock().as_ref().unwrap().send_rpc_request_async(
"get_completion",
&json!({
"request_id": request_id,
"buffer_id": buffer_id,
"position": position,
}),
f,
);
}
pub fn completion_resolve(
&self,
buffer_id: BufferId,
completion_item: CompletionItem,
f: Box<dyn Callback>,
) {
self.peer.lock().as_ref().unwrap().send_rpc_request_async(
"completion_resolve",
&json!({
"buffer_id": buffer_id,
"completion_item": completion_item,
}),
f,
);
}
pub fn get_signature(
&self,
buffer_id: BufferId,
position: Position,
f: Box<dyn Callback>,
) |
pub fn get_references(
&self,
buffer_id: BufferId,
position: Position,
f: Box<dyn Callback>,
) {
self.peer.lock().as_ref().unwrap().send_rpc_request_async(
"get_references",
&json!({
"buffer_id": buffer_id,
"position": position,
}),
f,
);
}
pub fn get_files(&self, f: Box<dyn Callback>) {
if let Some(peer) = self.peer.lock().as_ref() {
peer.send_rpc_request_async(
"get_files",
&json!({
"path": "path",
}),
f,
);
}
}
pub fn read_dir(&self, path: &PathBuf, f: Box<dyn Callback>) {
self.wait();
self.peer.lock().as_ref().unwrap().send_rpc_request_async(
"read_dir",
&json!({
"path": path,
}),
f,
);
}
pub fn get_definition(
&self,
request_id: usize,
buffer_id: BufferId,
position: Position,
f: Box<dyn Callback>,
) {
self.peer.lock().as_ref().unwrap().send_rpc_request_async(
"get_definition",
&json!({
"request_id": request_id,
"buffer_id": buffer_id,
"position": position,
}),
f,
);
}
pub fn get_document_symbols(&self, buffer_id: BufferId, f: Box<dyn Callback>) {
self.peer.lock().as_ref().unwrap().send_rpc_request_async(
"get_document_symbols",
&json!({
"buffer_id": buffer_id,
}),
f,
);
}
pub fn get_code_actions(
&self,
buffer_id: BufferId,
position: Position,
f: Box<dyn Callback>,
) {
if let Some(peer) = self.peer.lock().as_ref() {
peer.send_rpc_request_async(
"get_code_actions",
&json!({
"buffer_id": buffer_id,
"position": position,
}),
f,
);
}
}
pub fn get_document_formatting(
&self,
buffer_id: BufferId,
f: Box<dyn Callback>,
) {
self.peer.lock().as_ref().unwrap().send_rpc_request_async(
"get_document_formatting",
&json!({
"buffer_id": buffer_id,
}),
f,
);
}
pub fn stop(&self) {
let mut process = self.process.lock();
if let Some(mut p) = process.as_mut() {
p.kill();
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum CursorShape {
/// Cursor is a block like `▒`.
Block,
/// Cursor is an underscore like `_`.
Underline,
/// Cursor is a vertical bar `⎸`.
Beam,
/// Cursor is a box like `☐`.
HollowBlock,
/// Invisible cursor.
Hidden,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
#[serde(tag = "method", content = "params")]
pub enum Notification {
SemanticTokens {
rev: u64,
buffer_id: BufferId,
path: PathBuf,
tokens: Vec<(usize, usize, String)>,
},
UpdateGit {
buffer_id: BufferId,
line_changes: HashMap<usize, char>,
rev: u64,
},
ReloadBuffer {
buffer_id: BufferId,
new_content: String,
rev: u64,
},
PublishDiagnostics {
diagnostics: PublishDiagnosticsParams,
},
WorkDoneProgress {
progress: ProgressParams,
},
InstalledPlugins {
plugins: HashMap<String, PluginDescription>,
},
ListDir {
items: Vec<FileNodeItem>,
},
DiffFiles {
files: Vec<PathBuf>,
},
UpdateTerminal {
term_id: TermId,
content: String,
},
CloseTerminal {
term_id: TermId,
},
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Request {}
pub struct ProxyHandlerNew {
tab_id: WidgetId,
term_tx: Sender<(TermId, TermEvent)>,
event_sink: ExtEventSink,
}
impl Handler for ProxyHandlerNew {
type Notification = Notification;
type Request = Request;
fn handle_notification(
&mut self,
ctx: &xi_rpc::RpcCtx,
rpc: Self::Notification,
) {
match rpc {
Notification::SemanticTokens {
rev,
buffer_id,
path,
tokens,
} => {
self.event_sink.submit_command(
LAPCE_UI_COMMAND,
LapceUICommand::UpdateSemanticTokens(
buffer_id, path, rev, tokens,
),
Target::Widget(self.tab_id),
);
}
Notification::UpdateGit {
buffer_id,
line_changes,
rev,
} => {
self.event_sink.submit_command(
LAPCE_UI_COMMAND,
LapceUICommand::UpdateBufferLineChanges(
buffer_id,
rev,
line_changes,
),
Target::Widget(self.tab_id),
);
}
Notification::ReloadBuffer {
buffer_id,
new_content,
rev,
} => {
self.event_sink.submit_command(
LAPCE_UI_COMMAND,
LapceUICommand::ReloadBuffer(buffer_id, rev, new_content),
Target::Widget(self.tab_id),
);
}
Notification::PublishDiagnostics { diagnostics } => {
self.event_sink.submit_command(
LAPCE_UI_COMMAND,
LapceUICommand::PublishDiagnostics(diagnostics),
Target::Widget(self.tab_id),
);
}
Notification::WorkDoneProgress { progress } => {
self.event_sink.submit_command(
LAPCE_UI_COMMAND,
LapceUICommand::WorkDoneProgress(progress),
Target::Widget(self.tab_id),
);
}
Notification::InstalledPlugins { plugins } => {
self.event_sink.submit_command(
LAPCE_UI_COMMAND,
LapceUICommand::UpdateInstalledPlugins(plugins),
Target::Widget(self.tab_id),
);
}
Notification::ListDir { items } => {}
Notification::DiffFiles { files } => {
self.event_sink.submit_command(
LAPCE_UI_COMMAND,
LapceUICommand::UpdateDiffFiles(files),
Target::Widget(self.tab_id),
);
}
Notification::UpdateTerminal { term_id, content } => {
self.term_tx
.send((term_id, TermEvent::UpdateContent(content)));
}
Notification::CloseTerminal { term_id } => {
self.term_tx.send((term_id, TermEvent::CloseTerminal));
self.event_sink.submit_command(
LAPCE_UI_COMMAND,
LapceUICommand::CloseTerminal(term_id),
Target::Widget(self.tab_id),
);
}
}
}
fn handle_request(
&mut self,
ctx: &xi_rpc::RpcCtx,
rpc: Self::Request,
) -> Result<serde_json::Value, xi_rpc::RemoteError> {
Err(xi_rpc::RemoteError::InvalidRequest(None))
}
}
| {
self.peer.lock().as_ref().unwrap().send_rpc_request_async(
"get_signature",
&json!({
"buffer_id": buffer_id,
"position": position,
}),
f,
);
} |
datainbyte.rs | #[doc = "Register `DATAINBYTE[%s]` reader"]
pub struct R(crate::R<DATAINBYTE_SPEC>); | fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<DATAINBYTE_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<DATAINBYTE_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Data In Register by Byte\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [datainbyte](index.html) module"]
pub struct DATAINBYTE_SPEC;
impl crate::RegisterSpec for DATAINBYTE_SPEC {
type Ux = u8;
}
#[doc = "`read()` method returns [datainbyte::R](R) reader structure"]
impl crate::Readable for DATAINBYTE_SPEC {
type Reader = R;
}
#[doc = "`reset()` method sets DATAINBYTE[%s]
to value 0"]
impl crate::Resettable for DATAINBYTE_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
} | impl core::ops::Deref for R {
type Target = crate::R<DATAINBYTE_SPEC>;
#[inline(always)] |
fallback_page.rs | use headers::{AcceptRanges, ContentLength, ContentType, HeaderMapExt};
use hyper::{Body, Response, StatusCode};
use mime_guess::mime;
/// Builds the fallback-page response served when a request would otherwise result in a 404 error and a fallback page is configured:
/// a `200 OK` reply whose HTML body is the configured fallback page content.
pub fn | (page_fallback: &str) -> Response<Body> {
let body = Body::from(page_fallback.to_owned());
let len = page_fallback.len() as u64;
let mut resp = Response::new(body);
*resp.status_mut() = StatusCode::OK;
resp.headers_mut().typed_insert(ContentLength(len));
resp.headers_mut()
.typed_insert(ContentType::from(mime::TEXT_HTML_UTF_8));
resp.headers_mut().typed_insert(AcceptRanges::bytes());
resp
}
| fallback_response |