repo_name
stringlengths 4
116
| path
stringlengths 4
379
| size
stringlengths 1
7
| content
stringlengths 3
1.05M
| license
stringclasses 15
values |
---|---|---|---|---|
shakirbsm/dealii | tests/matrix_free/thread_correctness_hp.cc | 7128 | // ---------------------------------------------------------------------
//
// Copyright (C) 2013 - 2015-2014 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------
// this function tests the correctness of the implementation of parallel
// matrix free matrix-vector products for hp elements by comparing to the
// serial version
#include "../tests.h"
std::ofstream logfile("output");
#include "create_mesh.h"
#include "matrix_vector_common.h"
#include <deal.II/hp/dof_handler.h>
#include <deal.II/hp/fe_values.h>
#include <deal.II/base/function.h>
#include <deal.II/base/template_constraints.h>
template <int dim, typename Number>
class MatrixFreeTestHP
{
public:
MatrixFreeTestHP(const MatrixFree<dim,Number> &data_in):
data (data_in)
{};
void
local_apply (const MatrixFree<dim,Number> &data,
Vector<Number> &dst,
const Vector<Number> &src,
const std::pair<unsigned int,unsigned int> &cell_range) const
{
// Ask MatrixFree for cell_range for different
// orders
std::pair<unsigned int,unsigned int> subrange_deg;
#define CALL_METHOD(degree) \
subrange_deg = data.create_cell_subrange_hp(cell_range, degree); \
if (subrange_deg.second > subrange_deg.first) \
helmholtz_operator<dim,degree,Vector<Number>,degree+1> (data, dst, src, subrange_deg)
CALL_METHOD(1);
CALL_METHOD(2);
CALL_METHOD(3);
CALL_METHOD(4);
CALL_METHOD(5);
CALL_METHOD(6);
CALL_METHOD(7);
#undef CALL_METHOD
}
void vmult (Vector<Number> &dst,
const Vector<Number> &src) const
{
dst = 0;
data.cell_loop (&MatrixFreeTestHP<dim,Number>::local_apply, this, dst, src);
};
private:
const MatrixFree<dim,Number> &data;
};
template <int dim, typename number>
void do_test (const unsigned int parallel_option)
{
Triangulation<dim> tria;
create_mesh (tria);
// larger mesh in release mode
#ifndef DEBUG
tria.refine_global(2);
#endif
// refine a few cells
for (unsigned int i=0; i<11-3*dim; ++i)
{
typename Triangulation<dim>::active_cell_iterator
cell = tria.begin_active (),
endc = tria.end();
for (; cell!=endc; ++cell)
if (Testing::rand() % (7-i) == 0)
cell->set_refine_flag();
tria.execute_coarsening_and_refinement();
}
const unsigned int max_degree = 9-2*dim;
hp::FECollection<dim> fe_collection;
hp::QCollection<1> quadrature_collection_mf;
for (unsigned int deg=1; deg<=max_degree; ++deg)
{
fe_collection.push_back (FE_Q<dim>(QGaussLobatto<1>(deg+1)));
quadrature_collection_mf.push_back (QGauss<1>(deg+1));
}
hp::DoFHandler<dim> dof(tria);
// set the active FE index in a random order
{
typename hp::DoFHandler<dim>::active_cell_iterator
cell = dof.begin_active(),
endc = dof.end();
for (; cell!=endc; ++cell)
{
const unsigned int fe_index = Testing::rand() % max_degree;
cell->set_active_fe_index (fe_index);
}
}
// setup DoFs
dof.distribute_dofs(fe_collection);
ConstraintMatrix constraints;
DoFTools::make_hanging_node_constraints (dof,
constraints);
VectorTools::interpolate_boundary_values (dof,
0,
ZeroFunction<dim>(),
constraints);
constraints.close ();
//std::cout << "Number of cells: " << dof.get_triangulation().n_active_cells() << std::endl;
//std::cout << "Number of degrees of freedom: " << dof.n_dofs() << std::endl;
//std::cout << "Number of constraints: " << constraints.n_constraints() << std::endl;
// set up reference MatrixFree
MatrixFree<dim,number> mf_data;
typename MatrixFree<dim,number>::AdditionalData data;
data.tasks_parallel_scheme =
MatrixFree<dim,number>::AdditionalData::none;
mf_data.reinit (dof, constraints, quadrature_collection_mf, data);
MatrixFreeTestHP<dim,number> mf (mf_data);
// test different block sizes, starting from
// auto setting (= 0)
for (unsigned int block_size = 0; block_size < 5; ++block_size)
{
deallog.push ("blk_" + Utilities::int_to_string(block_size,1));
MatrixFree<dim,number> mf_data_par;
if (parallel_option == 0)
{
data.tasks_parallel_scheme =
MatrixFree<dim,number>::AdditionalData::partition_partition;
deallog << "Parallel option partition/partition" << std::endl;
}
else if (parallel_option == 1)
{
data.tasks_parallel_scheme =
MatrixFree<dim,number>::AdditionalData::partition_color;
deallog << "Parallel option partition/color" << std::endl;
}
else
{
data.tasks_parallel_scheme =
MatrixFree<dim,number>::AdditionalData::color;
deallog << "Parallel option partition/color" << std::endl;
}
data.tasks_block_size = 1;
mf_data_par.reinit (dof, constraints, quadrature_collection_mf, data);
MatrixFreeTestHP<dim,number> mf_par(mf_data_par);
// fill a right hand side vector with random
// numbers in unconstrained degrees of freedom
Vector<number> src (dof.n_dofs());
Vector<number> result_ref(src), result_mf (src);
for (unsigned int i=0; i<dof.n_dofs(); ++i)
{
if (constraints.is_constrained(i) == false)
src(i) = (double)Testing::rand()/RAND_MAX;
}
// now perform 30 matrix-vector products in
// parallel and check their correctness (take
// many of them to make sure that we hit an
// error)
mf.vmult (result_ref, src);
deallog << "Norm of difference: ";
for (unsigned int i=0; i<50; ++i)
{
mf_par.vmult (result_mf, src);
result_mf -= result_ref;
double diff_norm = result_mf.linfty_norm()/result_ref.linfty_norm();
deallog << diff_norm << " ";
}
deallog << std::endl << std::endl;
deallog.pop();
}
}
template <int dim, int fe_degree>
void test ()
{
// 'misuse' fe_degree for setting the parallel
// option here
unsigned int parallel_option = 0;
if (fe_degree == 1)
parallel_option = 0;
else if (fe_degree == 2)
parallel_option = 1;
else
return;
deallog.push("double");
deallog.threshold_double(1.e-12);
do_test<dim,double>(parallel_option);
deallog.pop();
deallog.push("float");
deallog.threshold_double(1.e-6);
do_test<dim,float>(parallel_option);
deallog.pop();
}
| lgpl-2.1 |
EzyWebwerkstaden/n2cms | src/Framework/N2/Edit/Versioning/InvalidVersionInfo.cs | 1908 | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace N2.Edit.Versioning
{
public sealed class InvalidVersionInfo : VersionInfo
{
public InvalidVersionInfo()
{
Title = "Invalid version";
State = ContentState.None;
SavedBy = string.Empty;
}
//public new int ID
//{
// get { return 0; }
// set { throw new InvalidOperationException("Can't set the ID of the invalid version"); }
//}
//public new string Title
//{
// get { return "Invalid version"; }
// set { throw new InvalidOperationException("Can't set title of the invalid version"); }
//}
//public new ContentState State
//{
// get { return ContentState.None; }
// set { throw new InvalidOperationException(); }
//}
//public new string IconUrl {
// get { return null; }
// set { throw new InvalidOperationException(); }
//}
//public new DateTime? Published
//{
// get { return null; }
// set { throw new InvalidOperationException(); }
//}
//public new DateTime? FuturePublish
//{
// get { return null; }
// set { throw new InvalidOperationException(); }
//}
//public new DateTime? Expires
//{
// get { return null; }
// set { throw new InvalidOperationException(); }
//}
//public new int VersionIndex
//{
// get { return 0; }
// set { throw new InvalidOperationException(); }
//}
//public new int PartsCount
//{
// get { return 0; }
// set { throw new InvalidOperationException(); }
//}
//public new string SavedBy
//{
// get { return null; }
// set { throw new InvalidOperationException(); }
//}
public new ContentItem Content
{
get { return null; }
}
public new Func<ContentItem> ContentFactory { get; set; }
public Exception InnerException { get; set; }
}
}
| lgpl-2.1 |
LLNL/spack | var/spack/repos/builtin/packages/examl/package.py | 2255 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Examl(MakefilePackage):
"""
Exascale Maximum Likelihood (ExaML) code for phylogenetic inference
using MPI. This code implements the popular RAxML search algorithm
for maximum likelihood based inference of phylogenetic trees.
"""
homepage = "https://github.com/stamatak/ExaML"
url = "https://github.com/stamatak/ExaML/archive/v3.0.22.tar.gz"
maintainers = ['robqiao']
version('3.0.22', sha256='802e673b0c2ea83fdbe6b060048d83f22b6978933a04be64fb9b4334fe318ca3')
version('3.0.21', sha256='6c7e6c5d7bf4ab5cfbac5cc0d577885272a803c142e06b531693a6a589102e2e')
version('3.0.20', sha256='023681248bbc7f19821b509948d79301e46bbf275aa90bf12e9f4879639a023b')
version('3.0.19', sha256='3814230bf7578b8396731dc87ce665d0b1a671d8effd571f924c5b7936ae1c9e')
version('3.0.18', sha256='1bacb5124d943d921e7beae52b7062626d0ce3cf2f83e3aa3acf6ea26cf9cd87')
version('3.0.17', sha256='90a859e0b8fff697722352253e748f03c57b78ec5fbc1ae72f7e702d299dac67')
version('3.0.16', sha256='abc922994332d40892e30f077e4644db08cd59662da8e2a9197d1bd8bcb9aa5f')
version('3.0.15', sha256='da5e66a63d6fa34b640535c359d8daf67f23bd2fcc958ac604551082567906b0')
version('3.0.14', sha256='698b538996946ae23a2d6fa1e230c210832e59080da33679ff7d6b342a9e6180')
version('3.0.13', sha256='893aecb5545798235a17975aa07268693d3526d0aee0ed59a2d6e791248791ed')
variant('mpi', default=True, description='Enable MPI parallel support')
depends_on('mpi', when='+mpi')
def build(self, spec, prefix):
#####################
# Build Directories #
#####################
with working_dir('examl'):
make('-f', 'Makefile.SSE3.gcc')
with working_dir('parser'):
make('-f', 'Makefile.SSE3.gcc')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install("examl/examl", prefix.bin)
install("parser/parse-examl", prefix.bin)
install_tree("manual", prefix.manual)
install_tree("testData", prefix.testData)
| lgpl-2.1 |
elsiklab/intermine | bio/sources/human/atlas-express/main/src/org/intermine/bio/dataconversion/AtlasExpressConverter.java | 4538 | package org.intermine.bio.dataconversion;
/*
* Copyright (C) 2002-2017 FlyMine
*
* This code may be freely distributed and modified under the
* terms of the GNU Lesser General Public Licence. This should
* be distributed with the code. See the LICENSE file for more
* information or http://www.gnu.org/copyleft/lesser.html.
*
*/
import java.io.Reader;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.intermine.dataconversion.ItemWriter;
import org.intermine.metadata.Model;
import org.intermine.objectstore.ObjectStoreException;
import org.intermine.util.FormattedTextParser;
import org.intermine.xml.full.Item;
/**
* A data parser for illumina body map.
* @author Julie Sullivan
*/
public class AtlasExpressConverter extends BioFileConverter
{
private static final String DATASET_TITLE = "E-MTAB-513 illumina body map";
private static final String DATA_SOURCE_NAME = "ArrayExpress";
private Map<String, String> genes = new HashMap<String, String>();
protected IdResolver rslv;
private static final String TAXON_ID = "9606";
private static final Logger LOG = Logger.getLogger(AtlasExpressConverter.class);
private static final String EXPRESSION_TYPE = "FPKM value";
/**
* Constructor
* @param writer the ItemWriter used to handle the resultant items
* @param model the Model
*/
public AtlasExpressConverter(ItemWriter writer, Model model) {
super(writer, model, DATA_SOURCE_NAME, DATASET_TITLE);
if (rslv == null) {
rslv = IdResolverService.getIdResolverByOrganism(TAXON_ID);
}
}
/**
* Read Atlas Express TSV file.
*
* {@inheritDoc}
*/
@Override
public void process(Reader reader) throws Exception {
/* data has format
Gene ID Gene Name adipose adrenal brain breast colon heart kidney leukocyte
liver lung lymph node ovary prostate skeletal muscle testis thyroid
ENSG00000000003 TSPAN6 21 5 5 16 12 2 13 0.1 31
*/
Iterator<?> lineIter = FormattedTextParser.parseTabDelimitedReader(reader);
// parse header
String[] header = (String[]) lineIter.next();
// each gene is on a new line
while (lineIter.hasNext()) {
String[] line = (String[]) lineIter.next();
String geneId = getGeneId(line[0]);
if (StringUtils.isEmpty(geneId)) {
continue;
}
// each column represents a tissue
// skip first two columns, gene name
for (int i = 2; i < header.length; i++) {
String tissue = header[i];
String expression = line[i];
Item item = createItem("AtlasExpression");
item.setAttribute("type", EXPRESSION_TYPE);
item.setAttribute("condition", tissue);
item.setAttribute("expression", expression);
item.setReference("gene", geneId);
store(item);
}
}
}
private String getGeneId(String primaryIdentifier) throws ObjectStoreException {
String resolvedIdentifier = resolveGene(primaryIdentifier);
if (StringUtils.isEmpty(resolvedIdentifier)) {
return null;
}
String geneId = genes.get(resolvedIdentifier);
if (geneId == null) {
Item gene = createItem("Gene");
gene.setAttribute("primaryIdentifier", resolvedIdentifier);
gene.setReference("organism", getOrganism(TAXON_ID));
store(gene);
geneId = gene.getIdentifier();
genes.put(resolvedIdentifier, geneId);
}
return geneId;
}
private String resolveGene(String identifier) {
String id = identifier;
if (rslv != null && rslv.hasTaxon(TAXON_ID)) {
int resCount = rslv.countResolutions(TAXON_ID, identifier);
if (resCount != 1) {
LOG.info("RESOLVER: failed to resolve gene to one identifier, ignoring gene: "
+ identifier + " count: " + resCount + " Human identifier: "
+ rslv.resolveId(TAXON_ID, identifier));
return null;
}
id = rslv.resolveId(TAXON_ID, identifier).iterator().next();
}
return id;
}
}
| lgpl-2.1 |
lnu/nhibernate-core | src/NHibernate.Test/Async/NHSpecificTest/NH1507/Fixture.cs | 3420 | //------------------------------------------------------------------------------
// <auto-generated>
// This code was generated by AsyncGenerator.
//
// Changes to this file may cause incorrect behavior and will be lost if
// the code is regenerated.
// </auto-generated>
//------------------------------------------------------------------------------
using System;
using System.Collections;
using NHibernate.Driver;
using NUnit.Framework;
namespace NHibernate.Test.NHSpecificTest.NH1507
{
using System.Threading.Tasks;
[TestFixture]
public class FixtureAsync : BugTestCase
{
protected override bool AppliesTo(Engine.ISessionFactoryImplementor factory)
{
return !(factory.ConnectionProvider.Driver is OracleManagedDataClientDriver);
}
protected override void OnSetUp()
{
CreateData();
}
protected override void OnTearDown()
{
CleanupData();
}
private void CreateData()
{
//Employee
var emp = new Employee
{
Address = "Zombie street",
City = "Bitonto",
PostalCode = "66666",
FirstName = "tomb",
LastName = "mutilated"
};
//and his related orders
var order = new Order
{OrderDate = DateTime.Now, Employee = emp, ShipAddress = "dead zone 1", ShipCountry = "Deadville"};
var order2 = new Order
{OrderDate = DateTime.Now, Employee = emp, ShipAddress = "dead zone 2", ShipCountry = "Deadville"};
//Employee with no related orders but with same PostalCode
var emp2 = new Employee
{
Address = "Gut street",
City = "Mariotto",
Country = "Arised",
PostalCode = "66666",
FirstName = "carcass",
LastName = "purulent"
};
//Order with no related employee but with same ShipCountry
var order3 = new Order {OrderDate = DateTime.Now, ShipAddress = "dead zone 2", ShipCountry = "Deadville"};
using (ISession session = OpenSession())
{
using (ITransaction tx = session.BeginTransaction())
{
session.Save(emp);
session.Save(emp2);
session.Save(order);
session.Save(order2);
session.Save(order3);
tx.Commit();
}
}
}
private void CleanupData()
{
using (ISession session = OpenSession())
{
using (ITransaction tx = session.BeginTransaction())
{
//delete empolyee and related orders
session.Delete("from Employee ee where ee.PostalCode = '66666'");
//delete order not related to employee
session.Delete("from Order oo where oo.ShipCountry = 'Deadville'");
tx.Commit();
}
}
}
[Test]
public async Task ExplicitJoinAsync()
{
using (ISession session = OpenSession())
{
//explicit join
IList results =
await (session.CreateQuery("select count(*) from Order as entity join entity.Employee ee "
+ "where ee.PostalCode='66666' or entity.ShipCountry='Deadville'").ListAsync());
//Debug.Assert(list[0].Equals(191), "Wrong number of orders, returned: " + list[0].ToString());
Assert.AreEqual(2, results[0]);
}
}
[Test]
public async Task ImplicitJoinFailingTestAsync()
{
using (ISession session = OpenSession())
{
//implicit join
IList results =
await (session.CreateQuery("select count(*) from Order as entity "
+ "where entity.Employee.PostalCode='66666' or entity.ShipCountry='Deadville'").ListAsync());
Assert.AreEqual(2, results[0]);
}
}
}
} | lgpl-2.1 |
hellcoderz/thebeast | src/thebeast/pml/parser/ParserShift.java | 322 | package thebeast.pml.parser;
/**
* @author Sebastian Riedel
*/
public class ParserShift extends ParserStatement {
public final int delta;
public ParserShift(int delta) {
this.delta = delta;
}
public void acceptParserStatementVisitor(ParserStatementVisitor visitor) {
visitor.visitShift(this);
}
}
| lgpl-3.0 |
MinetestForFun/server-minetestforfun | mods/plantlife_modpack/dryplants/juncus.lua | 3965 | -----------------------------------------------------------------------------------------------
-- Grasses - Juncus 0.0.5
-----------------------------------------------------------------------------------------------
-- by Mossmanikin
-- textures & ideas partly by Neuromancer
-- License (everything): WTFPL
-- Contains code from: biome_lib
-- Looked at code from: default
-----------------------------------------------------------------------------------------------
abstract_dryplants.grow_juncus = function(pos)
local juncus_type = math.random(2,3)
local right_here = {x=pos.x, y=pos.y+1, z=pos.z}
if minetest.get_node(right_here).name == "air" -- instead of check_air = true,
or minetest.get_node(right_here).name == "default:junglegrass" then
if juncus_type == 2 then
minetest.set_node(right_here, {name="dryplants:juncus_02"})
else
minetest.set_node(right_here, {name="dryplants:juncus"})
end
end
end
minetest.register_node("dryplants:juncus", {
description = "Juncus",
drawtype = "plantlike",
visual_scale = 2,
paramtype = "light",
tiles = {"dryplants_juncus_03.png"},
inventory_image = "dryplants_juncus_inv.png",
walkable = false,
buildable_to = true,
groups = {
snappy=3,
flammable=2,
attached_node=1,
flora=1
--not_in_creative_inventory=1
},
sounds = default.node_sound_leaves_defaults(),
selection_box = {
type = "fixed",
fixed = {-7/16, -1/2, -7/16, 7/16, 0, 7/16},
},
on_place = function(itemstack, placer, pointed_thing)
local playername = placer:get_player_name()
if minetest.is_protected(pointed_thing.above, playername) or
minetest.is_protected(pointed_thing.under, playername) then
minetest.chat_send_player(playername, "Someone else owns that spot.")
return
end
local pos = pointed_thing.under
local juncus_type = math.random(2,3)
local right_here = {x=pos.x, y=pos.y+1, z=pos.z}
if juncus_type == 2 then
minetest.set_node(right_here, {name="dryplants:juncus_02"})
else
minetest.set_node(right_here, {name="dryplants:juncus"})
end
if not minetest.setting_getbool("creative_mode") then
itemstack:take_item()
end
return itemstack
end,
})
minetest.register_node("dryplants:juncus_02", {
description = "Juncus",
drawtype = "plantlike",
visual_scale = 2,
paramtype = "light",
tiles = {"dryplants_juncus_02.png"},
walkable = false,
buildable_to = true,
groups = {
snappy=3,
flammable=2,
attached_node=1,
flora=1,
not_in_creative_inventory=1
},
sounds = default.node_sound_leaves_defaults(),
selection_box = {
type = "fixed",
fixed = {-7/16, -1/2, -7/16, 7/16, 0, 7/16},
},
drop = "dryplants:juncus",
})
-----------------------------------------------------------------------------------------------
-- GENERATE SMALL JUNCUS
-----------------------------------------------------------------------------------------------
-- near water or swamp
biome_lib:register_generate_plant({
surface = {
"default:dirt_with_grass",
--"default:desert_sand",
--"default:sand",
"stoneage:grass_with_silex",
"sumpf:peat",
"sumpf:sumpf"
},
max_count = JUNCUS_NEAR_WATER_PER_MAPBLOCK,
rarity = 101 - JUNCUS_NEAR_WATER_RARITY,
min_elevation = 1, -- above sea level
near_nodes = {"default:water_source","sumpf:dirtywater_source","sumpf:sumpf"},
near_nodes_size = 2,
near_nodes_vertical = 1,
near_nodes_count = 1,
plantlife_limit = -0.9,
},
abstract_dryplants.grow_juncus
)
-- at dunes/beach
biome_lib:register_generate_plant({
surface = {
--"default:dirt_with_grass",
--"default:desert_sand",
"default:sand",
--"stoneage:grass_with_silex",
--"sumpf:peat",
--"sumpf:sumpf"
},
max_count = JUNCUS_AT_BEACH_PER_MAPBLOCK,
rarity = 101 - JUNCUS_AT_BEACH_RARITY,
min_elevation = 1, -- above sea level
near_nodes = {"default:dirt_with_grass"},
near_nodes_size = 2,
near_nodes_vertical = 1,
near_nodes_count = 1,
plantlife_limit = -0.9,
},
abstract_dryplants.grow_juncus
)
| unlicense |
graydon/rust | src/test/ui/rfc-2632-const-trait-impl/inherent-impl.rs | 218 | #![feature(const_trait_impl)]
#![allow(bare_trait_objects)]
struct S;
trait T {}
impl const S {}
//~^ ERROR inherent impls cannot be `const`
impl const T {}
//~^ ERROR inherent impls cannot be `const`
fn main() {}
| apache-2.0 |
jamesyong/o3erp | java/framework/webapp/src/org/ofbiz/webapp/event/RomeEventHandler.java | 4009 | /*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package org.ofbiz.webapp.event;
import java.io.IOException;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.ofbiz.webapp.control.ConfigXMLReader;
import org.ofbiz.webapp.control.RequestHandler;
import org.ofbiz.webapp.control.ConfigXMLReader.Event;
import org.ofbiz.webapp.control.ConfigXMLReader.RequestMap;
import com.sun.syndication.feed.WireFeed;
import com.sun.syndication.io.FeedException;
import com.sun.syndication.io.WireFeedOutput;
/**
* RomeEventHandler
*/
public class RomeEventHandler implements EventHandler {
public static final String module = RomeEventHandler.class.getName();
public static final String mime = "application/xml; charset=UTF-8";
public static final String defaultFeedType = "rss_2.0";
protected RequestHandler handler;
protected ServletContext context;
protected EventHandler service;
protected WireFeedOutput out;
public void init(ServletContext context) throws EventHandlerException {
this.context = context;
this.handler = (RequestHandler) context.getAttribute("_REQUEST_HANDLER_");
if (this.handler == null) {
throw new EventHandlerException("No request handler found in servlet context!");
}
// get the service event handler
this.service = new ServiceEventHandler();
this.service.init(context);
this.out = new WireFeedOutput();
}
/**
* @see org.ofbiz.webapp.event.EventHandler#invoke(ConfigXMLReader.Event, ConfigXMLReader.RequestMap, javax.servlet.http.HttpServletRequest, javax.servlet.http.HttpServletResponse)
*/
public String invoke(Event event, RequestMap requestMap, HttpServletRequest request, HttpServletResponse response) throws EventHandlerException {
// generate the main and entry links
String entryLinkReq = request.getParameter("entryLinkReq");
String mainLinkReq = request.getParameter("mainLinkReq");
// create the links; but the query string must be created by the service
String entryLink = handler.makeLink(request, response, entryLinkReq, true, false, false);
String mainLink = handler.makeLink(request, response, mainLinkReq, true, false, false);
request.setAttribute("entryLink", entryLink);
request.setAttribute("mainLink", mainLink);
String feedType = request.getParameter("feedType");
if (feedType == null) {
request.setAttribute("feedType", defaultFeedType);
}
// invoke the feed generator service (implements rssFeedInterface)
String respCode = service.invoke(event, requestMap, request, response);
// pull out the RSS feed from the request attributes
WireFeed wireFeed = (WireFeed) request.getAttribute("wireFeed");
response.setContentType(mime);
try {
out.output(wireFeed, response.getWriter());
} catch (IOException e) {
throw new EventHandlerException("Unable to get response writer", e);
} catch (FeedException e) {
throw new EventHandlerException("Unable to write RSS feed", e);
}
return respCode;
}
}
| apache-2.0 |
robin13/elasticsearch | server/src/test/java/org/elasticsearch/action/bulk/BulkResponseTests.java | 5029 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.action.bulk;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.delete.DeleteResponseTests;
import org.elasticsearch.action.index.IndexResponseTests;
import org.elasticsearch.action.update.UpdateResponseTests;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import static org.elasticsearch.ElasticsearchExceptionTests.randomExceptions;
import static org.elasticsearch.action.bulk.BulkItemResponseTests.assertBulkItemResponse;
import static org.elasticsearch.action.bulk.BulkResponse.NO_INGEST_TOOK;
import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
public class BulkResponseTests extends ESTestCase {
public void testToAndFromXContent() throws IOException {
XContentType xContentType = randomFrom(XContentType.values());
boolean humanReadable = randomBoolean();
long took = randomFrom(randomNonNegativeLong(), -1L);
long ingestTook = randomFrom(randomNonNegativeLong(), NO_INGEST_TOOK);
int nbBulkItems = randomIntBetween(1, 10);
BulkItemResponse[] bulkItems = new BulkItemResponse[nbBulkItems];
BulkItemResponse[] expectedBulkItems = new BulkItemResponse[nbBulkItems];
for (int i = 0; i < nbBulkItems; i++) {
DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values());
if (frequently()) {
Tuple<? extends DocWriteResponse, ? extends DocWriteResponse> randomDocWriteResponses = null;
if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
randomDocWriteResponses = IndexResponseTests.randomIndexResponse();
} else if (opType == DocWriteRequest.OpType.DELETE) {
randomDocWriteResponses = DeleteResponseTests.randomDeleteResponse();
} else if (opType == DocWriteRequest.OpType.UPDATE) {
randomDocWriteResponses = UpdateResponseTests.randomUpdateResponse(xContentType);
} else {
fail("Test does not support opType [" + opType + "]");
}
bulkItems[i] = new BulkItemResponse(i, opType, randomDocWriteResponses.v1());
expectedBulkItems[i] = new BulkItemResponse(i, opType, randomDocWriteResponses.v2());
} else {
String index = randomAlphaOfLength(5);
String id = randomAlphaOfLength(5);
Tuple<Throwable, ElasticsearchException> failures = randomExceptions();
Exception bulkItemCause = (Exception) failures.v1();
bulkItems[i] = new BulkItemResponse(i, opType,
new BulkItemResponse.Failure(index, id, bulkItemCause));
expectedBulkItems[i] = new BulkItemResponse(i, opType,
new BulkItemResponse.Failure(index, id, failures.v2(), ExceptionsHelper.status(bulkItemCause)));
}
}
BulkResponse bulkResponse = new BulkResponse(bulkItems, took, ingestTook);
BytesReference originalBytes = toShuffledXContent(bulkResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable);
BulkResponse parsedBulkResponse;
try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
parsedBulkResponse = BulkResponse.fromXContent(parser);
assertNull(parser.nextToken());
}
assertEquals(took, parsedBulkResponse.getTook().getMillis());
assertEquals(ingestTook, parsedBulkResponse.getIngestTookInMillis());
assertEquals(expectedBulkItems.length, parsedBulkResponse.getItems().length);
for (int i = 0; i < expectedBulkItems.length; i++) {
assertBulkItemResponse(expectedBulkItems[i], parsedBulkResponse.getItems()[i]);
}
BytesReference finalBytes = toXContent(parsedBulkResponse, xContentType, humanReadable);
BytesReference expectedFinalBytes = toXContent(parsedBulkResponse, xContentType, humanReadable);
assertToXContentEquivalent(expectedFinalBytes, finalBytes, xContentType);
}
}
| apache-2.0 |
SAP/openui5 | src/sap.ui.codeeditor/src/sap/ui/codeeditor/js/ace/snippets/stylus.js | 332 |
; (function() {
ace.require(["ace/snippets/stylus"], function(m) {
if (typeof module == "object" && typeof exports == "object" && module) {
module.exports = m;
}
});
})();
| apache-2.0 |
nemanja88/azure-powershell | src/ResourceManager/Insights/Commands.Insights.Test/Diagnostics/GetDiagnosticSettingCommandTests.cs | 4730 | // ----------------------------------------------------------------------------------
//
// Copyright Microsoft Corporation
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------------
using Microsoft.Azure.Commands.Insights.Diagnostics;
using Microsoft.Azure.Management.Insights;
using Microsoft.Azure.Management.Insights.Models;
using Microsoft.WindowsAzure.Commands.ScenarioTest;
using Moq;
using System;
using System.Collections.Generic;
using System.Management.Automation;
using System.Net;
using System.Threading;
using System.Threading.Tasks;
using Xunit;
namespace Microsoft.Azure.Commands.Insights.Test.Diagnostics
{
    /// <summary>
    /// Unit tests for <see cref="GetAzureRmDiagnosticSettingCommand"/>.
    /// Verifies that executing the cmdlet forwards the supplied ResourceId
    /// to the service diagnostic settings operations client.
    /// </summary>
    public class GetDiagnosticSettingCommandTests
    {
        // Cmdlet under test, backed by mocked runtime and management client.
        private readonly GetAzureRmDiagnosticSettingCommand cmdlet;
        private readonly Mock<InsightsManagementClient> insightsManagementClientMock;
        private readonly Mock<IServiceDiagnosticSettingsOperations> insightsDiagnosticsOperationsMock;
        private Mock<ICommandRuntime> commandRuntimeMock;
        // Canned response returned by the mocked GetAsync call.
        private ServiceDiagnosticSettingsGetResponse response;
        private const string resourceId = "/subscriptions/123/resourcegroups/rg/providers/rp/resource/myresource";
        // Captures the resource id the cmdlet actually passed to the client.
        private string calledResourceId;

        /// <summary>
        /// Wires up the mocks: GetAsync returns a fixed settings payload and
        /// records the resource id it was invoked with.
        /// </summary>
        public GetDiagnosticSettingCommandTests(Xunit.Abstractions.ITestOutputHelper output)
        {
            ServiceManagemenet.Common.Models.XunitTracingInterceptor.AddToContext(new ServiceManagemenet.Common.Models.XunitTracingInterceptor(output));
            insightsDiagnosticsOperationsMock = new Mock<IServiceDiagnosticSettingsOperations>();
            insightsManagementClientMock = new Mock<InsightsManagementClient>();
            commandRuntimeMock = new Mock<ICommandRuntime>();
            cmdlet = new GetAzureRmDiagnosticSettingCommand()
            {
                CommandRuntime = commandRuntimeMock.Object,
                InsightsManagementClient = insightsManagementClientMock.Object
            };
            // Sample diagnostic settings: two log categories and two metric settings.
            response = new ServiceDiagnosticSettingsGetResponse
            {
                RequestId = Guid.NewGuid().ToString(),
                StatusCode = HttpStatusCode.OK,
                Properties = new ServiceDiagnosticSettings
                {
                    StorageAccountId = "/subscriptions/123/resourcegroups/rg/providers/microsoft.storage/accounts/myaccount",
                    Logs = new List<LogSettings>
                    {
                        new LogSettings
                        {
                            Category = "TestCategory1",
                            Enabled = true
                        },
                        new LogSettings
                        {
                            Category = "TestCategory2",
                            Enabled = false
                        }
                    },
                    Metrics = new List<MetricSettings>
                    {
                        new MetricSettings
                        {
                            TimeGrain = TimeSpan.FromMinutes(1),
                            Enabled = false
                        },
                        new MetricSettings
                        {
                            TimeGrain = TimeSpan.FromHours(1)
                        }
                    }
                }
            };
            insightsDiagnosticsOperationsMock.Setup(f => f.GetAsync(It.IsAny<string>(), It.IsAny<CancellationToken>()))
                .Returns(Task.FromResult<ServiceDiagnosticSettingsGetResponse>(response))
                .Callback((string resourceId, CancellationToken cancellationToken) =>
                {
                    // Record the id so the test can assert on it afterwards.
                    this.calledResourceId = resourceId;
                });
            insightsManagementClientMock.SetupGet(f => f.ServiceDiagnosticSettingsOperations).Returns(this.insightsDiagnosticsOperationsMock.Object);
        }

        /// <summary>
        /// Executing the cmdlet must call the API with exactly the ResourceId
        /// that was set on the cmdlet.
        /// </summary>
        [Fact]
        [Trait(Category.AcceptanceType, Category.CheckIn)]
        public void AddAlertRuleCommandParametersProcessing()
        {
            cmdlet.ResourceId = resourceId;
            cmdlet.ExecuteCmdlet();
            Assert.Equal(resourceId, calledResourceId);
        }
    }
}
| apache-2.0 |
Rajith90/carbon-apimgt | features/apimgt/org.wso2.carbon.apimgt.store.feature/src/main/resources/devportal/source/src/app/data/Wsdl.js | 1706 | /**
* Copyright (c) 2019, WSO2 Inc. (http://wso2.com) All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import APIClientFactory from './APIClientFactory';
import Utils from './Utils';
import Resource from './Resource';
/**
 * REST client wrapper for WSDL related Developer Portal operations.
 */
class Wsdl extends Resource {
    /**
     * Create a WSDL API client.
     * @param {*} client Optional pre-built SwaggerClient; when omitted, the
     * client for the current environment is obtained from the factory.
     */
    constructor(client = null) {
        super();
        this.apiClient = client !== null
            ? client
            : new APIClientFactory().getAPIClient(Utils.getCurrentEnvironment()).client;
    }

    /**
     * Download the WSDL of an API, optionally for a specific gateway environment.
     *
     * @param {string} apiId API UUID
     * @param {string} environmentName Name of the gateway environment (optional)
     * @returns {Promise} Promise resolving to the WSDL download response
     * @memberof Wsdl
     */
    downloadWSDLForEnvironment(apiId, environmentName = null) {
        return this.apiClient.then(client => client.apis.APIs.getWSDLOfAPI({ apiId, environmentName }));
    }
}

export default Wsdl;
| apache-2.0 |
ochaloup/creaper | testsuite/standalone/src/test/java/org/wildfly/extras/creaper/commands/web/AddConnectorOnlineTest.java | 3360 | package org.wildfly.extras.creaper.commands.web;
import org.jboss.arquillian.junit.Arquillian;
import org.wildfly.extras.creaper.core.CommandFailedException;
import org.wildfly.extras.creaper.core.ManagementClient;
import org.wildfly.extras.creaper.core.online.CliException;
import org.wildfly.extras.creaper.core.online.ModelNodeResult;
import org.wildfly.extras.creaper.core.online.OnlineManagementClient;
import org.wildfly.extras.creaper.core.online.OnlineOptions;
import org.wildfly.extras.creaper.core.online.operations.Address;
import org.wildfly.extras.creaper.core.online.operations.OperationException;
import org.wildfly.extras.creaper.core.online.operations.Operations;
import org.wildfly.extras.creaper.core.online.operations.admin.Administration;
import org.wildfly.extras.creaper.test.AS7Tests;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import java.io.IOException;
import java.util.concurrent.TimeoutException;
@Category(AS7Tests.class)
@RunWith(Arquillian.class)
public class AddConnectorOnlineTest {
    /** Name of the connector created (and later removed) by every test. */
    private static final String TEST_CONNECTOR_NAME = "test-http";

    private OnlineManagementClient client;
    private Operations ops;
    private Administration admin;

    /** Opens a management client against the default local standalone server. */
    @Before
    public void connect() throws IOException {
        client = ManagementClient.online(OnlineOptions.standalone().localDefault().build());
        ops = new Operations(client);
        admin = new Administration(client);
    }

    /** Removes the test connector, reloads the server if required, closes the client. */
    @After
    public void close() throws IOException, CliException, OperationException, TimeoutException, InterruptedException {
        ops.removeIfExists(Address.subsystem("web").and("connector", TEST_CONNECTOR_NAME));
        admin.reloadIfRequired();
        client.close();
    }

    /** Adding a connector with only the mandatory attributes must succeed. */
    @Test
    public void addSimpleConnector_commandSucceeds() throws CommandFailedException, IOException {
        client.apply(new AddConnector.Builder(TEST_CONNECTOR_NAME)
                .protocol("HTTP/1.1")
                .scheme("http")
                .socketBinding("http")
                .enabled(false)
                .build());

        // Reading an attribute of the new connector proves it was created.
        ModelNodeResult result = ops.readAttribute(Address.subsystem("web").and("connector", TEST_CONNECTOR_NAME),
                "scheme");
        result.assertSuccess();
    }

    /** Adding a connector with every supported builder attribute set must succeed. */
    @Test
    public void addConnectorWithAllAttributes_commandSucceeds() throws CommandFailedException, IOException {
        client.apply(new AddConnector.Builder(TEST_CONNECTOR_NAME)
                .protocol("HTTP/1.1")
                .scheme("http")
                .socketBinding("http")
                .enabled(false)
                .enableLookups(false)
                .maxConnections(10)
                .maxPostSize(20)
                .maxSavePostSize(20)
                .proxyBinding("test-binding")
                .proxyName("test-proxy")
                .proxyPort(7000)
                .redirectBinding("https")
                .redirectPort(8443)
                .secure(false)
                .virtualServer("default-host")
                .executor("test-executor")
                .build());

        ModelNodeResult result = ops.readAttribute(Address.subsystem("web").and("connector", TEST_CONNECTOR_NAME),
                "scheme");
        result.assertSuccess();
    }
}
| apache-2.0 |
minestarks/TypeScript | tests/cases/conformance/classes/members/privateNames/privateNameDeclarationMerging.ts | 207 | // @target: es6
// Compiler fixture: D declares no #x member, so a value typed as D may not
// access C's private name, even though the runtime object is a C instance.
class D {};

class C {
    #x;
    foo () {
        const c = new C();
        c.#x; // OK
        const d: D = new C();
        d.#x; // Error
    }
}

// Declaration merging: the interface below gives C a construct signature
// returning D.
interface C {
    new (): D;
}
| apache-2.0 |
kaibozhou/flink | flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/ExecutionGraphSuspendTest.java | 10258 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.executiongraph;
import org.apache.flink.api.common.JobStatus;
import org.apache.flink.runtime.concurrent.ComponentMainThreadExecutorServiceAdapter;
import org.apache.flink.runtime.execution.ExecutionState;
import org.apache.flink.runtime.executiongraph.restart.FixedDelayRestartStrategy;
import org.apache.flink.runtime.executiongraph.restart.InfiniteDelayRestartStrategy;
import org.apache.flink.runtime.executiongraph.utils.SimpleSlotProvider;
import org.apache.flink.runtime.jobgraph.JobVertex;
import org.apache.flink.runtime.jobmanager.slots.TaskManagerGateway;
import org.apache.flink.runtime.jobmaster.slotpool.SlotProvider;
import org.apache.flink.runtime.testtasks.NoOpInvokable;
import org.apache.flink.util.TestLogger;
import org.junit.Test;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
/**
 * Validates that suspending out of various states works correctly.
 */
public class ExecutionGraphSuspendTest extends TestLogger {

	/**
	 * Going into SUSPENDED out of CREATED should immediately cancel everything and
	 * not send out RPC calls.
	 */
	@Test
	public void testSuspendedOutOfCreated() throws Exception {
		final InteractionsCountingTaskManagerGateway gateway = new InteractionsCountingTaskManagerGateway();
		final int parallelism = 10;
		final ExecutionGraph eg = createExecutionGraph(gateway, parallelism);

		assertEquals(JobStatus.CREATED, eg.getState());

		// suspend
		eg.suspend(new Exception("suspend"));

		assertEquals(JobStatus.SUSPENDED, eg.getState());
		validateAllVerticesInState(eg, ExecutionState.CANCELED);
		// nothing was deployed yet, so no cancel RPCs may be sent
		validateCancelRpcCalls(gateway, 0);

		ensureCannotLeaveSuspendedState(eg, gateway);
	}

	/**
	 * Going into SUSPENDED out of DEPLOYING vertices should cancel all vertices once with RPC calls.
	 */
	@Test
	public void testSuspendedOutOfDeploying() throws Exception {
		final int parallelism = 10;
		final InteractionsCountingTaskManagerGateway gateway = new InteractionsCountingTaskManagerGateway(parallelism);
		final ExecutionGraph eg = createExecutionGraph(gateway, parallelism);
		eg.scheduleForExecution();

		assertEquals(JobStatus.RUNNING, eg.getState());
		validateAllVerticesInState(eg, ExecutionState.DEPLOYING);

		// suspend
		eg.suspend(new Exception("suspend"));

		assertEquals(JobStatus.SUSPENDED, eg.getState());
		// each deployed vertex must receive exactly one cancel RPC
		validateCancelRpcCalls(gateway, parallelism);

		ensureCannotLeaveSuspendedState(eg, gateway);
	}

	/**
	 * Going into SUSPENDED out of RUNNING vertices should cancel all vertices once with RPC calls.
	 */
	@Test
	public void testSuspendedOutOfRunning() throws Exception {
		final int parallelism = 10;
		final InteractionsCountingTaskManagerGateway gateway = new InteractionsCountingTaskManagerGateway(parallelism);
		final ExecutionGraph eg = createExecutionGraph(gateway, parallelism);
		eg.scheduleForExecution();
		ExecutionGraphTestUtils.switchAllVerticesToRunning(eg);

		assertEquals(JobStatus.RUNNING, eg.getState());
		validateAllVerticesInState(eg, ExecutionState.RUNNING);

		// suspend
		eg.suspend(new Exception("suspend"));

		assertEquals(JobStatus.SUSPENDED, eg.getState());
		validateCancelRpcCalls(gateway, parallelism);

		ensureCannotLeaveSuspendedState(eg, gateway);
	}

	/**
	 * Suspending from FAILING goes to SUSPENDED and sends no additional RPC calls.
	 */
	@Test
	public void testSuspendedOutOfFailing() throws Exception {
		final int parallelism = 10;
		final InteractionsCountingTaskManagerGateway gateway = new InteractionsCountingTaskManagerGateway(parallelism);
		final ExecutionGraph eg = createExecutionGraph(gateway, parallelism);
		eg.scheduleForExecution();
		ExecutionGraphTestUtils.switchAllVerticesToRunning(eg);
		eg.failGlobal(new Exception("fail global"));

		assertEquals(JobStatus.FAILING, eg.getState());
		// the failure itself already triggered one cancel RPC per vertex
		validateCancelRpcCalls(gateway, parallelism);

		// suspend
		eg.suspend(new Exception("suspend"));
		assertEquals(JobStatus.SUSPENDED, eg.getState());

		ensureCannotLeaveSuspendedState(eg, gateway);
	}

	/**
	 * Suspending from FAILED should do nothing.
	 */
	@Test
	public void testSuspendedOutOfFailed() throws Exception {
		final InteractionsCountingTaskManagerGateway gateway = new InteractionsCountingTaskManagerGateway();
		final int parallelism = 10;
		final ExecutionGraph eg = createExecutionGraph(gateway, parallelism);
		eg.scheduleForExecution();
		ExecutionGraphTestUtils.switchAllVerticesToRunning(eg);
		eg.failGlobal(new Exception("fail global"));

		assertEquals(JobStatus.FAILING, eg.getState());
		validateCancelRpcCalls(gateway, parallelism);

		ExecutionGraphTestUtils.completeCancellingForAllVertices(eg);
		assertEquals(JobStatus.FAILED, eg.getState());

		// suspend
		eg.suspend(new Exception("suspend"));

		// still in failed state
		assertEquals(JobStatus.FAILED, eg.getState());
		validateCancelRpcCalls(gateway, parallelism);
	}

	/**
	 * Suspending from CANCELING goes to SUSPENDED and sends no additional RPC calls.
	 */
	@Test
	public void testSuspendedOutOfCanceling() throws Exception {
		final int parallelism = 10;
		final InteractionsCountingTaskManagerGateway gateway = new InteractionsCountingTaskManagerGateway(parallelism);
		final ExecutionGraph eg = createExecutionGraph(gateway, parallelism);
		eg.scheduleForExecution();
		ExecutionGraphTestUtils.switchAllVerticesToRunning(eg);
		eg.cancel();

		assertEquals(JobStatus.CANCELLING, eg.getState());
		validateCancelRpcCalls(gateway, parallelism);

		// suspend
		eg.suspend(new Exception("suspend"));
		assertEquals(JobStatus.SUSPENDED, eg.getState());

		ensureCannotLeaveSuspendedState(eg, gateway);
	}

	/**
	 * Suspending from CANCELLED should do nothing.
	 */
	@Test
	public void testSuspendedOutOfCanceled() throws Exception {
		final InteractionsCountingTaskManagerGateway gateway = new InteractionsCountingTaskManagerGateway();
		final int parallelism = 10;
		final ExecutionGraph eg = createExecutionGraph(gateway, parallelism);
		eg.scheduleForExecution();
		ExecutionGraphTestUtils.switchAllVerticesToRunning(eg);
		eg.cancel();

		assertEquals(JobStatus.CANCELLING, eg.getState());
		validateCancelRpcCalls(gateway, parallelism);

		ExecutionGraphTestUtils.completeCancellingForAllVertices(eg);
		assertEquals(JobStatus.CANCELED, eg.getTerminationFuture().get());

		// suspend
		eg.suspend(new Exception("suspend"));

		// still in failed state
		assertEquals(JobStatus.CANCELED, eg.getState());
		validateCancelRpcCalls(gateway, parallelism);
	}

	/**
	 * Tests that we can suspend a job when in state RESTARTING.
	 */
	@Test
	public void testSuspendWhileRestarting() throws Exception {
		final ExecutionGraph eg = ExecutionGraphTestUtils.createSimpleTestGraph(new InfiniteDelayRestartStrategy(10));
		eg.start(ComponentMainThreadExecutorServiceAdapter.forMainThread());
		eg.scheduleForExecution();

		assertEquals(JobStatus.RUNNING, eg.getState());
		ExecutionGraphTestUtils.switchAllVerticesToRunning(eg);

		eg.failGlobal(new Exception("test"));
		assertEquals(JobStatus.FAILING, eg.getState());

		ExecutionGraphTestUtils.completeCancellingForAllVertices(eg);
		assertEquals(JobStatus.RESTARTING, eg.getState());

		final Exception exception = new Exception("Suspended");

		eg.suspend(exception);

		assertEquals(JobStatus.SUSPENDED, eg.getState());

		assertEquals(exception, eg.getFailureCause());
	}

	// ------------------------------------------------------------------------
	//  utilities
	// ------------------------------------------------------------------------

	/**
	 * Asserts that no further transition (fail/cancel/suspend) moves the graph
	 * out of SUSPENDED, that no additional gateway interactions happen, and
	 * that no execution attempt was restarted.
	 */
	private static void ensureCannotLeaveSuspendedState(ExecutionGraph eg, InteractionsCountingTaskManagerGateway gateway) {
		gateway.waitUntilAllTasksAreSubmitted();
		assertEquals(JobStatus.SUSPENDED, eg.getState());
		gateway.resetCounts();

		eg.failGlobal(new Exception("fail"));
		assertEquals(JobStatus.SUSPENDED, eg.getState());
		validateNoInteractions(gateway);

		eg.cancel();
		assertEquals(JobStatus.SUSPENDED, eg.getState());
		validateNoInteractions(gateway);

		eg.suspend(new Exception("suspend again"));
		assertEquals(JobStatus.SUSPENDED, eg.getState());
		validateNoInteractions(gateway);

		// attempt number 0 means no vertex was ever restarted
		for (ExecutionVertex ev : eg.getAllExecutionVertices()) {
			assertEquals(0, ev.getCurrentExecutionAttempt().getAttemptNumber());
		}
	}

	private static void validateNoInteractions(InteractionsCountingTaskManagerGateway gateway) {
		assertThat(gateway.getInteractionsCount(), is(0));
	}

	private static void validateAllVerticesInState(ExecutionGraph eg, ExecutionState expected) {
		for (ExecutionVertex ev : eg.getAllExecutionVertices()) {
			assertEquals(expected, ev.getCurrentExecutionAttempt().getState());
		}
	}

	private static void validateCancelRpcCalls(InteractionsCountingTaskManagerGateway gateway, int num) {
		assertThat(gateway.getCancelTaskCount(), is(num));
	}

	/** Builds a single-vertex graph with the given parallelism, backed by the counting gateway. */
	private static ExecutionGraph createExecutionGraph(TaskManagerGateway gateway, int parallelism) throws Exception {
		final JobVertex vertex = new JobVertex("vertex");
		vertex.setInvokableClass(NoOpInvokable.class);
		vertex.setParallelism(parallelism);

		final SlotProvider slotProvider = new SimpleSlotProvider(parallelism, gateway);

		ExecutionGraph simpleTestGraph = ExecutionGraphTestUtils.createSimpleTestGraph(
			slotProvider,
			new FixedDelayRestartStrategy(0, 0),
			vertex);
		simpleTestGraph.start(ComponentMainThreadExecutorServiceAdapter.forMainThread());
		return simpleTestGraph;
	}
}
| apache-2.0 |
taoyunxing/trafficserver | proxy/hdrs/HdrUtils.cc | 4111 | /** @file
A brief file description
@section license License
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/****************************************************************************
HdrUtils.cc
Description: Convenience routines for dealing with hdrs and
values
****************************************************************************/
#include "ts/ink_platform.h"
#include "HdrUtils.h"
// Advance the cursor one byte; bail out to the "done" label when the end of
// the current field value is reached. Only usable inside find_csv().
#define GETNEXT() \
  { \
    cur += 1; \
    if (cur >= end) { \
      goto done; \
    } \
  }

// Locate the next separator-delimited value in the current MIME field value,
// updating m_csv_start / m_csv_end / m_csv_len. When the current field is
// exhausted and dup-following is enabled, restarts on the next duplicate
// field. Implemented as a small goto-based state machine; quoted sections
// (with backslash escaping) are treated as opaque.
void
HdrCsvIter::find_csv()
{
  const char *cur, *end, *last_data, *csv_start;

RETRY:

  cur = m_csv_start;
  end = m_value_start + m_value_len;
  last_data = nullptr;
  csv_start = nullptr;

  if (cur >= end) {
    goto done;
  }
skip_leading_whitespace:
  if (ParseRules::is_ws(*cur)) {
    GETNEXT();
    goto skip_leading_whitespace;
  }

  csv_start = cur;
parse_value:
  // ink_assert((',' > '"') && (',' > ' ') && (',' > '\t'));
  // Cookie/Set-Cookie use ';' as the separator
  if (m_separator == ',') {
    // Fast path: bytes comparing greater than ',' cannot be the separator,
    // a quote, or whitespace (see the assert above), so scan them in bulk.
    while ((cur < end - 1) && (*cur > ',')) {
      last_data = cur;
      cur++;
    }
  }

  if (*cur == m_separator) {
    goto done;
  }

  if (*cur == '\"') {
    // If the quote come before any text
    // skip it
    if (cur == csv_start) {
      csv_start++;
    }

    GETNEXT();
    goto parse_value_quote;
  }

  if (!ParseRules::is_ws(*cur)) {
    last_data = cur;
  }

  GETNEXT();
  goto parse_value;

parse_value_quote:
  // Inside a quoted section: consume until an unescaped closing quote.
  if ((*cur == '\"') && (cur[-1] != '\\')) {
    GETNEXT();
    goto parse_value;
  }

  last_data = cur;
  GETNEXT();
  goto parse_value_quote;

done:
  m_csv_end = cur;
  m_csv_start = csv_start;

  if (last_data) {
    // Length runs from the first non-whitespace byte to the last data byte.
    m_csv_len = (int)(last_data - csv_start) + 1;
  } else {
    // Nothing found. See if there is another
    // field in the dup list
    if (m_cur_field->m_next_dup && m_follow_dups) {
      field_init(m_cur_field->m_next_dup);
      goto RETRY;
    }

    m_csv_len = 0;
  }
}
// Return the n-th (zero-based) separator-delimited value of @a field, or
// nullptr when the field holds fewer than n + 1 values. *len receives the
// value length (0 when no value was found).
const char *
HdrCsvIter::get_nth(MIMEField *field, int *len, int n, bool follow_dups)
{
  ink_assert(n >= 0);

  int value_len = 0;
  const char *value = get_first(field, &value_len, follow_dups);

  // Step forward until index n is reached or the values run out.
  for (int idx = 0; value && idx < n; ++idx) {
    value = get_next(&value_len);
  }

  *len = value ? value_len : 0;
  return value;
}
// Count how many separator-delimited values @a field carries, optionally
// walking the duplicate-header list as well.
int
HdrCsvIter::count_values(MIMEField *field, bool follow_dups)
{
  int total = 0;
  int value_len;

  for (const char *value = get_first(field, &value_len, follow_dups); value; value = get_next(&value_len)) {
    ++total;
  }

  return total;
}
/*
int main() {
char* tests[] = {"\"I\", \"hate\", \"strings\"",
"This, is a , test",
"a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p",
"\"This is,\" a test"
};
for (int i = 0; i < 4; i++) {
printf ("Testing: %s\n", tests[i]);
HdrCsvIter iter;
int len;
const char* v = iter.get_first(tests[i], strlen(tests[i]), &len);
while (v) {
char* str_v = (char*)ats_malloc(len+1);
memcpy(str_v, v, len);
str_v[len] = '\0';
printf ("%s|", str_v);
v = iter.get_next(&len);
}
printf("\n\n");
}
}
*/
| apache-2.0 |
cloudcache/zstack | test/src/test/java/org/zstack/test/cascade/TestCascadeDeletion4.java | 4059 | package org.zstack.test.cascade;
import junit.framework.Assert;
import org.junit.Before;
import org.junit.Test;
import org.zstack.core.cloudbus.CloudBus;
import org.zstack.core.componentloader.ComponentLoader;
import org.zstack.core.db.DatabaseFacade;
import org.zstack.header.cluster.ClusterEO;
import org.zstack.header.cluster.ClusterVO;
import org.zstack.header.configuration.DiskOfferingInventory;
import org.zstack.header.configuration.DiskOfferingVO;
import org.zstack.header.configuration.InstanceOfferingInventory;
import org.zstack.header.configuration.InstanceOfferingVO;
import org.zstack.header.host.HostEO;
import org.zstack.header.host.HostVO;
import org.zstack.header.network.l2.L2NetworkEO;
import org.zstack.header.network.l3.IpRangeEO;
import org.zstack.header.network.l3.IpRangeVO;
import org.zstack.header.network.l2.L2NetworkVO;
import org.zstack.header.network.l3.L3NetworkEO;
import org.zstack.header.network.l3.L3NetworkVO;
import org.zstack.header.storage.backup.BackupStorageInventory;
import org.zstack.header.storage.backup.BackupStorageVO;
import org.zstack.header.storage.primary.PrimaryStorageEO;
import org.zstack.header.storage.primary.PrimaryStorageVO;
import org.zstack.header.vm.VmInstanceEO;
import org.zstack.header.vm.VmInstanceInventory;
import org.zstack.header.vm.VmInstanceVO;
import org.zstack.header.zone.ZoneEO;
import org.zstack.header.zone.ZoneVO;
import org.zstack.test.Api;
import org.zstack.test.ApiSenderException;
import org.zstack.test.DBUtil;
import org.zstack.test.deployer.Deployer;
import java.util.concurrent.TimeUnit;
/**
 * Cascade-deletion test: destroying a VM must remove only the VM records,
 * leaving zone, cluster, host, storage, network, offering and backup-storage
 * records intact.
 */
public class TestCascadeDeletion4 {
    Deployer deployer;
    Api api;
    ComponentLoader loader;
    CloudBus bus;
    DatabaseFacade dbf;

    /** Redeploys a clean database and builds the standard test deployment. */
    @Before
    public void setUp() throws Exception {
        DBUtil.reDeployDB();
        deployer = new Deployer("deployerXml/vm/TestCreateVm.xml");
        deployer.build();
        api = deployer.getApi();
        loader = deployer.getComponentLoader();
        bus = loader.getComponent(CloudBus.class);
        dbf = loader.getComponent(DatabaseFacade.class);
    }

    @Test
    public void test() throws ApiSenderException, InterruptedException {
        VmInstanceInventory vminv = deployer.vms.get("TestVm");
        DiskOfferingInventory do1 = deployer.diskOfferings.get("TestRootDiskOffering");
        DiskOfferingInventory do2 = deployer.diskOfferings.get("TestDataDiskOffering");
        InstanceOfferingInventory io = deployer.instanceOfferings.get("TestInstanceOffering");
        BackupStorageInventory bs = deployer.backupStorages.get("TestBackupStorage");

        api.destroyVmInstance(vminv.getUuid());

        // Infrastructure records must survive the VM deletion ...
        long count = dbf.count(ZoneVO.class);
        Assert.assertTrue(0 != count);
        count = dbf.count(ClusterVO.class);
        Assert.assertTrue(0 != count);
        count = dbf.count(HostVO.class);
        Assert.assertTrue(0 != count);
        // ... while the VM record itself must be gone.
        count = dbf.count(VmInstanceVO.class);
        Assert.assertEquals(0, count);
        count = dbf.count(PrimaryStorageVO.class);
        Assert.assertTrue(0 != count);
        count = dbf.count(L2NetworkVO.class);
        Assert.assertTrue(0 != count);
        count = dbf.count(L3NetworkVO.class);
        Assert.assertTrue(0 != count);
        count = dbf.count(IpRangeVO.class);
        Assert.assertTrue(0 != count);
        // Offerings and backup storage referenced by the VM must remain as well.
        DiskOfferingVO dvo = dbf.findByUuid(do1.getUuid(), DiskOfferingVO.class);
        Assert.assertNotNull(dvo);
        dvo = dbf.findByUuid(do2.getUuid(), DiskOfferingVO.class);
        Assert.assertNotNull(dvo);
        InstanceOfferingVO ivo = dbf.findByUuid(io.getUuid(), InstanceOfferingVO.class);
        Assert.assertNotNull(ivo);
        BackupStorageVO bvo = dbf.findByUuid(bs.getUuid(), BackupStorageVO.class);
        Assert.assertNotNull(bvo);
        // Helper check on the VM EO table (name suggests it asserts zero
        // remaining rows for the given entity class).
        CascadeTestHelper helper = new CascadeTestHelper();
        helper.zeroInDatabase(
                VmInstanceEO.class
        );
    }
}
| apache-2.0 |
nizhikov/ignite | modules/platforms/cpp/odbc-test/src/test_utils.cpp | 5145 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <boost/test/unit_test.hpp>
#include <cassert>
#include <ignite/common/platform_utils.h>
#include "test_utils.h"
namespace ignite_test
{
OdbcClientError GetOdbcError(SQLSMALLINT handleType, SQLHANDLE handle)
{
SQLCHAR sqlstate[7] = {};
SQLINTEGER nativeCode;
SQLCHAR message[ODBC_BUFFER_SIZE];
SQLSMALLINT reallen = 0;
SQLGetDiagRec(handleType, handle, 1, sqlstate, &nativeCode, message, ODBC_BUFFER_SIZE, &reallen);
return OdbcClientError(
std::string(reinterpret_cast<char*>(sqlstate)),
std::string(reinterpret_cast<char*>(message), reallen));
}
std::string GetOdbcErrorState(SQLSMALLINT handleType, SQLHANDLE handle)
{
SQLCHAR sqlstate[7] = {};
SQLINTEGER nativeCode;
SQLCHAR message[ODBC_BUFFER_SIZE];
SQLSMALLINT reallen = 0;
SQLGetDiagRec(handleType, handle, 1, sqlstate, &nativeCode, message, ODBC_BUFFER_SIZE, &reallen);
return std::string(reinterpret_cast<char*>(sqlstate));
}
std::string GetOdbcErrorMessage(SQLSMALLINT handleType, SQLHANDLE handle)
{
SQLCHAR sqlstate[7] = {};
SQLINTEGER nativeCode;
SQLCHAR message[ODBC_BUFFER_SIZE];
SQLSMALLINT reallen = 0;
SQLGetDiagRec(handleType, handle, 1, sqlstate, &nativeCode, message, ODBC_BUFFER_SIZE, &reallen);
std::string res(reinterpret_cast<char*>(sqlstate));
if (!res.empty())
res.append(": ").append(reinterpret_cast<char*>(message), reallen);
else
res = "No results";
return res;
}
std::string GetTestConfigDir()
{
return ignite::common::GetEnv("IGNITE_NATIVE_TEST_ODBC_CONFIG_PATH");
}
void InitConfig(ignite::IgniteConfiguration& cfg, const char* cfgFile)
{
using namespace ignite;
assert(cfgFile != 0);
cfg.jvmOpts.push_back("-Xdebug");
cfg.jvmOpts.push_back("-Xnoagent");
cfg.jvmOpts.push_back("-Djava.compiler=NONE");
cfg.jvmOpts.push_back("-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005");
cfg.jvmOpts.push_back("-XX:+HeapDumpOnOutOfMemoryError");
cfg.jvmOpts.push_back("-Duser.timezone=GMT");
cfg.jvmOpts.push_back("-DIGNITE_QUIET=false");
cfg.jvmOpts.push_back("-DIGNITE_CONSOLE_APPENDER=false");
cfg.jvmOpts.push_back("-DIGNITE_UPDATE_NOTIFIER=false");
cfg.jvmOpts.push_back("-DIGNITE_LOG_CLASSPATH_CONTENT_ON_STARTUP=false");
cfg.jvmOpts.push_back("-Duser.language=en");
// Un-comment to debug SSL
//cfg.jvmOpts.push_back("-Djavax.net.debug=ssl");
cfg.igniteHome = jni::ResolveIgniteHome();
cfg.jvmClassPath = jni::CreateIgniteHomeClasspath(cfg.igniteHome, true);
#ifdef IGNITE_TESTS_32
cfg.jvmInitMem = 256;
cfg.jvmMaxMem = 768;
#else
cfg.jvmInitMem = 1024;
cfg.jvmMaxMem = 4096;
#endif
char* cfgPath = getenv("IGNITE_NATIVE_TEST_ODBC_CONFIG_PATH");
assert(cfgPath != 0);
cfg.springCfgPath = std::string(cfgPath).append("/").append(cfgFile);
}
ignite::Ignite StartNode(const char* cfgFile)
{
using namespace ignite;
IgniteConfiguration cfg;
InitConfig(cfg, cfgFile);
return Ignition::Start(cfg);
}
ignite::Ignite StartNode(const char* cfgFile, const char* name)
{
using namespace ignite;
assert(name != 0);
IgniteConfiguration cfg;
InitConfig(cfg, cfgFile);
return Ignition::Start(cfg, name);
}
ignite::Ignite StartPlatformNode(const char* cfg, const char* name)
{
std::string config(cfg);
#ifdef IGNITE_TESTS_32
// Cutting off the ".xml" part.
config.resize(config.size() - 4);
config += "-32.xml";
#endif //IGNITE_TESTS_32
return StartNode(config.c_str(), name);
}
std::string AppendPath(const std::string& base, const std::string& toAdd)
{
std::stringstream stream;
stream << base << ignite::common::Fs << toAdd;
return stream.str();
}
void ClearLfs()
{
std::string home = ignite::jni::ResolveIgniteHome();
std::string workDir = AppendPath(home, "work");
ignite::common::DeletePath(workDir);
}
}
| apache-2.0 |
kyoren/https-github.com-h2oai-h2o-3 | h2o-docs/src/booklets/v2_2015/source/gbm/gbm_predict.py | 176 | # Perform classification on the held out data
# Score the fitted model on the held-out test frame.
prediction = air_model.predict(air_test_hex)
# Copy predictions from H2O to Python: downloads them from the cluster
# into a local data frame.
pred = prediction.as_data_frame()
pred.head() | apache-2.0 |
mglukhikh/intellij-community | platform/diff-api/src/com/intellij/ide/diff/DiffErrorElement.java | 2277 | /*
* Copyright 2000-2011 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.ide.diff;
import com.intellij.diff.chains.DiffRequestProducerException;
import com.intellij.diff.contents.DiffContent;
import com.intellij.openapi.progress.ProcessCanceledException;
import com.intellij.openapi.progress.ProgressIndicator;
import com.intellij.openapi.project.Project;
import com.intellij.util.PlatformIcons;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import java.io.IOException;
/**
 * Placeholder {@code DiffElement} used when a real element (e.g. a
 * directory's children) could not be loaded: it carries an error message,
 * has no content, and fails any attempt to build diff content from it.
 *
 * @author Konstantin Bulenkov
 */
public class DiffErrorElement extends DiffElement {
  // Error text shown as the element's name and used when creating diff content fails.
  private final String myMessage;

  /** Creates an element with the generic "can't load children" message. */
  public DiffErrorElement() {
    this("Can't load children", "");
  }

  /**
   * @param message error text displayed as the element's name
   * @param description additional details.
   *     NOTE(review): currently ignored — confirm whether it should be
   *     stored or surfaced anywhere.
   */
  public DiffErrorElement(@NotNull String message, @NotNull String description) {
    myMessage = message;
  }

  @Override
  public String getPath() {
    return "";
  }

  @NotNull
  @Override
  public String getName() {
    // The error message doubles as the display name.
    return myMessage;
  }

  @Override
  public long getSize() {
    return -1; // size unknown/not applicable
  }

  @Override
  public long getTimeStamp() {
    return -1; // timestamp unknown/not applicable
  }

  @Override
  public boolean isContainer() {
    return false;
  }

  @Override
  public DiffElement[] getChildren() {
    return EMPTY_ARRAY;
  }

  @Nullable
  @Override
  public byte[] getContent() {
    return null;
  }

  @Override
  public Object getValue() {
    return null;
  }

  @Override
  public Icon getIcon() {
    return PlatformIcons.ERROR_INTRODUCTION_ICON;
  }

  /** Always fails with the stored message — an error element has no content to diff. */
  @NotNull
  public DiffContent createDiffContent(@Nullable Project project, @NotNull ProgressIndicator indicator)
    throws DiffRequestProducerException, ProcessCanceledException {
    throw new DiffRequestProducerException(myMessage);
  }
}
| apache-2.0 |
tuijldert/jitsi | src/net/java/sip/communicator/plugin/certconfig/CertConfigTableModel.java | 3103 | /*
* Jitsi, the OpenSource Java VoIP and Instant Messaging client.
*
* Copyright @ 2015 Atlassian Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.java.sip.communicator.plugin.certconfig;
import java.beans.*;
import java.util.*;
import javax.swing.table.*;
import net.java.sip.communicator.service.certificate.*;
import org.jitsi.service.resources.*;
/**
* Backing data model for a JTable that displays the client certificate
* configuration entries.
*
* @author Ingo Bauersachs
*/
/**
 * Backing data model for a JTable that displays the client certificate
 * configuration entries. Columns: display name, alias, keystore type.
 * The model refreshes itself when the relevant configuration properties
 * change.
 *
 * @author Ingo Bauersachs
 */
public class CertConfigTableModel
    extends AbstractTableModel
    implements PropertyChangeListener
{
    private static final long serialVersionUID = -6369348252411082340L;

    // Service supplying the client-auth certificate entries.
    private CertificateService certService;

    // Current snapshot of the configured entries (one table row each).
    private List<CertificateConfigEntry> entries;

    // Resource service used to resolve localized column headers.
    private ResourceManagementService res = CertConfigActivator.R;

    /**
     * Creates the model and subscribes to configuration changes so the
     * table is refreshed whenever a certificate entry is added or removed.
     */
    public CertConfigTableModel()
    {
        CertConfigActivator.getConfigService().addPropertyChangeListener(this);
        certService = CertConfigActivator.getCertService();
        entries = certService.getClientAuthCertificateConfigs();
    }

    public int getRowCount()
    {
        return entries.size();
    }

    public int getColumnCount()
    {
        return 3;
    }

    public Object getValueAt(int rowIndex, int columnIndex)
    {
        CertificateConfigEntry entry = entries.get(rowIndex);
        if (columnIndex == 0)
            return entry.getDisplayName();
        if (columnIndex == 1)
            return entry.getAlias();
        if (columnIndex == 2)
            return entry.getKeyStoreType();
        return null;
    }

    /**
     * Get <tt>CertificateConfigEntry</tt> located at <tt>rowIndex</tt>.
     *
     * @param rowIndex row index
     * @return <tt>CertificateConfigEntry</tt>
     */
    public CertificateConfigEntry getItem(int rowIndex)
    {
        return entries.get(rowIndex);
    }

    @Override
    public String getColumnName(int column)
    {
        if (column == 0)
            return res.getI18NString("service.gui.DISPLAY_NAME");
        if (column == 1)
            return res.getI18NString("plugin.certconfig.ALIAS");
        if (column == 2)
            return res.getI18NString("plugin.certconfig.KEYSTORE_TYPE");
        return super.getColumnName(column);
    }

    public void propertyChange(PropertyChangeEvent evt)
    {
        if (evt.getPropertyName().startsWith(
            CertificateService.PNAME_CLIENTAUTH_CERTCONFIG_BASE))
        {
            // Re-read the entry list and tell the JTable to repaint.
            entries = certService.getClientAuthCertificateConfigs();
            super.fireTableDataChanged();
        }
    }
}
| apache-2.0 |
KeyNexus/netty | src/main/java/org/jboss/netty/logging/InternalLogLevel.java | 974 | /*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.logging;
/**
* The log level that {@link InternalLogger} can log at.
*/
public enum InternalLogLevel {
    /**
     * 'DEBUG' log level — lowest severity in this enum.
     */
    DEBUG,
    /**
     * 'INFO' log level.
     */
    INFO,
    /**
     * 'WARN' log level.
     */
    WARN,
    /**
     * 'ERROR' log level — highest severity in this enum.
     */
    ERROR
}
| apache-2.0 |
LongbinChen/annoy | examples/precision_test.py | 1293 | from __future__ import print_function
import random, time
from annoy import AnnoyIndex
# Python 2/3 compatibility: on Python 3 the built-in xrange does not exist,
# so alias it to range; on Python 2 the try succeeds and xrange stays lazy.
try:
    xrange
except NameError:
    # Python 3 compat
    xrange = range
def precision(f=40, n=1000000):
    """Benchmark Annoy search precision vs. search-k.

    Builds an index of ``n`` random ``f``-dimensional Gaussian vectors,
    then for a sample of query items compares the top-``k`` exact
    neighbors against results retrieved with increasing ``search_k``
    limits, printing the running hit rate and average query time.
    """
    index = AnnoyIndex(f)
    for item in xrange(n):
        vec = [random.gauss(0, 1) for _ in xrange(f)]
        index.add_item(item, vec)
    index.build(2 * f)
    index.save('test.tree')

    limits = [10, 100, 1000, 10000]
    k = 10
    prec_sum = {}
    prec_n = 1000
    time_sum = {}

    for i in xrange(prec_n):
        query = random.randrange(0, n)
        print('finding nbs for', query)
        # Exact ground truth: scan every item, keep the k closest.
        closest = set(index.get_nns_by_item(query, n)[:k])
        for limit in limits:
            started = time.time()
            toplist = index.get_nns_by_item(query, limit)
            elapsed = time.time() - started
            found = len(closest.intersection(toplist))
            hitrate = 1.0 * found / k
            prec_sum[limit] = prec_sum.get(limit, 0.0) + hitrate
            time_sum[limit] = time_sum.get(limit, 0.0) + elapsed
        # Report cumulative averages after each query.
        for limit in limits:
            print('limit: %-9d precision: %6.2f%% avg time: %.6fs'
                  % (limit, 100.0 * prec_sum[limit] / (i + 1),
                     time_sum[limit] / (i + 1)))
# Run the benchmark with default parameters when executed as a script.
if __name__ == '__main__':
    precision()
| apache-2.0 |
nvoron23/arangodb | js/apps/system/_admin/aardvark/APP/frontend/js/graphViewer/graphViewer.js | 7837 | /*global _, $*/
/*global ArangoAdapter, JSONAdapter, FoxxAdapter, PreviewAdapter, GharialAdapter*/
/*global ForceLayouter, EdgeShaper, NodeShaper, ZoomManager */
////////////////////////////////////////////////////////////////////////////////
/// @brief Graph functionality
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Michael Hackstein
/// @author Copyright 2011-2013, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
// Constructor for the graph visualisation widget. Wires together an
// adapter (data source), a layouter, node/edge shapers and an optional
// zoom manager on top of the given d3-selected SVG element.
//   svg           - d3 selection of the target <svg>
//   width, height - positive drawing dimensions in pixels
//   adapterConfig - required; .type selects the data-source adapter
//   config        - optional shaper/layouter/zoom configuration
function GraphViewer(svg, width, height, adapterConfig, config) {
  "use strict";

  // Make the html aware of xmlns:xlink
  $("html").attr("xmlns:xlink", "http://www.w3.org/1999/xlink");

  // Check if all required inputs are given
  if (svg === undefined || svg.append === undefined) {
    throw "SVG has to be given and has to be selected using d3.select";
  }
  if (width === undefined || width <= 0) {
    throw "A width greater 0 has to be given";
  }
  if (height === undefined || height <= 0) {
    throw "A height greater 0 has to be given";
  }
  if (adapterConfig === undefined || adapterConfig.type === undefined) {
    throw "An adapter configuration has to be given";
  }

  // Globally disable the right-click menu
  /*
  svg[0][0].oncontextmenu = function() {
    return false;
  };
  */

  var self = this,
    adapter,
    nodeShaper,
    edgeShaper,
    layouter,
    zoomManager,
    graphContainer,
    nodeContainer,
    edgeContainer,
    fixedSize,
    // nodes/edges are shared by reference with the adapter and shapers.
    edges = [],
    nodes = [],

    // Creates the layouter; only the "force" type is supported.
    // Note: the parameter shadows the outer `config` on purpose —
    // it receives config.layouter, not the full config object.
    parseLayouterConfig = function (config) {
      if (!config) {
        // Default
        config = {};
        config.nodes = nodes;
        config.links = edges;
        config.width = width;
        config.height = height;
        layouter = new ForceLayouter(config);
        return;
      }
      switch (config.type.toLowerCase()) {
        case "force":
          config.nodes = nodes;
          config.links = edges;
          config.width = width;
          config.height = height;
          layouter = new ForceLayouter(config);
          break;
        default:
          throw "Sorry unknown layout type.";
      }
    },

    // Passed to the zoom manager: re-renders after the adapter applies
    // a new node limit.
    nodeLimitCallBack = function(limit) {
      adapter.setNodeLimit(limit, self.start);
    },

    // Creates the zoom manager only when a zoom config is truthy.
    parseZoomConfig = function(config) {
      if (config) {
        zoomManager = new ZoomManager(width, height, svg,
          graphContainer, nodeShaper, edgeShaper,
          {}, nodeLimitCallBack);
      }
    },

    // Builds shapers and layouter from the user config; edges are drawn
    // in a group below the nodes so nodes render on top.
    parseConfig = function(config) {
      var esConf = config.edgeShaper || {},
        nsConf = config.nodeShaper || {},
        idFunc = nsConf.idfunc || undefined,
        zConf = config.zoom || false;
      esConf.shape = esConf.shape || {
        type: EdgeShaper.shapes.ARROW
      };
      parseLayouterConfig(config.layouter);
      edgeContainer = graphContainer.append("g");
      edgeShaper = new EdgeShaper(edgeContainer, esConf);
      nodeContainer = graphContainer.append("g");
      nodeShaper = new NodeShaper(nodeContainer, nsConf, idFunc);
      layouter.setCombinedUpdateFunction(nodeShaper, edgeShaper);
      parseZoomConfig(zConf);
    };

  // Instantiate the data-source adapter selected by adapterConfig.type.
  switch (adapterConfig.type.toLowerCase()) {
    case "arango":
      adapterConfig.width = width;
      adapterConfig.height = height;
      adapter = new ArangoAdapter(
        nodes,
        edges,
        this,
        adapterConfig
      );
      adapter.setChildLimit(10);
      break;
    case "gharial":
      adapterConfig.width = width;
      adapterConfig.height = height;
      adapter = new GharialAdapter(
        nodes,
        edges,
        this,
        adapterConfig
      );
      adapter.setChildLimit(10);
      break;
    case "foxx":
      adapterConfig.width = width;
      adapterConfig.height = height;
      adapter = new FoxxAdapter(
        nodes,
        edges,
        adapterConfig.route,
        this,
        adapterConfig
      );
      break;
    case "json":
      adapter = new JSONAdapter(
        adapterConfig.path,
        nodes,
        edges,
        this,
        width,
        height
      );
      break;
    case "preview":
      adapterConfig.width = width;
      adapterConfig.height = height;
      adapter = new PreviewAdapter(
        nodes,
        edges,
        this,
        adapterConfig
      );
      break;
    default:
      throw "Sorry unknown adapter type.";
  }

  graphContainer = svg.append("g");
  parseConfig(config || {});

  // (Re)draws all nodes and edges and restarts the force layout.
  // When `expand` is set, nodes are flagged _expanded first; if the info
  // field is non-empty only nodes present in adapter.randomNodes are
  // expanded.
  this.start = function(expand) {
    layouter.stop();
    if (expand) {
      if ($('.infoField').text() !== '') {
        _.each(nodes, function(node) {
          _.each(adapter.randomNodes, function(compare) {
            if (node._id === compare._id) {
              node._expanded = true;
            }
          });
        });
      }
      else {
        _.each(nodes, function(node) {
          node._expanded = true;
        });
      }
    }
    //expand all wanted nodes
    nodeShaper.drawNodes(nodes);
    edgeShaper.drawEdges(edges);
    layouter.start();
  };

  // Loads the node with the given id as the initial graph root.
  // `callback` receives an error object (with errorCode) on failure.
  this.loadGraph = function(nodeId, callback) {
    // loadNode
    // loadInitialNode
    adapter.loadInitialNode(nodeId, function (node) {
      if (node.errorCode) {
        callback(node);
        return;
      }
      node._expanded = true;
      self.start();
      if (_.isFunction(callback)) {
        callback();
      }
    });
  };

  // Loads a randomly chosen node as the starting point.
  this.loadGraphWithRandomStart = function(callback) {
    adapter.loadRandomNode(function (node) {
      if (node.errorCode && node.errorCode === 404) {
        callback(node);
        return;
      }
      node._expanded = true;
      self.start(true);
      if (_.isFunction(callback)) {
        callback();
      }
    });
  };

  // Adds one more node, looked up by attribute == value, to the graph.
  this.loadGraphWithAdditionalNode = function(attribute, value, callback) {
    adapter.loadAdditionalNodeByAttributeValue(attribute, value, function (node) {
      if (node.errorCode) {
        callback(node);
        return;
      }
      node._expanded = true;
      self.start();
      if (_.isFunction(callback)) {
        callback();
      }
    });
  };

  // Replaces the graph with one rooted at the node where attribute == value.
  this.loadGraphWithAttributeValue = function(attribute, value, callback) {
    //clear random and defined nodes
    adapter.randomNodes = [];
    adapter.definedNodes = [];
    adapter.loadInitialNodeByAttributeValue(attribute, value, function (node) {
      if (node.errorCode) {
        callback(node);
        return;
      }
      node._expanded = true;
      self.start();
      if (_.isFunction(callback)) {
        callback();
      }
    });
  };

  // Resets the colour mappings of both shapers.
  this.cleanUp = function() {
    nodeShaper.resetColourMap();
    edgeShaper.resetColourMap();
  };

  // Propagates a new width to layouter, zoom manager and adapter.
  // NOTE(review): zoomManager is only created when config.zoom is truthy;
  // calling changeWidth without zoom configured would throw — confirm
  // callers always enable zoom.
  this.changeWidth = function(w) {
    layouter.changeWidth(w);
    zoomManager.changeWidth(w);
    adapter.setWidth(w);
  };

  // Dependency bundle consumed by the event dispatcher.
  this.dispatcherConfig = {
    expand: {
      edges: edges,
      nodes: nodes,
      startCallback: self.start,
      adapter: adapter,
      reshapeNodes: nodeShaper.reshapeNodes
    },
    drag: {
      layouter: layouter
    },
    nodeEditor: {
      nodes: nodes,
      adapter: adapter
    },
    edgeEditor: {
      edges: edges,
      adapter: adapter
    }
  };

  // Expose collaborators for external callers/tests.
  this.adapter = adapter;
  this.nodeShaper = nodeShaper;
  this.edgeShaper = edgeShaper;
  this.layouter = layouter;
  this.zoomManager = zoomManager;
}
| apache-2.0 |
weswigham/TypeScript | tests/baselines/reference/recursiveProperties.js | 715 | //// [recursiveProperties.ts]
class A {
get testProp() { return this.testProp; }
}
class B {
set testProp(value:string) { this.testProp = value; }
}
//// [recursiveProperties.js]
// NOTE(review): generated TypeScript compiler baseline output — a getter
// that reads itself (infinite recursion if called); do not hand-edit
// beyond comments.
var A = /** @class */ (function () {
    function A() {
    }
    Object.defineProperty(A.prototype, "testProp", {
        get: function () { return this.testProp; },
        enumerable: true,
        configurable: true
    });
    return A;
}());
// NOTE(review): generated TypeScript compiler baseline output — a setter
// that assigns to itself (infinite recursion if called); do not hand-edit
// beyond comments.
var B = /** @class */ (function () {
    function B() {
    }
    Object.defineProperty(B.prototype, "testProp", {
        set: function (value) { this.testProp = value; },
        enumerable: true,
        configurable: true
    });
    return B;
}());
| apache-2.0 |
shixuan-fan/presto | presto-common/src/main/java/com/facebook/presto/common/type/DecimalParseResult.java | 1653 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.common.type;
import java.util.Objects;
/**
 * Immutable pair produced when parsing a decimal literal: the parsed value
 * (boxed long or Slice depending on precision) together with the
 * {@link DecimalType} that describes it.
 */
public class DecimalParseResult
{
    private final Object object;
    private final DecimalType type;

    /**
     * @param object the parsed decimal value
     * @param type   the decimal type (precision/scale) of the value
     */
    public DecimalParseResult(Object object, DecimalType type)
    {
        this.object = object;
        this.type = type;
    }

    public Object getObject()
    {
        return object;
    }

    public DecimalType getType()
    {
        return type;
    }

    @Override
    public boolean equals(Object o)
    {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        DecimalParseResult that = (DecimalParseResult) o;
        return Objects.equals(object, that.object) &&
                Objects.equals(type, that.type);
    }

    @Override
    public int hashCode()
    {
        return Objects.hash(object, type);
    }

    @Override
    public String toString()
    {
        // Fix: previously printed "ParseResult" which does not match the class name.
        return "DecimalParseResult{" +
                "object=" + object +
                ", type=" + type +
                '}';
    }
}
| apache-2.0 |
chrislovecnm/kubernetes | pkg/registry/core/service/storage/rest_test.go | 136793 | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"context"
"fmt"
"net"
"reflect"
"sort"
"testing"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/watch"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/registry/rest"
etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing"
"k8s.io/apiserver/pkg/util/dryrun"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
epstest "k8s.io/kubernetes/pkg/api/endpoints/testing"
"k8s.io/kubernetes/pkg/api/service"
svctest "k8s.io/kubernetes/pkg/api/service/testing"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features"
endpointstore "k8s.io/kubernetes/pkg/registry/core/endpoint/storage"
podstore "k8s.io/kubernetes/pkg/registry/core/pod/storage"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
"k8s.io/kubernetes/pkg/registry/core/service/portallocator"
"k8s.io/kubernetes/pkg/registry/registrytest"
netutil "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)
// TODO(wojtek-t): Cleanup this file.
// It is now testing mostly the same things as other resources but
// in a completely different way. We should unify it.
// serviceStorage is an in-memory fake of the service REST storage used by
// these tests; it keeps deep copies of services keyed by name.
type serviceStorage struct {
	Services map[string]*api.Service
}
// saveService stores a deep copy of svc under its name, lazily creating
// the backing map on first use.
func (s *serviceStorage) saveService(svc *api.Service) {
	if s.Services == nil {
		s.Services = make(map[string]*api.Service)
	}
	s.Services[svc.Name] = svc.DeepCopy()
}
// NamespaceScoped reports that services are namespaced resources.
func (s *serviceStorage) NamespaceScoped() bool {
	return true
}
// Get returns a deep copy of the stored service, or an error when no
// service with that name exists.
func (s *serviceStorage) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
	svc, ok := s.Services[name]
	if !ok || svc == nil {
		return nil, fmt.Errorf("service %q not found", name)
	}
	return svc.DeepCopy(), nil
}
// getService fetches a service through a generic rest.Getter and
// type-asserts the result to *api.Service (panics on a non-service object).
func getService(getter rest.Getter, ctx context.Context, name string, options *metav1.GetOptions) (*api.Service, error) {
	obj, err := getter.Get(ctx, name, options)
	if err != nil {
		return nil, err
	}
	return obj.(*api.Service), nil
}
// NewList is unused by these tests and intentionally unimplemented.
func (s *serviceStorage) NewList() runtime.Object {
	panic("not implemented")
}
// List returns services sorted by name, filtered by the namespace carried
// in ctx (metav1.NamespaceAll matches every namespace).
func (s *serviceStorage) List(ctx context.Context, options *metainternalversion.ListOptions) (runtime.Object, error) {
	ns, _ := genericapirequest.NamespaceFrom(ctx)

	names := make([]string, 0, len(s.Services))
	for name := range s.Services {
		names = append(names, name)
	}
	sort.Strings(names)

	list := new(api.ServiceList)
	for _, name := range names {
		if svc := s.Services[name]; ns == metav1.NamespaceAll || ns == svc.Namespace {
			list.Items = append(list.Items, *svc)
		}
	}
	return list, nil
}
// New is unused by these tests and intentionally unimplemented.
func (s *serviceStorage) New() runtime.Object {
	panic("not implemented")
}
// Create stores the service (unless this is a dry run) and forces its
// ResourceVersion to "1", returning a deep copy of the stored object.
func (s *serviceStorage) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {
	if dryrun.IsDryRun(options.DryRun) {
		// Dry run: no side effects, echo the input back.
		return obj, nil
	}
	svc := obj.(*api.Service)
	s.saveService(svc)
	s.Services[svc.Name].ResourceVersion = "1"
	return s.Services[svc.Name].DeepCopy(), nil
}
// Update materializes the updated object and stores it (skipping the
// store on dry run). The bool result ("created") is always false.
func (s *serviceStorage) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) {
	obj, err := objInfo.UpdatedObject(ctx, nil)
	if err != nil {
		return nil, false, err
	}
	if !dryrun.IsDryRun(options.DryRun) {
		s.saveService(obj.(*api.Service))
	}
	return obj, false, nil
}
// Delete removes the named service and returns it (nil if absent).
// The bool result ("deleted immediately") is always false.
func (s *serviceStorage) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) {
	ret := s.Services[name]
	delete(s.Services, name)
	return ret, false, nil
}
// DeleteCollection is unused by these tests and intentionally unimplemented.
func (s *serviceStorage) DeleteCollection(ctx context.Context, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) {
	panic("not implemented")
}
// Watch is unused by these tests and intentionally unimplemented.
func (s *serviceStorage) Watch(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) {
	panic("not implemented")
}
// ConvertToTable is unused by these tests and intentionally unimplemented.
func (s *serviceStorage) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) {
	panic("not implemented")
}
// StorageVersion is unused by these tests and intentionally unimplemented.
func (s *serviceStorage) StorageVersion() runtime.GroupVersioner {
	panic("not implemented")
}
// GetResetFields implements rest.ResetFieldsStrategy; the fake tracks no
// reset fields, so it returns nil.
func (s *serviceStorage) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
	return nil
}
// NewTestREST builds a service REST storage with no pre-seeded endpoints
// or pods; see NewTestRESTWithPods for the full fixture.
func NewTestREST(t *testing.T, ipFamilies []api.IPFamily) (*REST, *etcd3testing.EtcdTestServer) {
	return NewTestRESTWithPods(t, nil, nil, ipFamilies)
}
// NewTestRESTWithPods builds a fully-wired service REST storage backed by a
// test etcd server: pod and endpoint storage pre-seeded with the given
// objects, one cluster-IP allocator per requested IP family (the first is
// primary), and a NodePort allocator over 30000-30999. The caller owns the
// returned etcd server and must Terminate it.
func NewTestRESTWithPods(t *testing.T, endpoints []*api.Endpoints, pods []api.Pod, ipFamilies []api.IPFamily) (*REST, *etcd3testing.EtcdTestServer) {
	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
	serviceStorage := &serviceStorage{}

	podStorage, err := podstore.NewStorage(generic.RESTOptions{
		StorageConfig:           etcdStorage,
		Decorator:               generic.UndecoratedStorage,
		DeleteCollectionWorkers: 3,
		ResourcePrefix:          "pods",
	}, nil, nil, nil)
	if err != nil {
		t.Fatalf("unexpected error from REST storage: %v", err)
	}
	ctx := genericapirequest.NewDefaultContext()
	// Seed the requested pods directly into the pod store.
	for ix := range pods {
		key, _ := podStorage.Pod.KeyFunc(ctx, pods[ix].Name)
		if err := podStorage.Pod.Storage.Create(ctx, key, &pods[ix], nil, 0, false); err != nil {
			t.Fatalf("Couldn't create pod: %v", err)
		}
	}
	endpointStorage, err := endpointstore.NewREST(generic.RESTOptions{
		StorageConfig:  etcdStorage,
		Decorator:      generic.UndecoratedStorage,
		ResourcePrefix: "endpoints",
	})
	if err != nil {
		t.Fatalf("unexpected error from REST storage: %v", err)
	}
	// Seed the requested endpoints directly into the endpoint store.
	for ix := range endpoints {
		key, _ := endpointStorage.KeyFunc(ctx, endpoints[ix].Name)
		if err := endpointStorage.Store.Storage.Create(ctx, key, endpoints[ix], nil, 0, false); err != nil {
			t.Fatalf("Couldn't create endpoint: %v", err)
		}
	}

	var rPrimary ipallocator.Interface
	var rSecondary ipallocator.Interface
	if len(ipFamilies) < 1 || len(ipFamilies) > 2 {
		t.Fatalf("unexpected ipfamilies passed: %v", ipFamilies)
	}
	// Build one allocator per family; index 0 is primary, index 1 secondary.
	for i, family := range ipFamilies {
		var r ipallocator.Interface
		switch family {
		case api.IPv4Protocol:
			r, err = ipallocator.NewCIDRRange(makeIPNet(t))
			if err != nil {
				t.Fatalf("cannot create CIDR Range %v", err)
			}
		case api.IPv6Protocol:
			r, err = ipallocator.NewCIDRRange(makeIPNet6(t))
			if err != nil {
				t.Fatalf("cannot create CIDR Range %v", err)
			}
		}
		switch i {
		case 0:
			rPrimary = r
		case 1:
			rSecondary = r
		}
	}

	portRange := utilnet.PortRange{Base: 30000, Size: 1000}
	portAllocator, err := portallocator.NewPortAllocator(portRange)
	if err != nil {
		t.Fatalf("cannot create port allocator %v", err)
	}

	rest, _ := NewREST(serviceStorage, endpointStorage, podStorage.Pod, rPrimary, rSecondary, portAllocator, nil)

	return rest, server
}
// makeIPNet returns the IPv4 CIDR ("1.2.3.0/24") used for the test
// cluster-IP allocator. A parse failure is reported via t.Error, in which
// case nil is returned (matching the original behavior).
func makeIPNet(t *testing.T) *net.IPNet {
	// Fix: name the result ipnet instead of net, which shadowed the net package.
	_, ipnet, err := net.ParseCIDR("1.2.3.0/24")
	if err != nil {
		t.Error(err)
	}
	return ipnet
}
// makeIPNet6 returns the IPv6 CIDR ("2000::/108") used for the test
// cluster-IP allocator. A parse failure is reported via t.Error, in which
// case nil is returned (matching the original behavior).
func makeIPNet6(t *testing.T) *net.IPNet {
	// Fix: name the result ipnet instead of net, which shadowed the net package.
	_, ipnet, err := net.ParseCIDR("2000::/108")
	if err != nil {
		t.Error(err)
	}
	return ipnet
}
// ipIsAllocated reports whether ipstr is currently held by alloc.
// An unparsable IP is reported as a test error and counts as "not allocated".
func ipIsAllocated(t *testing.T, alloc ipallocator.Interface, ipstr string) bool {
	t.Helper()
	ip := net.ParseIP(ipstr)
	if ip == nil {
		t.Errorf("error parsing IP %q", ipstr)
		return false
	}
	return alloc.Has(ip)
}
// portIsAllocated reports whether port is currently held by alloc.
// Port 0 (unassigned) is reported as a test error and counts as "not allocated".
func portIsAllocated(t *testing.T, alloc portallocator.Interface, port int32) bool {
	t.Helper()
	if port == 0 {
		t.Errorf("port is 0")
		return false
	}
	return alloc.Has(int(port))
}
// TestServiceRegistryCreate verifies that creating services with various
// IP-family configurations populates object meta, assigns cluster IPs from
// the matching family's CIDR, and makes the service retrievable.
func TestServiceRegistryCreate(t *testing.T) {
	// NOTE(review): enableDualStack is declared per case but no feature gate
	// is toggled here (unlike TestServiceRegistryCreateDryRun) — confirm
	// whether a SetFeatureGateDuringTest call was intended.
	testCases := []struct {
		svc             *api.Service
		name            string
		families        []api.IPFamily
		enableDualStack bool
	}{{
		name:            "Service IPFamily default cluster dualstack:off",
		enableDualStack: false,
		families:        []api.IPFamily{api.IPv4Protocol},
		svc:             svctest.MakeService("foo"),
	}, {
		name:            "Service IPFamily:v4 dualstack off",
		enableDualStack: false,
		families:        []api.IPFamily{api.IPv4Protocol},
		svc:             svctest.MakeService("foo", svctest.SetIPFamilies(api.IPv4Protocol)),
	}, {
		name:            "Service IPFamily:v4 dualstack on",
		enableDualStack: true,
		families:        []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
		svc:             svctest.MakeService("foo", svctest.SetIPFamilies(api.IPv4Protocol)),
	}, {
		name:            "Service IPFamily:v6 dualstack on",
		enableDualStack: true,
		families:        []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
		svc:             svctest.MakeService("foo", svctest.SetIPFamilies(api.IPv6Protocol)),
	}}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			storage, server := NewTestREST(t, tc.families)
			defer server.Terminate(t)

			ctx := genericapirequest.NewDefaultContext()
			createdSvc, err := storage.Create(ctx, tc.svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
			if err != nil {
				t.Fatalf("error creating service %v", err)
			}
			createdService := createdSvc.(*api.Service)
			objMeta, err := meta.Accessor(createdService)
			if err != nil {
				t.Fatal(err)
			}
			if !metav1.HasObjectMetaSystemFieldValues(objMeta) {
				t.Errorf("storage did not populate object meta field values")
			}
			if createdService.Name != "foo" {
				t.Errorf("Expected foo, but got %v", createdService.Name)
			}
			if createdService.CreationTimestamp.IsZero() {
				t.Errorf("Expected timestamp to be set, got: %v", createdService.CreationTimestamp)
			}

			// Each assigned ClusterIP must fall inside its family's CIDR.
			for i, family := range createdService.Spec.IPFamilies {
				allocator := storage.serviceIPAllocatorsByFamily[family]
				c := allocator.CIDR()
				cidr := &c
				if !cidr.Contains(net.ParseIP(createdService.Spec.ClusterIPs[i])) {
					t.Errorf("Unexpected ClusterIP: %s", createdService.Spec.ClusterIPs[i])
				}
			}
			srv, err := getService(storage, ctx, tc.svc.Name, &metav1.GetOptions{})
			if err != nil {
				t.Errorf("unexpected error: %v", err)
			}
			if srv == nil {
				t.Errorf("Failed to find service: %s", tc.svc.Name)
			}
		})
	}
}
// TestServiceRegistryCreateDryRun verifies that a dry-run create has no
// side effects: no cluster IPs are allocated and the service is not stored.
func TestServiceRegistryCreateDryRun(t *testing.T) {
	testCases := []struct {
		name            string
		svc             *api.Service
		enableDualStack bool
	}{{
		name:            "v4 service featuregate off",
		enableDualStack: false,
		svc:             svctest.MakeService("foo", svctest.SetClusterIPs("1.2.3.4")),
	}, {
		name:            "v6 service featuregate on but singlestack",
		enableDualStack: true,
		svc: svctest.MakeService("foo",
			svctest.SetIPFamilies(api.IPv6Protocol),
			svctest.SetClusterIPs("2000::1")),
	}, {
		name:            "dualstack v4,v6 service",
		enableDualStack: true,
		svc: svctest.MakeService("foo",
			svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
			svctest.SetIPFamilies(api.IPv4Protocol, api.IPv6Protocol),
			svctest.SetClusterIPs("1.2.3.4", "2000::1")),
	}, {
		name:            "dualstack v6,v4 service",
		enableDualStack: true,
		svc: svctest.MakeService("foo",
			svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
			svctest.SetIPFamilies(api.IPv6Protocol, api.IPv4Protocol),
			svctest.SetClusterIPs("2000::1", "1.2.3.4")),
	}}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Toggle the dual-stack feature gate per test case.
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.IPv6DualStack, tc.enableDualStack)()

			families := []api.IPFamily{api.IPv4Protocol}
			if tc.enableDualStack {
				families = append(families, api.IPv6Protocol)
			}
			storage, server := NewTestREST(t, families)
			defer server.Terminate(t)

			ctx := genericapirequest.NewDefaultContext()
			_, err := storage.Create(ctx, tc.svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
			if err != nil {
				t.Fatalf("Unexpected error: %v", err)
			}

			// Dry run must not allocate any of the requested cluster IPs.
			for i, family := range tc.svc.Spec.IPFamilies {
				alloc := storage.serviceIPAllocatorsByFamily[family]
				if ipIsAllocated(t, alloc, tc.svc.Spec.ClusterIPs[i]) {
					t.Errorf("unexpected side effect: ip allocated %v", tc.svc.Spec.ClusterIPs[i])
				}
			}

			// And the service must not have been persisted.
			_, err = getService(storage, ctx, tc.svc.Name, &metav1.GetOptions{})
			if err == nil {
				t.Errorf("expected error")
			}
		})
	}
}
// TestDryRunNodePort verifies that dry-run creates of NodePort services
// return assigned (and unique) node ports in the response without actually
// allocating them or persisting the service.
func TestDryRunNodePort(t *testing.T) {
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)

	// Test dry run create request with a node port
	svc := svctest.MakeService("foo", svctest.SetTypeNodePort)
	ctx := genericapirequest.NewDefaultContext()

	obj, err := storage.Create(ctx, svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	createdSvc := obj.(*api.Service)
	if createdSvc.Spec.Ports[0].NodePort == 0 {
		t.Errorf("expected NodePort value assigned")
	}
	if portIsAllocated(t, storage.serviceNodePorts, createdSvc.Spec.Ports[0].NodePort) {
		t.Errorf("unexpected side effect: NodePort allocated")
	}
	_, err = getService(storage, ctx, svc.Name, &metav1.GetOptions{})
	if err == nil {
		// Should get a not-found.
		t.Errorf("expected error")
	}

	// Test dry run create request with multi node port
	svc = svctest.MakeService("foo",
		svctest.SetTypeNodePort,
		svctest.SetPorts(
			svctest.MakeServicePort("port-tcp", 53, intstr.FromInt(6503), api.ProtocolTCP),
			svctest.MakeServicePort("port-udp", 53, intstr.FromInt(6503), api.ProtocolUDP)),
		svctest.SetNodePorts(30053, 30053))
	expectNodePorts := collectServiceNodePorts(svc)
	obj, err = storage.Create(ctx, svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	createdSvc = obj.(*api.Service)
	actualNodePorts := collectServiceNodePorts(createdSvc)
	if !reflect.DeepEqual(actualNodePorts, expectNodePorts) {
		t.Errorf("Expected %v, but got %v", expectNodePorts, actualNodePorts)
	}
	// The explicitly requested ports must still be free afterwards.
	for i := range svc.Spec.Ports {
		if portIsAllocated(t, storage.serviceNodePorts, svc.Spec.Ports[i].NodePort) {
			t.Errorf("unexpected side effect: NodePort allocated")
		}
	}
	_, err = getService(storage, ctx, svc.Name, &metav1.GetOptions{})
	if err == nil {
		// Should get a not-found.
		t.Errorf("expected error")
	}

	// Test dry run create request with multiple unspecified node ports,
	// so PortAllocationOperation.AllocateNext() will be called multiple times.
	svc = svctest.MakeService("foo",
		svctest.SetTypeNodePort,
		svctest.SetPorts(
			svctest.MakeServicePort("port-a", 53, intstr.FromInt(6503), api.ProtocolTCP),
			svctest.MakeServicePort("port-b", 54, intstr.FromInt(6504), api.ProtocolUDP)))
	obj, err = storage.Create(ctx, svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	createdSvc = obj.(*api.Service)
	actualNodePorts = collectServiceNodePorts(createdSvc)
	if len(actualNodePorts) != len(svc.Spec.Ports) {
		t.Fatalf("Expected service to have %d ports, but got %v", len(svc.Spec.Ports), actualNodePorts)
	}
	// Auto-assigned dry-run ports must be distinct from each other.
	seen := map[int]bool{}
	for _, np := range actualNodePorts {
		if seen[np] {
			t.Errorf("Expected unique port numbers, but got %v", actualNodePorts)
		} else {
			seen[np] = true
		}
	}
}
// TestServiceRegistryCreateMultiNodePortsService verifies that services
// with multiple ports get exactly the requested node ports (including two
// ports sharing one node port across TCP/UDP) and remain retrievable.
func TestServiceRegistryCreateMultiNodePortsService(t *testing.T) {
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)
	testCases := []struct {
		svc             *api.Service
		name            string
		expectNodePorts []int
	}{{
		svc: svctest.MakeService("foo1",
			svctest.SetTypeNodePort,
			svctest.SetPorts(
				svctest.MakeServicePort("port-tcp", 53, intstr.FromInt(6503), api.ProtocolTCP),
				svctest.MakeServicePort("port-udp", 53, intstr.FromInt(6503), api.ProtocolUDP)),
			svctest.SetNodePorts(30053, 30053)),
		name:            "foo1",
		expectNodePorts: []int{30053, 30053},
	}, {
		svc: svctest.MakeService("foo2",
			svctest.SetTypeNodePort,
			svctest.SetPorts(
				svctest.MakeServicePort("port-tcp", 54, intstr.FromInt(6504), api.ProtocolTCP),
				svctest.MakeServicePort("port-udp", 54, intstr.FromInt(6504), api.ProtocolUDP)),
			svctest.SetNodePorts(30054, 30054)),
		name:            "foo2",
		expectNodePorts: []int{30054, 30054},
	}, {
		svc: svctest.MakeService("foo3",
			svctest.SetTypeNodePort,
			svctest.SetPorts(
				svctest.MakeServicePort("port-tcp", 55, intstr.FromInt(6505), api.ProtocolTCP),
				svctest.MakeServicePort("port-udp", 55, intstr.FromInt(6506), api.ProtocolUDP)),
			svctest.SetNodePorts(30055, 30056)),
		name:            "foo3",
		expectNodePorts: []int{30055, 30056},
	}}

	ctx := genericapirequest.NewDefaultContext()
	for _, test := range testCases {
		createdSvc, err := storage.Create(ctx, test.svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		createdService := createdSvc.(*api.Service)
		objMeta, err := meta.Accessor(createdService)
		if err != nil {
			t.Fatal(err)
		}
		if !metav1.HasObjectMetaSystemFieldValues(objMeta) {
			t.Errorf("storage did not populate object meta field values")
		}
		if createdService.Name != test.name {
			t.Errorf("Expected %s, but got %s", test.name, createdService.Name)
		}
		serviceNodePorts := collectServiceNodePorts(createdService)
		if !reflect.DeepEqual(serviceNodePorts, test.expectNodePorts) {
			t.Errorf("Expected %v, but got %v", test.expectNodePorts, serviceNodePorts)
		}
		srv, err := getService(storage, ctx, test.name, &metav1.GetOptions{})
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if srv == nil {
			t.Fatalf("Failed to find service: %s", test.name)
		}
		for i := range serviceNodePorts {
			nodePort := serviceNodePorts[i]
			// Release the node port at the end of the test case.
			storage.serviceNodePorts.Release(nodePort)
		}
	}
}
// TestServiceStorageValidatesCreate verifies that invalid service specs
// (empty name, zero port, missing targetPort) are rejected with an
// "invalid" API error and no object is returned.
func TestServiceStorageValidatesCreate(t *testing.T) {
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)
	failureCases := map[string]*api.Service{
		"empty ID": svctest.MakeService(""),
		"empty port": svctest.MakeService("foo", svctest.SetPorts(
			svctest.MakeServicePort("p", 0, intstr.FromInt(80), api.ProtocolTCP))),
		"missing targetPort": svctest.MakeService("foo", svctest.SetPorts(
			svctest.MakeServicePort("p", 80, intstr.IntOrString{}, api.ProtocolTCP))),
	}
	ctx := genericapirequest.NewDefaultContext()
	for _, failureCase := range failureCases {
		c, err := storage.Create(ctx, failureCase, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
		if c != nil {
			t.Errorf("Expected nil object")
		}
		if !errors.IsInvalid(err) {
			t.Errorf("Expected to get an invalid resource error, got %v", err)
		}
	}
}
// TestServiceRegistryUpdate verifies that updating an existing service's
// selector succeeds, does not report "created", and keeps the name.
func TestServiceRegistryUpdate(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)

	_, err := storage.Create(ctx, svctest.MakeService("foo"), rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Expected no error: %v", err)
	}
	obj, err := storage.Get(ctx, "foo", &metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error :%v", err)
	}
	svc := obj.(*api.Service)

	// update selector
	svc.Spec.Selector = map[string]string{"bar": "baz2"}

	updatedSvc, created, err := storage.Update(ctx, "foo", rest.DefaultUpdatedObjectInfo(svc), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
	if err != nil {
		t.Fatalf("Expected no error: %v", err)
	}
	if updatedSvc == nil {
		t.Errorf("Expected non-nil object")
	}
	if created {
		t.Errorf("expected not created")
	}
	updatedService := updatedSvc.(*api.Service)
	if updatedService.Name != "foo" {
		t.Errorf("Expected foo, but got %v", updatedService.Name)
	}
}
// TestServiceRegistryUpdateUnspecifiedAllocations verifies that an update
// which does not re-specify the allocated fields (ClusterIP/ClusterIPs,
// NodePorts, HealthCheckNodePort) preserves the values that were allocated
// at create time.
func TestServiceRegistryUpdateUnspecifiedAllocations(t *testing.T) {
	testCases := []struct {
		name  string
		svc   *api.Service // Need a clusterIP, NodePort, and HealthCheckNodePort allocated
		tweak func(*api.Service)
	}{{
		name: "single-port",
		svc: svctest.MakeService("foo",
			svctest.SetTypeLoadBalancer,
			svctest.SetExternalTrafficPolicy(api.ServiceExternalTrafficPolicyTypeLocal)),
		tweak: nil,
	}, {
		name: "multi-port",
		svc: svctest.MakeService("foo",
			svctest.SetTypeLoadBalancer,
			svctest.SetExternalTrafficPolicy(api.ServiceExternalTrafficPolicyTypeLocal),
			svctest.SetPorts(
				svctest.MakeServicePort("p", 80, intstr.FromInt(80), api.ProtocolTCP),
				svctest.MakeServicePort("q", 443, intstr.FromInt(443), api.ProtocolTCP))),
		tweak: nil,
	}, {
		name: "shuffle-ports",
		svc: svctest.MakeService("foo",
			svctest.SetTypeLoadBalancer,
			svctest.SetExternalTrafficPolicy(api.ServiceExternalTrafficPolicyTypeLocal),
			svctest.SetPorts(
				svctest.MakeServicePort("p", 80, intstr.FromInt(80), api.ProtocolTCP),
				svctest.MakeServicePort("q", 443, intstr.FromInt(443), api.ProtocolTCP))),
		// Present the ports in a different order on update; allocations must
		// still be matched up (by port name) and preserved.
		tweak: func(s *api.Service) {
			s.Spec.Ports[0], s.Spec.Ports[1] = s.Spec.Ports[1], s.Spec.Ports[0]
		},
	}}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := genericapirequest.NewDefaultContext()
			storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
			defer server.Terminate(t)

			svc := tc.svc.DeepCopy()
			obj, err := storage.Create(ctx, svc.DeepCopy(), rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
			if err != nil {
				t.Fatalf("Expected no error: %v", err)
			}
			createdSvc := obj.(*api.Service)
			// Creation must have allocated every one of these fields.
			if createdSvc.Spec.ClusterIP == "" {
				t.Fatalf("expected ClusterIP to be set")
			}
			if len(createdSvc.Spec.ClusterIPs) == 0 {
				t.Fatalf("expected ClusterIPs to be set")
			}
			for i := range createdSvc.Spec.Ports {
				if createdSvc.Spec.Ports[i].NodePort == 0 {
					t.Fatalf("expected NodePort[%d] to be set", i)
				}
			}
			if createdSvc.Spec.HealthCheckNodePort == 0 {
				t.Fatalf("expected HealthCheckNodePort to be set")
			}

			// Update from the original object - just change the selector.
			svc.Spec.Selector = map[string]string{"bar": "baz2"}
			svc.ResourceVersion = createdSvc.ResourceVersion
			// Apply the per-case mutation (e.g. reordering ports). Previously
			// tweak was declared but never invoked, which made the
			// "shuffle-ports" case identical to "multi-port".
			if tc.tweak != nil {
				tc.tweak(svc)
			}

			obj, _, err = storage.Update(ctx, svc.Name, rest.DefaultUpdatedObjectInfo(svc.DeepCopy()), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
			if err != nil {
				t.Fatalf("Expected no error: %v", err)
			}
			updatedSvc := obj.(*api.Service)
			// None of the allocated values may change across the update.
			if want, got := createdSvc.Spec.ClusterIP, updatedSvc.Spec.ClusterIP; want != got {
				t.Errorf("expected ClusterIP to not change: wanted %v, got %v", want, got)
			}
			if want, got := createdSvc.Spec.ClusterIPs, updatedSvc.Spec.ClusterIPs; !reflect.DeepEqual(want, got) {
				t.Errorf("expected ClusterIPs to not change: wanted %v, got %v", want, got)
			}
			// Compare NodePorts by port name so port order does not matter.
			portmap := func(s *api.Service) map[string]int32 {
				ret := map[string]int32{}
				for _, p := range s.Spec.Ports {
					ret[p.Name] = p.NodePort
				}
				return ret
			}
			if want, got := portmap(createdSvc), portmap(updatedSvc); !reflect.DeepEqual(want, got) {
				t.Errorf("expected NodePort to not change: wanted %v, got %v", want, got)
			}
			if want, got := createdSvc.Spec.HealthCheckNodePort, updatedSvc.Spec.HealthCheckNodePort; want != got {
				t.Errorf("expected HealthCheckNodePort to not change: wanted %v, got %v", want, got)
			}
		})
	}
}
// TestServiceRegistryUpdateDryRun verifies that updates carrying the
// DryRunAll option do not change allocator state: node ports and cluster IPs
// must be neither newly allocated nor released as a side effect.
func TestServiceRegistryUpdateDryRun(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)
	obj, err := storage.Create(ctx, svctest.MakeService("foo", svctest.SetTypeExternalName), rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Expected no error: %v", err)
	}
	svc := obj.(*api.Service)
	// Test dry run update request external name to node port
	new1 := svc.DeepCopy()
	svctest.SetTypeNodePort(new1)
	svctest.SetNodePorts(30001)(new1) // DryRun does not set port values yet
	obj, created, err := storage.Update(ctx, new1.Name, rest.DefaultUpdatedObjectInfo(new1),
		rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}})
	if err != nil {
		t.Fatalf("Expected no error: %v", err)
	}
	if obj == nil {
		t.Errorf("Expected non-nil object")
	}
	if created {
		t.Errorf("expected not created")
	}
	// The dry run must not have consumed the requested node port.
	if portIsAllocated(t, storage.serviceNodePorts, new1.Spec.Ports[0].NodePort) {
		t.Errorf("unexpected side effect: NodePort allocated")
	}
	// Test dry run update request external name to cluster ip
	new2 := svc.DeepCopy()
	svctest.SetTypeClusterIP(new2)
	svctest.SetClusterIPs("1.2.3.4")(new2) // DryRun does not set IP values yet
	_, _, err = storage.Update(ctx, svc.Name, rest.DefaultUpdatedObjectInfo(new2),
		rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}})
	if err != nil {
		t.Fatalf("Expected no error: %v", err)
	}
	// The dry run must not have consumed the requested cluster IP.
	if ipIsAllocated(t, storage.serviceIPAllocatorsByFamily[storage.defaultServiceIPFamily], new2.Spec.ClusterIP) {
		t.Errorf("unexpected side effect: ip allocated")
	}
	// Test dry run update request remove node port
	obj, err = storage.Create(ctx, svctest.MakeService("foo2", svctest.SetTypeNodePort, svctest.SetNodePorts(30001)), rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Expected no error: %v", err)
	}
	svc = obj.(*api.Service)
	// Sanity-check that the real (non-dry-run) create allocated resources.
	if !ipIsAllocated(t, storage.serviceIPAllocatorsByFamily[storage.defaultServiceIPFamily], svc.Spec.ClusterIP) {
		t.Errorf("expected IP to be allocated")
	}
	if !portIsAllocated(t, storage.serviceNodePorts, svc.Spec.Ports[0].NodePort) {
		t.Errorf("expected NodePort to be allocated")
	}
	new3 := svc.DeepCopy()
	svctest.SetTypeExternalName(new3)
	_, _, err = storage.Update(ctx, svc.Name, rest.DefaultUpdatedObjectInfo(new3),
		rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}})
	if err != nil {
		t.Fatalf("Expected no error: %v", err)
	}
	// Converting to ExternalName would normally release the node port; a dry
	// run must leave it allocated.
	if !portIsAllocated(t, storage.serviceNodePorts, svc.Spec.Ports[0].NodePort) {
		t.Errorf("unexpected side effect: NodePort unallocated")
	}
	// Test dry run update request remove cluster ip
	obj, err = storage.Create(ctx, svctest.MakeService("foo3", svctest.SetClusterIPs("1.2.3.4")), rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("expected no error: %v", err)
	}
	svc = obj.(*api.Service)
	new4 := svc.DeepCopy()
	svctest.SetTypeExternalName(new4)
	_, _, err = storage.Update(ctx, svc.Name, rest.DefaultUpdatedObjectInfo(new4),
		rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}})
	if err != nil {
		t.Fatalf("expected no error: %v", err)
	}
	// Likewise the cluster IP must survive a dry-run conversion to ExternalName.
	if !ipIsAllocated(t, storage.serviceIPAllocatorsByFamily[storage.defaultServiceIPFamily], svc.Spec.ClusterIP) {
		t.Errorf("unexpected side effect: ip unallocated")
	}
}
// Ensure that Update rejects services that fail validation and neither
// returns an object nor reports a create.
func TestServiceStorageValidatesUpdate(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)

	if _, err := storage.Create(ctx, svctest.MakeService("foo"), rest.ValidateAllObjectFunc, &metav1.CreateOptions{}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Each entry is an update payload that must fail validation.
	invalidServices := map[string]*api.Service{
		"empty ID": svctest.MakeService(""),
		"invalid selector": svctest.MakeService("", func(svc *api.Service) {
			svc.Spec.Selector = map[string]string{"ThisSelectorFailsValidation": "ok"}
		}),
	}
	for _, badSvc := range invalidServices {
		obj, wasCreated, err := storage.Update(ctx, badSvc.Name, rest.DefaultUpdatedObjectInfo(badSvc), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
		if err == nil {
			t.Errorf("expected error")
		}
		if obj != nil || wasCreated {
			t.Errorf("Expected nil object or created false")
		}
	}
}
// Create a LoadBalancer service and check that node ports were allocated.
func TestServiceRegistryLoadBalancerService(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)

	lbSvc := svctest.MakeService("foo", svctest.SetTypeLoadBalancer)
	if _, err := storage.Create(ctx, lbSvc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{}); err != nil {
		t.Errorf("Failed to create service: %#v", err)
	}
	found, err := getService(storage, ctx, lbSvc.Name, &metav1.GetOptions{})
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if found == nil {
		t.Fatalf("Failed to find service: %s", lbSvc.Name)
	}
	if ports := collectServiceNodePorts(found); len(ports) == 0 {
		t.Errorf("Failed to find NodePorts of service : %s", found.Name)
	}
}
// TestAllocateLoadBalancerNodePorts exercises the
// spec.allocateLoadBalancerNodePorts field against the
// ServiceLBNodePortControl feature gate: with the gate on, node ports are
// allocated only when the field requests them (or a port is explicitly
// specified); with the gate off, node ports are always allocated.
func TestAllocateLoadBalancerNodePorts(t *testing.T) {
	testcases := []struct {
		name                 string
		svc                  *api.Service
		expectNodePorts      bool
		allocateNodePortGate bool
		expectError          bool
	}{{
		name: "allocate false, gate on, not specified",
		svc: svctest.MakeService("alloc-false",
			svctest.SetTypeLoadBalancer,
			svctest.SetAllocateLoadBalancerNodePorts(false)),
		expectNodePorts:      false,
		allocateNodePortGate: true,
	}, {
		name: "allocate true, gate on, not specified",
		svc: svctest.MakeService("alloc-true",
			svctest.SetTypeLoadBalancer,
			svctest.SetAllocateLoadBalancerNodePorts(true)),
		expectNodePorts:      true,
		allocateNodePortGate: true,
	}, {
		// An explicitly specified port is honored even when allocate=false.
		name: "allocate false, gate on, port specified",
		svc: svctest.MakeService("alloc-false-specific",
			svctest.SetTypeLoadBalancer,
			svctest.SetNodePorts(30000),
			svctest.SetAllocateLoadBalancerNodePorts(false)),
		expectNodePorts:      true,
		allocateNodePortGate: true,
	}, {
		name: "allocate true, gate on, port specified",
		svc: svctest.MakeService("alloc-true-specific",
			svctest.SetTypeLoadBalancer,
			svctest.SetNodePorts(30000),
			svctest.SetAllocateLoadBalancerNodePorts(true)),
		expectNodePorts:      true,
		allocateNodePortGate: true,
	}, {
		// With the gate off the field is ignored and ports are always allocated.
		name: "allocate nil, gate off",
		svc: svctest.MakeService("alloc-nil",
			svctest.SetTypeLoadBalancer,
			func(s *api.Service) {
				s.Spec.AllocateLoadBalancerNodePorts = nil
			}),
		expectNodePorts:      true,
		allocateNodePortGate: false,
	}, {
		name: "allocate false, gate off",
		svc: svctest.MakeService("alloc-false",
			svctest.SetTypeLoadBalancer,
			svctest.SetAllocateLoadBalancerNodePorts(false)),
		expectNodePorts:      true,
		allocateNodePortGate: false,
	}, {
		name: "allocate true, gate off",
		svc: svctest.MakeService("alloc-true",
			svctest.SetTypeLoadBalancer,
			svctest.SetAllocateLoadBalancerNodePorts(true)),
		expectNodePorts:      true,
		allocateNodePortGate: false,
	}}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := genericapirequest.NewDefaultContext()
			// Pin the feature gate for the duration of this subtest.
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ServiceLBNodePortControl, tc.allocateNodePortGate)()
			storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
			defer server.Terminate(t)
			_, err := storage.Create(ctx, tc.svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
			if err != nil {
				// Creation failure is only acceptable for cases expecting it.
				if tc.expectError {
					return
				}
				t.Errorf("%s; Failed to create service: %#v", tc.name, err)
			}
			srv, err := getService(storage, ctx, tc.svc.Name, &metav1.GetOptions{})
			if err != nil {
				t.Errorf("%s; Unexpected error: %v", tc.name, err)
			}
			if srv == nil {
				t.Fatalf("%s; Failed to find service: %s", tc.name, tc.svc.Name)
			}
			serviceNodePorts := collectServiceNodePorts(srv)
			// Presence/absence of node ports must match the expectation.
			if (len(serviceNodePorts) != 0) != tc.expectNodePorts {
				t.Errorf("%s; Allocated NodePorts not as expected", tc.name)
			}
		})
	}
}
// Create a service and verify it can be deleted without error.
func TestServiceRegistryDelete(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)

	svc := svctest.MakeService("foo")
	if _, err := storage.Create(ctx, svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if _, _, err := storage.Delete(ctx, svc.Name, rest.ValidateAllObjectFunc, &metav1.DeleteOptions{}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
// TestServiceRegistryDeleteDryRun verifies that a delete carrying the
// DryRunAll option leaves the service's allocated cluster IP and node port
// untouched in the allocators.
func TestServiceRegistryDeleteDryRun(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)
	// Test dry run delete request with cluster ip
	svc := svctest.MakeService("foo")
	obj, err := storage.Create(ctx, svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Expected no error: %v", err)
	}
	createdSvc := obj.(*api.Service)
	if createdSvc.Spec.ClusterIP == "" {
		t.Fatalf("expected ClusterIP to be set")
	}
	if !ipIsAllocated(t, storage.serviceIPAllocatorsByFamily[storage.defaultServiceIPFamily], createdSvc.Spec.ClusterIP) {
		t.Errorf("expected ClusterIP to be allocated")
	}
	_, _, err = storage.Delete(ctx, svc.Name, rest.ValidateAllObjectFunc, &metav1.DeleteOptions{DryRun: []string{metav1.DryRunAll}})
	if err != nil {
		t.Fatalf("Expected no error: %v", err)
	}
	// The dry-run delete must not have released the IP.
	if !ipIsAllocated(t, storage.serviceIPAllocatorsByFamily[storage.defaultServiceIPFamily], createdSvc.Spec.ClusterIP) {
		t.Errorf("unexpected side effect: ip unallocated")
	}
	// Test dry run delete request with node port
	svc = svctest.MakeService("foo2", svctest.SetTypeNodePort)
	obj, err = storage.Create(ctx, svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Expected no error: %v", err)
	}
	createdSvc = obj.(*api.Service)
	if createdSvc.Spec.Ports[0].NodePort == 0 {
		t.Fatalf("expected NodePort to be set")
	}
	if !portIsAllocated(t, storage.serviceNodePorts, createdSvc.Spec.Ports[0].NodePort) {
		t.Errorf("expected NodePort to be allocated")
	}
	isValidClusterIPFields(t, storage, svc, svc)
	_, _, err = storage.Delete(ctx, svc.Name, rest.ValidateAllObjectFunc, &metav1.DeleteOptions{DryRun: []string{metav1.DryRunAll}})
	if err != nil {
		t.Fatalf("Expected no error: %v", err)
	}
	// The dry-run delete must not have released the node port.
	if !portIsAllocated(t, storage.serviceNodePorts, createdSvc.Spec.Ports[0].NodePort) {
		t.Errorf("unexpected side effect: NodePort unallocated")
	}
}
// TestDualStackServiceRegistryDeleteDryRun verifies that a dry-run delete of
// a dual-stack service releases none of its allocated cluster IPs.
// Local names use Go MixedCaps (was dualstack_storage etc., which violates
// Go naming conventions).
func TestDualStackServiceRegistryDeleteDryRun(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()

	// dry run for non dualstack
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.IPv6DualStack, true)()
	dualstackStorage, dualstackServer := NewTestREST(t, []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol})
	defer dualstackServer.Terminate(t)
	// Test dry run delete request with cluster ip
	dualstackSvc := svctest.MakeService("foo",
		svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
		svctest.SetIPFamilies(api.IPv6Protocol, api.IPv4Protocol),
		svctest.SetClusterIPs("2000:0:0:0:0:0:0:1", "1.2.3.4"))
	_, err := dualstackStorage.Create(ctx, dualstackSvc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Expected no error: %v", err)
	}
	isValidClusterIPFields(t, dualstackStorage, dualstackSvc, dualstackSvc)
	_, _, err = dualstackStorage.Delete(ctx, dualstackSvc.Name, rest.ValidateAllObjectFunc, &metav1.DeleteOptions{DryRun: []string{metav1.DryRunAll}})
	if err != nil {
		t.Fatalf("Expected no error: %v", err)
	}
	// Both family allocators must still hold their respective IPs.
	for i, family := range dualstackSvc.Spec.IPFamilies {
		if !ipIsAllocated(t, dualstackStorage.serviceIPAllocatorsByFamily[family], dualstackSvc.Spec.ClusterIPs[i]) {
			t.Errorf("unexpected side effect: ip unallocated %v", dualstackSvc.Spec.ClusterIPs[i])
		}
	}
}
// Create an ExternalName service and verify it deletes cleanly.
func TestServiceRegistryDeleteExternalName(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)

	extSvc := svctest.MakeService("foo", svctest.SetTypeExternalName)
	if _, err := storage.Create(ctx, extSvc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if _, _, err := storage.Delete(ctx, extSvc.Name, rest.ValidateAllObjectFunc, &metav1.DeleteOptions{}); err != nil {
		t.Fatalf("Expected no error: %v", err)
	}
}
// Exercise converting a ClusterIP service into a LoadBalancer and then
// changing one of its ports.
func TestServiceRegistryUpdateLoadBalancerService(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)

	// Create non-loadbalancer.
	obj, err := storage.Create(ctx, svctest.MakeService("foo"), rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Modify to be loadbalancer.
	asLB := obj.(*api.Service).DeepCopy()
	asLB.Spec.Type = api.ServiceTypeLoadBalancer
	asLB.Spec.AllocateLoadBalancerNodePorts = utilpointer.BoolPtr(true)
	if _, _, err := storage.Update(ctx, asLB.Name, rest.DefaultUpdatedObjectInfo(asLB), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Change port.
	withNewPort := asLB.DeepCopy()
	withNewPort.Spec.Ports[0].Port = 6504
	if _, _, err := storage.Update(ctx, withNewPort.Name, rest.DefaultUpdatedObjectInfo(withNewPort), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
}
// Update one port of a multi-port LoadBalancer service.
func TestServiceRegistryUpdateMultiPortLoadBalancerService(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)

	// Create load balancer.
	created, err := storage.Create(ctx,
		svctest.MakeService("foo",
			svctest.SetTypeLoadBalancer,
			svctest.SetPorts(
				svctest.MakeServicePort("p", 6502, intstr.FromInt(6502), api.ProtocolTCP),
				svctest.MakeServicePort("q", 8086, intstr.FromInt(8086), api.ProtocolTCP))),
		rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Modify ports
	modified := created.(*api.Service).DeepCopy()
	modified.Spec.Ports[1].Port = 8088
	if _, _, err := storage.Update(ctx, modified.Name, rest.DefaultUpdatedObjectInfo(modified), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{}); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
}
// TestServiceRegistryGet verifies that a created service can be retrieved by
// name. The Get error is now checked (it was previously discarded, which
// would turn a Get failure into a nil-interface type-assertion panic).
func TestServiceRegistryGet(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)
	_, err := storage.Create(ctx, svctest.MakeService("foo"), rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("error creating service: %v", err)
	}
	obj, err := storage.Get(ctx, "foo", &metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	svc := obj.(*api.Service)
	if e, a := "foo", svc.Name; e != a {
		t.Errorf("Expected %v, but got %v", e, a)
	}
}
// this is local because it's not fully fleshed out enough for general use.
// makePod builds a minimal always-restart pod in the default namespace with
// the given status IPs.
func makePod(name string, ips ...string) api.Pod {
	// Convert the IP strings up front; the slice stays non-nil even when empty.
	podIPs := make([]api.PodIP, 0, len(ips))
	for _, addr := range ips {
		podIPs = append(podIPs, api.PodIP{IP: addr})
	}
	return api.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: metav1.NamespaceDefault,
		},
		Spec: api.PodSpec{
			RestartPolicy: api.RestartPolicyAlways,
			DNSPolicy:     api.DNSDefault,
			Containers:    []api.Container{{Name: "ctr", Image: "img", ImagePullPolicy: api.PullIfNotPresent, TerminationMessagePolicy: api.TerminationMessageReadFile}},
		},
		Status: api.PodStatus{
			PodIPs: podIPs,
		},
	}
}
// TestServiceRegistryResourceLocation verifies ResourceLocation's resolution
// of "svcname", "svcname:port", and "scheme:svcname:port" queries to an
// endpoint address of the named service.
func TestServiceRegistryResourceLocation(t *testing.T) {
	pods := []api.Pod{
		makePod("unnamed", "1.2.3.4", "1.2.3.5"),
		makePod("named", "1.2.3.6", "1.2.3.7"),
		makePod("no-endpoints", "9.9.9.9"), // to prove this does not get chosen
	}
	// Endpoints fixtures; names line up with the services created below.
	endpoints := []*api.Endpoints{
		epstest.MakeEndpoints("unnamed",
			[]api.EndpointAddress{
				epstest.MakeEndpointAddress("1.2.3.4", "unnamed"),
			},
			[]api.EndpointPort{
				epstest.MakeEndpointPort("", 80),
			}),
		epstest.MakeEndpoints("unnamed2",
			[]api.EndpointAddress{
				epstest.MakeEndpointAddress("1.2.3.5", "unnamed"),
			},
			[]api.EndpointPort{
				epstest.MakeEndpointPort("", 80),
			}),
		epstest.MakeEndpoints("named",
			[]api.EndpointAddress{
				epstest.MakeEndpointAddress("1.2.3.6", "named"),
			},
			[]api.EndpointPort{
				epstest.MakeEndpointPort("p", 80),
				epstest.MakeEndpointPort("q", 81),
			}),
		epstest.MakeEndpoints("no-endpoints", nil, nil), // to prove this does not get chosen
	}
	storage, server := NewTestRESTWithPods(t, endpoints, pods, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)
	ctx := genericapirequest.NewDefaultContext()
	// Single-port services (service port 93 -> target port 80).
	for _, name := range []string{"unnamed", "unnamed2", "no-endpoints"} {
		_, err := storage.Create(ctx,
			svctest.MakeService(name, svctest.SetPorts(
				svctest.MakeServicePort("", 93, intstr.FromInt(80), api.ProtocolTCP))),
			rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("unexpected error creating service %q: %v", name, err)
		}
	}
	// Multi-port service with named ports p (93->80) and q (76->81).
	_, err := storage.Create(ctx,
		svctest.MakeService("named", svctest.SetPorts(
			svctest.MakeServicePort("p", 93, intstr.FromInt(80), api.ProtocolTCP),
			svctest.MakeServicePort("q", 76, intstr.FromInt(81), api.ProtocolTCP))),
		rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("unexpected error creating service %q: %v", "named", err)
	}
	redirector := rest.Redirector(storage)
	// query is the ResourceLocation id; expect is the resolved URL, or err
	// marks queries that must fail (unknown port, no endpoints, no service).
	cases := []struct {
		query  string
		err    bool
		expect string
	}{{
		query:  "unnamed",
		expect: "//1.2.3.4:80",
	}, {
		query:  "unnamed:",
		expect: "//1.2.3.4:80",
	}, {
		query:  "unnamed:93",
		expect: "//1.2.3.4:80",
	}, {
		query:  "http:unnamed:",
		expect: "http://1.2.3.4:80",
	}, {
		query:  "http:unnamed:93",
		expect: "http://1.2.3.4:80",
	}, {
		query: "unnamed:80",
		err:   true,
	}, {
		query:  "unnamed2",
		expect: "//1.2.3.5:80",
	}, {
		query:  "named:p",
		expect: "//1.2.3.6:80",
	}, {
		query:  "named:q",
		expect: "//1.2.3.6:81",
	}, {
		query:  "named:93",
		expect: "//1.2.3.6:80",
	}, {
		query:  "named:76",
		expect: "//1.2.3.6:81",
	}, {
		query:  "http:named:p",
		expect: "http://1.2.3.6:80",
	}, {
		query:  "http:named:q",
		expect: "http://1.2.3.6:81",
	}, {
		query: "named:bad",
		err:   true,
	}, {
		query: "no-endpoints",
		err:   true,
	}, {
		query: "non-existent",
		err:   true,
	}}
	for _, tc := range cases {
		t.Run(tc.query, func(t *testing.T) {
			location, _, err := redirector.ResourceLocation(ctx, tc.query)
			if tc.err == false && err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			if tc.err == true && err == nil {
				t.Fatalf("unexpected success")
			}
			if !tc.err {
				if location == nil {
					t.Errorf("unexpected location: %v", location)
				}
				if e, a := tc.expect, location.String(); e != a {
					t.Errorf("expected %q, but got %q", e, a)
				}
			}
		})
	}
}
// TestServiceRegistryList verifies that List returns all created services in
// name order. The List error is now checked (it was previously discarded,
// which would turn a List failure into a type-assertion panic).
func TestServiceRegistryList(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)
	_, err := storage.Create(ctx, svctest.MakeService("foo"), rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	_, err = storage.Create(ctx, svctest.MakeService("foo2"), rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	s, err := storage.List(ctx, nil)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	sl := s.(*api.ServiceList)
	if len(sl.Items) != 2 {
		t.Fatalf("Expected 2 services, but got %v", len(sl.Items))
	}
	if e, a := "foo", sl.Items[0].Name; e != a {
		t.Errorf("Expected %v, but got %v", e, a)
	}
	if e, a := "foo2", sl.Items[1].Name; e != a {
		t.Errorf("Expected %v, but got %v", e, a)
	}
}
// TestServiceRegistryIPAllocation checks that created services receive a
// ClusterIP from the configured range, and that a specifically requested,
// currently-unallocated IP is honored.
func TestServiceRegistryIPAllocation(t *testing.T) {
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)
	svc1 := svctest.MakeService("foo")
	ctx := genericapirequest.NewDefaultContext()
	obj, err := storage.Create(ctx, svc1, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("error creating service: %v", err)
	}
	createdSvc1 := obj.(*api.Service)
	if createdSvc1.Name != "foo" {
		t.Errorf("Expected foo, but got %v", createdSvc1.Name)
	}
	// The auto-assigned IP must fall inside the test CIDR.
	if !makeIPNet(t).Contains(net.ParseIP(createdSvc1.Spec.ClusterIPs[0])) {
		t.Errorf("Unexpected ClusterIP: %s", createdSvc1.Spec.ClusterIPs[0])
	}
	svc2 := svctest.MakeService("bar")
	ctx = genericapirequest.NewDefaultContext()
	obj, err = storage.Create(ctx, svc2, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("error creating service: %v", err)
	}
	createdSvc2 := obj.(*api.Service)
	if createdSvc2.Name != "bar" {
		t.Errorf("Expected bar, but got %v", createdSvc2.Name)
	}
	if !makeIPNet(t).Contains(net.ParseIP(createdSvc2.Spec.ClusterIPs[0])) {
		t.Errorf("Unexpected ClusterIP: %s", createdSvc2.Spec.ClusterIPs[0])
	}
	// Find a candidate IP that is not yet allocated. The "not-an-ip" sentinel
	// deliberately makes the Create below fail loudly if every candidate is
	// somehow already taken.
	testIPs := []string{"1.2.3.93", "1.2.3.94", "1.2.3.95", "1.2.3.96"}
	testIP := "not-an-ip"
	for _, ip := range testIPs {
		if !ipIsAllocated(t, storage.serviceIPAllocatorsByFamily[storage.defaultServiceIPFamily].(*ipallocator.Range), ip) {
			testIP = ip
			break
		}
	}
	svc3 := svctest.MakeService("qux", svctest.SetClusterIPs(testIP))
	ctx = genericapirequest.NewDefaultContext()
	obj, err = storage.Create(ctx, svc3, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}
	createdSvc3 := obj.(*api.Service)
	if createdSvc3.Spec.ClusterIPs[0] != testIP { // specific IP
		t.Errorf("Unexpected ClusterIP: %s", createdSvc3.Spec.ClusterIPs[0])
	}
}
// TestServiceRegistryIPReallocation checks that a cluster IP released by
// deleting a service can immediately be requested by a new service.
func TestServiceRegistryIPReallocation(t *testing.T) {
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)
	svc1 := svctest.MakeService("foo")
	ctx := genericapirequest.NewDefaultContext()
	obj, err := storage.Create(ctx, svc1, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("error creating service: %v", err)
	}
	createdSvc1 := obj.(*api.Service)
	if createdSvc1.Name != "foo" {
		t.Errorf("Expected foo, but got %v", createdSvc1.Name)
	}
	if !makeIPNet(t).Contains(net.ParseIP(createdSvc1.Spec.ClusterIPs[0])) {
		t.Errorf("Unexpected ClusterIP: %s", createdSvc1.Spec.ClusterIPs[0])
	}
	// Release the IP by deleting the service.
	_, _, err = storage.Delete(ctx, createdSvc1.Name, rest.ValidateAllObjectFunc, &metav1.DeleteOptions{})
	if err != nil {
		t.Errorf("Unexpected error deleting service: %v", err)
	}
	// NOTE(review): this reads svc1.Spec.ClusterIP — the object that was
	// passed to Create, not the returned one — so it presumably relies on
	// Create populating its argument in place; confirm before copying this
	// pattern elsewhere.
	svc2 := svctest.MakeService("bar", svctest.SetClusterIPs(svc1.Spec.ClusterIP))
	ctx = genericapirequest.NewDefaultContext()
	obj, err = storage.Create(ctx, svc2, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("error creating service: %v", err)
	}
	createdSvc2 := obj.(*api.Service)
	if createdSvc2.Name != "bar" {
		t.Errorf("Expected bar, but got %v", createdSvc2.Name)
	}
	if !makeIPNet(t).Contains(net.ParseIP(createdSvc2.Spec.ClusterIPs[0])) {
		t.Errorf("Unexpected ClusterIP: %s", createdSvc2.Spec.ClusterIPs[0])
	}
}
// TestServiceRegistryIPUpdate verifies that a plain port update succeeds,
// while an update that also changes the ClusterIP is rejected as invalid.
func TestServiceRegistryIPUpdate(t *testing.T) {
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)
	svc := svctest.MakeService("foo")
	ctx := genericapirequest.NewDefaultContext()
	createdSvc, err := storage.Create(ctx, svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	createdService := createdSvc.(*api.Service)
	if createdService.Spec.Ports[0].Port != svc.Spec.Ports[0].Port {
		t.Errorf("Expected port %d, but got %v", svc.Spec.Ports[0].Port, createdService.Spec.Ports[0].Port)
	}
	if !makeIPNet(t).Contains(net.ParseIP(createdService.Spec.ClusterIPs[0])) {
		t.Errorf("Unexpected ClusterIP: %s", createdService.Spec.ClusterIPs[0])
	}
	// Changing only the port is a legal update.
	update := createdService.DeepCopy()
	update.Spec.Ports[0].Port = 6503
	updatedSvc, _, errUpdate := storage.Update(ctx, update.Name, rest.DefaultUpdatedObjectInfo(update), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
	if errUpdate != nil {
		t.Fatalf("unexpected error during update %v", errUpdate)
	}
	updatedService := updatedSvc.(*api.Service)
	if updatedService.Spec.Ports[0].Port != 6503 {
		t.Errorf("Expected port 6503, but got %v", updatedService.Spec.Ports[0].Port)
	}
	// Pick an IP that is currently unallocated; testIP stays "" if every
	// candidate is taken.
	testIPs := []string{"1.2.3.93", "1.2.3.94", "1.2.3.95", "1.2.3.96"}
	testIP := ""
	for _, ip := range testIPs {
		if !ipIsAllocated(t, storage.serviceIPAllocatorsByFamily[storage.defaultServiceIPFamily].(*ipallocator.Range), ip) {
			testIP = ip
			break
		}
	}
	// An update that rewrites the ClusterIP must be rejected with an
	// "invalid" error (asserted below via errors.IsInvalid).
	update = createdService.DeepCopy()
	update.Spec.Ports[0].Port = 6503
	update.Spec.ClusterIP = testIP
	update.Spec.ClusterIPs[0] = testIP
	_, _, err = storage.Update(ctx, update.Name, rest.DefaultUpdatedObjectInfo(update), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
	if err == nil || !errors.IsInvalid(err) {
		t.Errorf("Unexpected error type: %v", err)
	}
}
// TestServiceRegistryIPLoadBalancer creates a LoadBalancer service, checks
// its allocated cluster IP, and verifies a no-op update succeeds.
func TestServiceRegistryIPLoadBalancer(t *testing.T) {
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)
	svc := svctest.MakeService("foo", svctest.SetTypeLoadBalancer)
	ctx := genericapirequest.NewDefaultContext()
	createdSvc, err := storage.Create(ctx, svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	// Fatalf (was Errorf): continuing with a nil createdSvc would panic on
	// the type assertion below instead of failing cleanly.
	if createdSvc == nil || err != nil {
		t.Fatalf("Unexpected failure creating service %v", err)
	}
	createdService := createdSvc.(*api.Service)
	if createdService.Spec.Ports[0].Port != svc.Spec.Ports[0].Port {
		t.Errorf("Expected port %d, but got %v", svc.Spec.Ports[0].Port, createdService.Spec.Ports[0].Port)
	}
	if !makeIPNet(t).Contains(net.ParseIP(createdService.Spec.ClusterIPs[0])) {
		t.Errorf("Unexpected ClusterIP: %s", createdService.Spec.ClusterIPs[0])
	}
	// A no-op update (unchanged spec) must succeed.
	update := createdService.DeepCopy()
	_, _, err = storage.Update(ctx, update.Name, rest.DefaultUpdatedObjectInfo(update), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}
}
// Validate allocation of a nodePort when ExternalTrafficPolicy is set to Local
// and type is LoadBalancer.
func TestServiceRegistryExternalTrafficHealthCheckNodePortAllocation(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)
	svc := svctest.MakeService("external-lb-esipp",
		svctest.SetTypeLoadBalancer,
		func(s *api.Service) {
			s.Spec.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyTypeLocal
		},
	)
	obj, err := storage.Create(ctx, svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	// Fatalf (was Errorf): continuing with a nil obj would panic on the type
	// assertion below instead of failing cleanly.
	if obj == nil || err != nil {
		t.Fatalf("Unexpected failure creating service %v", err)
	}
	createdSvc := obj.(*api.Service)
	if !service.NeedsHealthCheck(createdSvc) {
		t.Errorf("Expecting health check needed, returned health check not needed instead")
	}
	// The health check node port must have been allocated.
	port := createdSvc.Spec.HealthCheckNodePort
	if port == 0 {
		t.Errorf("Failed to allocate health check node port and set the HealthCheckNodePort")
	}
}
// Validate using the user specified nodePort when ExternalTrafficPolicy is set to Local
// and type is LoadBalancer.
func TestServiceRegistryExternalTrafficHealthCheckNodePortUserAllocation(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)

	requested := svctest.MakeService("external-lb-esipp",
		svctest.SetTypeLoadBalancer,
		func(s *api.Service) {
			// hard-code NodePort to make sure it doesn't conflict with the healthport.
			// TODO: remove this once http://issue.k8s.io/93922 fixes auto-allocation conflicting with user-specified health check ports
			s.Spec.Ports[0].NodePort = 30500
			s.Spec.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyTypeLocal
			s.Spec.HealthCheckNodePort = 30501
		},
	)
	obj, err := storage.Create(ctx, requested, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	if obj == nil || err != nil {
		t.Fatalf("Unexpected failure creating service :%v", err)
	}
	created := obj.(*api.Service)
	if !service.NeedsHealthCheck(created) {
		t.Errorf("Expecting health check needed, returned health check not needed instead")
	}
	hcPort := created.Spec.HealthCheckNodePort
	if hcPort == 0 {
		t.Errorf("Failed to allocate health check node port and set the HealthCheckNodePort")
	}
	if hcPort != 30501 {
		t.Errorf("Failed to allocate requested nodePort expected %d, got %d", 30501, hcPort)
	}
}
// Validate that the service creation fails when the requested port number is -1.
func TestServiceRegistryExternalTrafficHealthCheckNodePortNegative(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)

	svc := svctest.MakeService("external-lb-esipp", svctest.SetTypeLoadBalancer, func(s *api.Service) {
		s.Spec.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyTypeLocal
		s.Spec.HealthCheckNodePort = int32(-1)
	})
	// Success here (non-nil object, nil error) means validation missed the
	// bad port value.
	if obj, err := storage.Create(ctx, svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{}); obj != nil && err == nil {
		t.Errorf("Unexpected creation of service with invalid HealthCheckNodePort specified")
	}
}
// Validate that the health check nodePort is not allocated when ExternalTrafficPolicy is set to Global.
// (The "Global" policy is spelled ServiceExternalTrafficPolicyTypeCluster in the API.)
func TestServiceRegistryExternalTrafficGlobal(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)
	svc := svctest.MakeService("external-lb-esipp",
		svctest.SetTypeLoadBalancer,
		func(s *api.Service) {
			s.Spec.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyTypeCluster
		},
	)
	obj, err := storage.Create(ctx, svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	// Fatalf, not Errorf: if creation failed, obj may be nil and the type
	// assertion below would panic instead of failing the test cleanly.
	if obj == nil || err != nil {
		t.Fatalf("Unexpected failure creating service %v", err)
	}
	createdSvc := obj.(*api.Service)
	if service.NeedsHealthCheck(createdSvc) {
		t.Errorf("Expecting health check not needed, returned health check needed instead")
	}
	// Make sure the service does not have the health check node port allocated
	port := createdSvc.Spec.HealthCheckNodePort
	if port != 0 {
		t.Errorf("Unexpected allocation of health check node port: %v", port)
	}
}
// Validate the internalTrafficPolicy field when set to "Cluster" then updated to "Local"
func TestServiceRegistryInternalTrafficPolicyClusterThenLocal(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)
	svc := svctest.MakeService("internal-traffic-policy-cluster",
		svctest.SetInternalTrafficPolicy(api.ServiceInternalTrafficPolicyCluster),
	)
	obj, err := storage.Create(ctx, svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	// Fatalf, not Errorf: a nil obj would make the type assertion below panic.
	if obj == nil || err != nil {
		t.Fatalf("Unexpected failure creating service %v", err)
	}
	createdSvc := obj.(*api.Service)
	if *createdSvc.Spec.InternalTrafficPolicy != api.ServiceInternalTrafficPolicyCluster {
		t.Errorf("Expecting internalTrafficPolicy field to have value Cluster, got: %s", *createdSvc.Spec.InternalTrafficPolicy)
	}
	// Flip the policy to Local and verify the new value round-trips through Update.
	update := createdSvc.DeepCopy()
	local := api.ServiceInternalTrafficPolicyLocal
	update.Spec.InternalTrafficPolicy = &local
	updatedSvc, _, errUpdate := storage.Update(ctx, update.Name, rest.DefaultUpdatedObjectInfo(update), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
	if errUpdate != nil {
		t.Fatalf("unexpected error during update %v", errUpdate)
	}
	updatedService := updatedSvc.(*api.Service)
	if *updatedService.Spec.InternalTrafficPolicy != api.ServiceInternalTrafficPolicyLocal {
		t.Errorf("Expected internalTrafficPolicy to be Local, got: %s", *updatedService.Spec.InternalTrafficPolicy)
	}
}
// Validate the internalTrafficPolicy field when set to "Local" and then updated to "Cluster"
func TestServiceRegistryInternalTrafficPolicyLocalThenCluster(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)
	svc := svctest.MakeService("internal-traffic-policy-cluster",
		svctest.SetInternalTrafficPolicy(api.ServiceInternalTrafficPolicyLocal),
	)
	obj, err := storage.Create(ctx, svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
	// Fatalf, not Errorf: a nil obj would make the type assertion below panic.
	if obj == nil || err != nil {
		t.Fatalf("Unexpected failure creating service %v", err)
	}
	createdSvc := obj.(*api.Service)
	if *createdSvc.Spec.InternalTrafficPolicy != api.ServiceInternalTrafficPolicyLocal {
		t.Errorf("Expecting internalTrafficPolicy field to have value Local, got: %s", *createdSvc.Spec.InternalTrafficPolicy)
	}
	// Flip the policy to Cluster and verify the new value round-trips through Update.
	update := createdSvc.DeepCopy()
	cluster := api.ServiceInternalTrafficPolicyCluster
	update.Spec.InternalTrafficPolicy = &cluster
	updatedSvc, _, errUpdate := storage.Update(ctx, update.Name, rest.DefaultUpdatedObjectInfo(update), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
	if errUpdate != nil {
		t.Fatalf("unexpected error during update %v", errUpdate)
	}
	updatedService := updatedSvc.(*api.Service)
	if *updatedService.Spec.InternalTrafficPolicy != api.ServiceInternalTrafficPolicyCluster {
		t.Errorf("Expected internalTrafficPolicy to be Cluster, got: %s", *updatedService.Spec.InternalTrafficPolicy)
	}
}
// TestInitClusterIP exercises ClusterIP allocation at service creation time,
// across single-stack and dual-stack allocator configurations, auto-allocated
// and user-specified IPs, and IPs that are already taken.
func TestInitClusterIP(t *testing.T) {
	testCases := []struct {
		name string
		svc  *api.Service

		enableDualStackAllocator bool                     // register an IPv6 allocator alongside IPv4
		preAllocateClusterIPs    map[api.IPFamily]string  // IPs to reserve before the create, to force conflicts
		expectError              bool
		expectedCountIPs         int      // number of real (non-"None") ClusterIPs expected
		expectedClusterIPs       []string // expected values, positional
	}{{
		name:                     "Allocate single stack ClusterIP (v4)",
		svc:                      svctest.MakeService("foo"),
		enableDualStackAllocator: false,
		expectError:              false,
		preAllocateClusterIPs:    nil,
		expectedCountIPs:         1,
	}, {
		name: "Allocate single ClusterIP (v6)",
		svc: svctest.MakeService("foo",
			svctest.SetIPFamilies(api.IPv6Protocol)),
		expectError:              false,
		enableDualStackAllocator: true,
		preAllocateClusterIPs:    nil,
		expectedCountIPs:         1,
	}, {
		name: "Allocate specified ClusterIP (v4)",
		svc: svctest.MakeService("foo",
			svctest.SetIPFamilies(api.IPv4Protocol),
			svctest.SetClusterIPs("1.2.3.4")),
		expectError:              false,
		enableDualStackAllocator: true,
		preAllocateClusterIPs:    nil,
		expectedCountIPs:         1,
		expectedClusterIPs:       []string{"1.2.3.4"},
	}, {
		name: "Allocate specified ClusterIP-v6",
		svc: svctest.MakeService("foo",
			svctest.SetIPFamilies(api.IPv6Protocol),
			svctest.SetClusterIPs("2000:0:0:0:0:0:0:1")),
		expectError:              false,
		enableDualStackAllocator: true,
		expectedCountIPs:         1,
		expectedClusterIPs:       []string{"2000:0:0:0:0:0:0:1"},
	}, {
		name: "Allocate dual stack - on a non dual stack ",
		svc: svctest.MakeService("foo",
			svctest.SetIPFamilies(api.IPv4Protocol)),
		expectError:              false,
		enableDualStackAllocator: false,
		expectedCountIPs:         1,
	}, {
		name: "Allocate dual stack - upgrade - v4, v6",
		svc: svctest.MakeService("foo",
			svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack),
			svctest.SetIPFamilies(api.IPv4Protocol)),
		expectError:              false,
		enableDualStackAllocator: true,
		expectedCountIPs:         2,
	}, {
		name: "Allocate dual stack - upgrade - v4, v6 - specific first IP",
		svc: svctest.MakeService("foo",
			svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack),
			svctest.SetIPFamilies(api.IPv4Protocol),
			svctest.SetClusterIPs("1.2.3.4")),
		expectError:              false,
		enableDualStackAllocator: true,
		expectedCountIPs:         2,
		expectedClusterIPs:       []string{"1.2.3.4"},
	}, {
		name: "Allocate dual stack - upgrade - v6, v4",
		svc: svctest.MakeService("foo",
			svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack),
			svctest.SetIPFamilies(api.IPv6Protocol)),
		expectError:              false,
		enableDualStackAllocator: true,
		expectedCountIPs:         2,
	}, {
		name: "Allocate dual stack - v4, v6 - specific ips",
		svc: svctest.MakeService("foo",
			svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
			svctest.SetIPFamilies(api.IPv4Protocol, api.IPv6Protocol),
			svctest.SetClusterIPs("1.2.3.4", "2000:0:0:0:0:0:0:1")),
		expectError:              false,
		enableDualStackAllocator: true,
		expectedCountIPs:         2,
		expectedClusterIPs:       []string{"1.2.3.4", "2000:0:0:0:0:0:0:1"},
	}, {
		name: "Allocate dual stack - upgrade - v6, v4 - specific ips",
		svc: svctest.MakeService("foo",
			svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
			svctest.SetIPFamilies(api.IPv6Protocol, api.IPv4Protocol),
			svctest.SetClusterIPs("2000:0:0:0:0:0:0:1", "1.2.3.4")),
		expectError:              false,
		enableDualStackAllocator: true,
		expectedCountIPs:         2,
		expectedClusterIPs:       []string{"2000:0:0:0:0:0:0:1", "1.2.3.4"},
	}, {
		name: "Shouldn't allocate ClusterIP",
		svc: svctest.MakeService("foo",
			svctest.SetClusterIPs("None")),
		expectError:              false,
		enableDualStackAllocator: false,
		expectedCountIPs:         0,
	}, {
		name: "single stack, ip is pre allocated (ipv4)",
		svc: svctest.MakeService("foo",
			svctest.SetIPFamilyPolicy(api.IPFamilyPolicySingleStack),
			svctest.SetIPFamilies(api.IPv4Protocol),
			svctest.SetClusterIPs("1.2.3.4")),
		expectError:              true,
		enableDualStackAllocator: false,
		expectedCountIPs:         0,
		preAllocateClusterIPs:    map[api.IPFamily]string{api.IPv4Protocol: "1.2.3.4"},
	}, {
		name: "single stack, ip is pre allocated (ipv6)",
		svc: svctest.MakeService("foo",
			svctest.SetIPFamilyPolicy(api.IPFamilyPolicySingleStack),
			svctest.SetIPFamilies(api.IPv6Protocol),
			svctest.SetClusterIPs("2000:0:0:0:0:0:0:1")),
		expectError:              true,
		enableDualStackAllocator: true, // ipv6 allocator is always the second one during test
		expectedCountIPs:         0,
		preAllocateClusterIPs:    map[api.IPFamily]string{api.IPv6Protocol: "2000:0:0:0:0:0:0:1"},
	}, {
		name: "Allocate dual stack - upgrade - v6, v4 - specific ips (first ip can't be allocated)",
		svc: svctest.MakeService("foo",
			svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
			svctest.SetIPFamilies(api.IPv6Protocol, api.IPv4Protocol),
			svctest.SetClusterIPs("2000:0:0:0:0:0:0:1", "1.2.3.4")),
		expectError:              true,
		enableDualStackAllocator: true,
		expectedCountIPs:         0,
		preAllocateClusterIPs:    map[api.IPFamily]string{api.IPv6Protocol: "2000:0:0:0:0:0:0:1"},
	}, {
		name: "Allocate dual stack - upgrade - v6, v4 - specific ips (second ip can't be allocated)",
		svc: svctest.MakeService("foo",
			svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
			svctest.SetIPFamilies(api.IPv6Protocol, api.IPv4Protocol),
			svctest.SetClusterIPs("2000:0:0:0:0:0:0:1", "1.2.3.4")),
		expectError:              true,
		enableDualStackAllocator: true,
		expectedCountIPs:         0,
		preAllocateClusterIPs:    map[api.IPFamily]string{api.IPv4Protocol: "1.2.3.4"},
	}}
	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.IPv6DualStack, true)()
			// create the rest stack
			families := []api.IPFamily{api.IPv4Protocol}
			if test.enableDualStackAllocator {
				families = append(families, api.IPv6Protocol)
			}
			storage, server := NewTestREST(t, families)
			defer server.Terminate(t)
			copySvc := test.svc.DeepCopy()
			// pre allocate ips if any
			for family, ip := range test.preAllocateClusterIPs {
				allocator, ok := storage.serviceIPAllocatorsByFamily[family]
				if !ok {
					t.Fatalf("test is incorrect, allocator does not exist on rest")
				}
				if err := allocator.Allocate(net.ParseIP(ip)); err != nil {
					t.Fatalf("test is incorrect, allocator failed to pre allocate IP with error:%v", err)
				}
			}
			ctx := genericapirequest.NewDefaultContext()
			createdSvc, err := storage.Create(ctx, test.svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
			if test.expectError && err == nil {
				t.Fatalf("error was expected, but no error was returned")
			}
			if !test.expectError && err != nil {
				t.Fatalf("error was not expected, but got error %v", err)
			}
			if err != nil {
				return // no more testing needed for this case
			}
			newSvc := createdSvc.(*api.Service)
			isValidClusterIPFields(t, storage, copySvc, newSvc)
			// if it has ips then let us check they have been correctly allocated
			if newSvc.Spec.ClusterIPs[0] != api.ClusterIPNone {
				for _, ip := range newSvc.Spec.ClusterIPs {
					family := api.IPv4Protocol
					if netutil.IsIPv6String(ip) {
						family = api.IPv6Protocol
					}
					allocator := storage.serviceIPAllocatorsByFamily[family]
					if !ipIsAllocated(t, allocator, ip) {
						t.Fatalf("expected ip:%v to be allocated by %v allocator. it was not", ip, family)
					}
				}
			}
			// count the real IPs ("None" on a headless service is not an allocation)
			allocatedIPs := 0
			for _, ip := range newSvc.Spec.ClusterIPs {
				if ip != api.ClusterIPNone {
					allocatedIPs++
				}
			}
			if allocatedIPs != test.expectedCountIPs {
				t.Fatalf("incorrect allocated IP count expected %v got %v", test.expectedCountIPs, allocatedIPs)
			}
			// user-specified IPs must come back in the same positions they were requested
			for i, ip := range test.expectedClusterIPs {
				if i >= len(newSvc.Spec.ClusterIPs) {
					t.Fatalf("incorrect ips were assigned. expected to find %+v in %+v",
						ip, newSvc.Spec.ClusterIPs)
				}
				if ip != newSvc.Spec.ClusterIPs[i] {
					t.Fatalf("incorrect ips were assigned. expected to find %+v == %+v at position %v",
						ip, newSvc.Spec.ClusterIPs[i], i)
				}
			}
			// the following apply only on dual stack
			if !utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) {
				return
			}
			// a service should be upgraded to two ClusterIPs exactly when it asks for
			// two families, is not single-stack, and both allocators are available
			shouldUpgrade := len(newSvc.Spec.IPFamilies) == 2 && *(newSvc.Spec.IPFamilyPolicy) != api.IPFamilyPolicySingleStack && len(storage.serviceIPAllocatorsByFamily) == 2
			if shouldUpgrade && len(newSvc.Spec.ClusterIPs) < 2 {
				t.Fatalf("Service should have been upgraded %+v", newSvc)
			}
			if !shouldUpgrade && len(newSvc.Spec.ClusterIPs) > 1 {
				t.Fatalf("Service should not have been upgraded %+v", newSvc)
			}
		})
	}
}
// TestInitNodePorts verifies initNodePorts against table-driven cases:
// user-specified NodePorts must be honored positionally, and ports that the
// user did not specify must be auto-allocated from the configured range.
func TestInitNodePorts(t *testing.T) {
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)
	nodePortOp := portallocator.StartOperation(storage.serviceNodePorts, false)
	testCases := []struct {
		name                     string
		service                  *api.Service
		// Expected NodePorts in spec order; empty means "auto-allocated,
		// just check each result is inside the allocator's range".
		expectSpecifiedNodePorts []int
	}{{
		name:                     "Service doesn't have specified NodePort",
		service:                  svctest.MakeService("foo", svctest.SetTypeNodePort),
		expectSpecifiedNodePorts: []int{},
	}, {
		name: "Service has one specified NodePort",
		service: svctest.MakeService("foo",
			svctest.SetTypeNodePort,
			svctest.SetPorts(
				svctest.MakeServicePort("port-tcp", 53, intstr.FromInt(6502), api.ProtocolTCP)),
			svctest.SetNodePorts(30053)),
		expectSpecifiedNodePorts: []int{30053},
	}, {
		name: "Service has two same ports with different protocols and specifies same NodePorts",
		service: svctest.MakeService("foo",
			svctest.SetTypeNodePort,
			svctest.SetPorts(
				svctest.MakeServicePort("port-tcp", 53, intstr.FromInt(6502), api.ProtocolTCP),
				svctest.MakeServicePort("port-udp", 53, intstr.FromInt(6502), api.ProtocolUDP)),
			svctest.SetNodePorts(30054, 30054)),
		expectSpecifiedNodePorts: []int{30054, 30054},
	}, {
		name: "Service has two same ports with different protocols and specifies different NodePorts",
		service: svctest.MakeService("foo",
			svctest.SetTypeNodePort,
			svctest.SetPorts(
				svctest.MakeServicePort("port-tcp", 53, intstr.FromInt(6502), api.ProtocolTCP),
				svctest.MakeServicePort("port-udp", 53, intstr.FromInt(6502), api.ProtocolUDP)),
			svctest.SetNodePorts(30055, 30056)),
		expectSpecifiedNodePorts: []int{30055, 30056},
	}, {
		name: "Service has two different ports with different protocols and specifies different NodePorts",
		service: svctest.MakeService("foo",
			svctest.SetTypeNodePort,
			svctest.SetPorts(
				svctest.MakeServicePort("port-tcp", 53, intstr.FromInt(6502), api.ProtocolTCP),
				svctest.MakeServicePort("port-udp", 54, intstr.FromInt(6502), api.ProtocolUDP)),
			svctest.SetNodePorts(30057, 30058)),
		expectSpecifiedNodePorts: []int{30057, 30058},
	}, {
		name: "Service has two same ports with different protocols but only specifies one NodePort",
		service: svctest.MakeService("foo",
			svctest.SetTypeNodePort,
			svctest.SetPorts(
				svctest.MakeServicePort("port-tcp", 53, intstr.FromInt(6502), api.ProtocolTCP),
				svctest.MakeServicePort("port-udp", 53, intstr.FromInt(6502), api.ProtocolUDP)),
			svctest.SetNodePorts(30059)),
		// the single specified port is expected to be reused for the second protocol
		expectSpecifiedNodePorts: []int{30059, 30059},
	}}
	for _, test := range testCases {
		err := initNodePorts(test.service, nodePortOp)
		if err != nil {
			t.Errorf("%q: unexpected error: %v", test.name, err)
			continue
		}
		serviceNodePorts := collectServiceNodePorts(test.service)
		if len(test.expectSpecifiedNodePorts) == 0 {
			// auto-allocation case: every resulting port must be in range
			for _, nodePort := range serviceNodePorts {
				if !storage.serviceNodePorts.Has(nodePort) {
					t.Errorf("%q: unexpected NodePort %d, out of range", test.name, nodePort)
				}
			}
		} else if !reflect.DeepEqual(serviceNodePorts, test.expectSpecifiedNodePorts) {
			t.Errorf("%q: expected NodePorts %v, but got %v", test.name, test.expectSpecifiedNodePorts, serviceNodePorts)
		}
		for i := range serviceNodePorts {
			nodePort := serviceNodePorts[i]
			// Release the node port at the end of the test case.
			storage.serviceNodePorts.Release(nodePort)
		}
	}
}
// TestUpdateNodePorts verifies updateNodePorts across old/new service pairs:
// unchanged ports are kept, removed ports go away, and new NodePort services
// get ports allocated (or the user-specified port honored).
func TestUpdateNodePorts(t *testing.T) {
	storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol})
	defer server.Terminate(t)
	nodePortOp := portallocator.StartOperation(storage.serviceNodePorts, false)
	testCases := []struct {
		name                     string
		oldService               *api.Service
		newService               *api.Service
		// Expected NodePorts on the new service, in spec order; empty means
		// "auto-allocated, just check each result is inside the range".
		expectSpecifiedNodePorts []int
	}{{
		name: "Old service and new service have the same NodePort",
		oldService: svctest.MakeService("foo",
			svctest.SetTypeNodePort,
			svctest.SetPorts(
				svctest.MakeServicePort("", 6502, intstr.FromInt(6502), api.ProtocolTCP)),
			svctest.SetNodePorts(30053)),
		newService: svctest.MakeService("foo",
			svctest.SetTypeNodePort,
			svctest.SetPorts(
				svctest.MakeServicePort("", 6502, intstr.FromInt(6502), api.ProtocolTCP)),
			svctest.SetNodePorts(30053)),
		expectSpecifiedNodePorts: []int{30053},
	}, {
		name: "Old service has more NodePorts than new service has",
		oldService: svctest.MakeService("foo",
			svctest.SetTypeNodePort,
			svctest.SetPorts(
				svctest.MakeServicePort("port-tcp", 53, intstr.FromInt(6502), api.ProtocolTCP),
				svctest.MakeServicePort("port-udp", 53, intstr.FromInt(6502), api.ProtocolUDP)),
			svctest.SetNodePorts(30053, 30053)),
		newService: svctest.MakeService("foo",
			svctest.SetTypeNodePort,
			svctest.SetPorts(
				svctest.MakeServicePort("port-tcp", 53, intstr.FromInt(6502), api.ProtocolTCP)),
			svctest.SetNodePorts(30053)),
		expectSpecifiedNodePorts: []int{30053},
	}, {
		name: "Change protocol of ServicePort without changing NodePort",
		oldService: svctest.MakeService("foo",
			svctest.SetTypeNodePort,
			svctest.SetPorts(
				svctest.MakeServicePort("port-tcp", 53, intstr.FromInt(6502), api.ProtocolTCP)),
			svctest.SetNodePorts(30053)),
		newService: svctest.MakeService("foo",
			svctest.SetTypeNodePort,
			svctest.SetPorts(
				svctest.MakeServicePort("port-udp", 53, intstr.FromInt(6502), api.ProtocolUDP)),
			svctest.SetNodePorts(30053)),
		expectSpecifiedNodePorts: []int{30053},
	}, {
		name: "Should allocate NodePort when changing service type to NodePort",
		oldService: svctest.MakeService("foo",
			svctest.SetTypeClusterIP,
			svctest.SetPorts(
				svctest.MakeServicePort("", 6502, intstr.FromInt(6502), api.ProtocolUDP))),
		newService: svctest.MakeService("foo",
			svctest.SetTypeNodePort,
			svctest.SetPorts(
				svctest.MakeServicePort("", 6502, intstr.FromInt(6502), api.ProtocolUDP))),
		expectSpecifiedNodePorts: []int{},
	}, {
		name: "Add new ServicePort with a different protocol without changing port numbers",
		oldService: svctest.MakeService("foo",
			svctest.SetTypeNodePort,
			svctest.SetPorts(
				svctest.MakeServicePort("port-tcp", 53, intstr.FromInt(6502), api.ProtocolTCP)),
			svctest.SetNodePorts(30053)),
		newService: svctest.MakeService("foo",
			svctest.SetTypeNodePort,
			svctest.SetPorts(
				svctest.MakeServicePort("port-tcp", 53, intstr.FromInt(6502), api.ProtocolTCP),
				svctest.MakeServicePort("port-udp", 53, intstr.FromInt(6502), api.ProtocolUDP)),
			svctest.SetNodePorts(30053, 30053)),
		expectSpecifiedNodePorts: []int{30053, 30053},
	}, {
		name: "Change service type from ClusterIP to NodePort with same NodePort number but different protocols",
		oldService: svctest.MakeService("foo",
			svctest.SetTypeClusterIP,
			svctest.SetPorts(
				svctest.MakeServicePort("", 53, intstr.FromInt(6502), api.ProtocolTCP))),
		newService: svctest.MakeService("foo",
			svctest.SetTypeNodePort,
			svctest.SetPorts(
				svctest.MakeServicePort("port-tcp", 53, intstr.FromInt(6502), api.ProtocolTCP),
				svctest.MakeServicePort("port-udp", 53, intstr.FromInt(6502), api.ProtocolUDP)),
			svctest.SetNodePorts(30053, 30053)),
		expectSpecifiedNodePorts: []int{30053, 30053},
	}}
	for _, test := range testCases {
		err := updateNodePorts(test.oldService, test.newService, nodePortOp)
		if err != nil {
			t.Errorf("%q: unexpected error: %v", test.name, err)
			continue
		}
		serviceNodePorts := collectServiceNodePorts(test.newService)
		if len(test.expectSpecifiedNodePorts) == 0 {
			// auto-allocation case: every resulting port must be in range
			for _, nodePort := range serviceNodePorts {
				if !storage.serviceNodePorts.Has(nodePort) {
					t.Errorf("%q: unexpected NodePort %d, out of range", test.name, nodePort)
				}
			}
		} else if !reflect.DeepEqual(serviceNodePorts, test.expectSpecifiedNodePorts) {
			t.Errorf("%q: expected NodePorts %v, but got %v", test.name, test.expectSpecifiedNodePorts, serviceNodePorts)
		}
		for i := range serviceNodePorts {
			nodePort := serviceNodePorts[i]
			// Release the node port at the end of the test case.
			storage.serviceNodePorts.Release(nodePort)
		}
	}
}
// TestServiceUpgrade exercises updating an existing single-stack service to
// dual-stack (and the no-op case), including auto-allocation of the second
// ClusterIP, user-specified second IPs, and conflicts with already-taken IPs.
func TestServiceUpgrade(t *testing.T) {
	requireDualStack := api.IPFamilyPolicyRequireDualStack
	ctx := genericapirequest.NewDefaultContext()
	testCases := []struct {
		name                     string
		updateFunc               func(svc *api.Service) // mutation applied before the Update call
		enableDualStackAllocator bool
		enableDualStackGate      bool
		allocateIPsBeforeUpdate  map[api.IPFamily]string // IPs to reserve up front, to force conflicts
		expectUpgradeError       bool
		svc                      *api.Service
	}{{
		name:                     "normal, no upgrade needed",
		enableDualStackAllocator: false,
		enableDualStackGate:      true,
		allocateIPsBeforeUpdate:  nil,
		expectUpgradeError:       false,
		updateFunc: func(s *api.Service) {
			s.Spec.Selector = map[string]string{"bar": "baz2"}
		},
		svc: svctest.MakeService("foo"),
	}, {
		name:                     "error, no upgrade (has single allocator)",
		enableDualStackAllocator: false,
		enableDualStackGate:      true,
		allocateIPsBeforeUpdate:  nil,
		expectUpgradeError:       true,
		updateFunc: func(s *api.Service) {
			s.Spec.IPFamilyPolicy = &requireDualStack
			s.Spec.IPFamilies = []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol}
		},
		svc: svctest.MakeService("foo", func(s *api.Service) {
			s.Spec.IPFamilies = []api.IPFamily{api.IPv4Protocol}
		}),
	}, {
		name:                     "upgrade to v4,6",
		enableDualStackAllocator: true,
		enableDualStackGate:      true,
		allocateIPsBeforeUpdate:  nil,
		expectUpgradeError:       false,
		updateFunc: func(s *api.Service) {
			s.Spec.IPFamilyPolicy = &requireDualStack
			s.Spec.IPFamilies = []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol}
		},
		svc: svctest.MakeService("foo", func(s *api.Service) {
			s.Spec.IPFamilies = []api.IPFamily{api.IPv4Protocol}
		}),
	}, {
		name:                     "upgrade to v4,6 (specific ip)",
		enableDualStackAllocator: true,
		enableDualStackGate:      true,
		allocateIPsBeforeUpdate:  nil,
		expectUpgradeError:       false,
		updateFunc: func(s *api.Service) {
			s.Spec.IPFamilyPolicy = &requireDualStack
			s.Spec.ClusterIPs = append(s.Spec.ClusterIPs, "2000:0:0:0:0:0:0:1")
			s.Spec.IPFamilies = []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol}
		},
		svc: svctest.MakeService("foo", func(s *api.Service) {
			s.Spec.IPFamilies = []api.IPFamily{api.IPv4Protocol}
		}),
	}, {
		name:                     "upgrade to v4,6 (specific ip) - fail, ip is not available",
		enableDualStackAllocator: true,
		enableDualStackGate:      true,
		allocateIPsBeforeUpdate:  map[api.IPFamily]string{api.IPv6Protocol: "2000:0:0:0:0:0:0:1"},
		expectUpgradeError:       true,
		updateFunc: func(s *api.Service) {
			s.Spec.IPFamilyPolicy = &requireDualStack
			s.Spec.ClusterIPs = append(s.Spec.ClusterIPs, "2000:0:0:0:0:0:0:1")
			s.Spec.IPFamilies = []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol}
		},
		svc: svctest.MakeService("foo", func(s *api.Service) {
			s.Spec.IPFamilies = []api.IPFamily{api.IPv4Protocol}
		}),
	}, {
		name:                     "upgrade to v6,4",
		enableDualStackAllocator: true,
		enableDualStackGate:      true,
		allocateIPsBeforeUpdate:  nil,
		expectUpgradeError:       false,
		updateFunc: func(s *api.Service) {
			s.Spec.IPFamilyPolicy = &requireDualStack
			s.Spec.IPFamilies = []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol}
		},
		svc: svctest.MakeService("foo", func(s *api.Service) {
			s.Spec.IPFamilies = []api.IPFamily{api.IPv6Protocol}
		}),
	}, {
		name:                     "upgrade to v6,4 (specific ip)",
		enableDualStackAllocator: true,
		enableDualStackGate:      true,
		allocateIPsBeforeUpdate:  nil,
		expectUpgradeError:       false,
		updateFunc: func(s *api.Service) {
			s.Spec.IPFamilyPolicy = &requireDualStack
			s.Spec.ClusterIPs = append(s.Spec.ClusterIPs, "1.2.3.4")
			s.Spec.IPFamilies = []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol}
		},
		svc: svctest.MakeService("foo", func(s *api.Service) {
			s.Spec.IPFamilies = []api.IPFamily{api.IPv6Protocol}
		}),
	}, {
		name:                     "upgrade to v6,4 (specific ip) - fail ip is already allocated",
		enableDualStackAllocator: true,
		enableDualStackGate:      true,
		allocateIPsBeforeUpdate:  map[api.IPFamily]string{api.IPv4Protocol: "1.2.3.4"},
		expectUpgradeError:       true,
		updateFunc: func(s *api.Service) {
			s.Spec.IPFamilyPolicy = &requireDualStack
			s.Spec.ClusterIPs = append(s.Spec.ClusterIPs, "1.2.3.4")
			s.Spec.IPFamilies = []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol}
		},
		svc: svctest.MakeService("foo", func(s *api.Service) {
			s.Spec.IPFamilies = []api.IPFamily{api.IPv6Protocol}
		}),
	}}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			families := []api.IPFamily{api.IPv4Protocol}
			if testCase.enableDualStackAllocator {
				families = append(families, api.IPv6Protocol)
			}
			storage, server := NewTestREST(t, families)
			defer server.Terminate(t)
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.IPv6DualStack, testCase.enableDualStackGate)()
			obj, err := storage.Create(ctx, testCase.svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
			if err != nil {
				t.Fatalf("error is unexpected: %v", err)
			}
			createdSvc := obj.(*api.Service)
			// allocated IP
			for family, ip := range testCase.allocateIPsBeforeUpdate {
				alloc := storage.serviceIPAllocatorsByFamily[family]
				if err := alloc.Allocate(net.ParseIP(ip)); err != nil {
					// include the allocator error so a broken test setup is diagnosable
					t.Fatalf("test is incorrect, unable to preallocate ip:%v with error:%v", ip, err)
				}
			}
			// run the modifier
			testCase.updateFunc(createdSvc)
			// run the update
			updated, _, err := storage.Update(ctx,
				createdSvc.Name,
				rest.DefaultUpdatedObjectInfo(createdSvc),
				rest.ValidateAllObjectFunc,
				rest.ValidateAllObjectUpdateFunc,
				false,
				&metav1.UpdateOptions{})
			if err != nil && !testCase.expectUpgradeError {
				t.Fatalf("an error was not expected during upgrade %v", err)
			}
			if err == nil && testCase.expectUpgradeError {
				t.Fatalf("error was expected during upgrade")
			}
			if err != nil {
				return
			}
			updatedSvc := updated.(*api.Service)
			isValidClusterIPFields(t, storage, updatedSvc, updatedSvc)
			// upgrade happens exactly when the updated spec requests two families,
			// is not single-stack, and both allocators are available
			shouldUpgrade := len(createdSvc.Spec.IPFamilies) == 2 && *(createdSvc.Spec.IPFamilyPolicy) != api.IPFamilyPolicySingleStack && len(storage.serviceIPAllocatorsByFamily) == 2
			if shouldUpgrade && len(updatedSvc.Spec.ClusterIPs) < 2 {
				t.Fatalf("Service should have been upgraded %+v", createdSvc)
			}
			if !shouldUpgrade && len(updatedSvc.Spec.ClusterIPs) > 1 {
				t.Fatalf("Service should not have been upgraded %+v", createdSvc)
			}
			// make sure that ips were allocated, correctly
			for i, family := range updatedSvc.Spec.IPFamilies {
				ip := updatedSvc.Spec.ClusterIPs[i]
				allocator := storage.serviceIPAllocatorsByFamily[family]
				if !ipIsAllocated(t, allocator, ip) {
					t.Fatalf("expected ip:%v to be allocated by %v allocator. it was not", ip, family)
				}
			}
		})
	}
}
// TestServiceDowngrade exercises updating a dual-stack service back to
// single-stack, verifying that the secondary ClusterIP is released and that
// no-op updates leave the service untouched.
func TestServiceDowngrade(t *testing.T) {
	requiredDualStack := api.IPFamilyPolicyRequireDualStack
	singleStack := api.IPFamilyPolicySingleStack
	ctx := genericapirequest.NewDefaultContext()
	testCases := []struct {
		name                     string
		updateFunc               func(svc *api.Service) // mutation applied before the Update call
		enableDualStackAllocator bool
		enableDualStackGate      bool
		expectDowngradeError     bool
		svc                      *api.Service
	}{{
		name:                     "normal, no downgrade needed. single stack => single stack",
		enableDualStackAllocator: true,
		enableDualStackGate:      true,
		expectDowngradeError:     false,
		updateFunc:               func(s *api.Service) { s.Spec.Selector = map[string]string{"bar": "baz2"} },
		svc: svctest.MakeService("foo", func(s *api.Service) {
			s.Spec.IPFamilyPolicy = &requiredDualStack
			s.Spec.IPFamilies = []api.IPFamily{api.IPv4Protocol}
		}),
	}, {
		name:                     "normal, no downgrade needed. dual stack => dual stack",
		enableDualStackAllocator: true,
		enableDualStackGate:      true,
		expectDowngradeError:     false,
		updateFunc:               func(s *api.Service) { s.Spec.Selector = map[string]string{"bar": "baz2"} },
		svc: svctest.MakeService("foo", func(s *api.Service) {
			s.Spec.IPFamilyPolicy = &requiredDualStack
			s.Spec.IPFamilies = []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol}
		}),
	}, {
		name:                     "normal, downgrade v4,v6 => v4",
		enableDualStackAllocator: true,
		enableDualStackGate:      true,
		expectDowngradeError:     false,
		updateFunc: func(s *api.Service) {
			s.Spec.IPFamilyPolicy = &singleStack
			s.Spec.ClusterIPs = s.Spec.ClusterIPs[0:1]
			s.Spec.IPFamilies = s.Spec.IPFamilies[0:1]
		},
		svc: svctest.MakeService("foo", func(s *api.Service) {
			s.Spec.IPFamilyPolicy = &requiredDualStack
			s.Spec.IPFamilies = []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol}
		}),
	}, {
		name:                     "normal, downgrade v6,v4 => v6",
		enableDualStackAllocator: true,
		enableDualStackGate:      true,
		expectDowngradeError:     false,
		updateFunc: func(s *api.Service) {
			s.Spec.IPFamilyPolicy = &singleStack
			s.Spec.ClusterIPs = s.Spec.ClusterIPs[0:1]
			s.Spec.IPFamilies = s.Spec.IPFamilies[0:1]
		},
		svc: svctest.MakeService("foo", func(s *api.Service) {
			s.Spec.IPFamilyPolicy = &requiredDualStack
			s.Spec.IPFamilies = []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol}
		}),
	}}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol})
			defer server.Terminate(t)
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.IPv6DualStack, testCase.enableDualStackGate)()
			obj, err := storage.Create(ctx, testCase.svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
			if err != nil {
				t.Fatalf("error is unexpected: %v", err)
			}
			createdSvc := obj.(*api.Service)
			// keep a pristine copy so we can check which IP was released later
			copySvc := createdSvc.DeepCopy()
			// run the modifier
			testCase.updateFunc(createdSvc)
			// run the update
			updated, _, err := storage.Update(ctx,
				createdSvc.Name,
				rest.DefaultUpdatedObjectInfo(createdSvc),
				rest.ValidateAllObjectFunc,
				rest.ValidateAllObjectUpdateFunc,
				false,
				&metav1.UpdateOptions{})
			if err != nil && !testCase.expectDowngradeError {
				t.Fatalf("an error was not expected during downgrade %v", err)
			}
			if err == nil && testCase.expectDowngradeError {
				t.Fatalf("error was expected during downgrade")
			}
			if err != nil {
				return
			}
			updatedSvc := updated.(*api.Service)
			isValidClusterIPFields(t, storage, createdSvc, updatedSvc)
			// downgrade happens exactly when a two-IP service is updated to single-stack
			shouldDowngrade := len(copySvc.Spec.ClusterIPs) == 2 && *(createdSvc.Spec.IPFamilyPolicy) == api.IPFamilyPolicySingleStack
			if shouldDowngrade && len(updatedSvc.Spec.ClusterIPs) > 1 {
				t.Fatalf("Service should have been downgraded %+v", createdSvc)
			}
			if !shouldDowngrade && len(updatedSvc.Spec.ClusterIPs) < 2 {
				t.Fatalf("Service should not have been downgraded %+v", createdSvc)
			}
			if shouldDowngrade {
				// the dropped (secondary) IP must have been returned to its allocator
				releasedIP := copySvc.Spec.ClusterIPs[1]
				releasedIPFamily := copySvc.Spec.IPFamilies[1]
				allocator := storage.serviceIPAllocatorsByFamily[releasedIPFamily]
				if ipIsAllocated(t, allocator, releasedIP) {
					t.Fatalf("expected ip:%v to be released by %v allocator. it was not", releasedIP, releasedIPFamily)
				}
			}
		})
	}
}
func TestDefaultingValidation(t *testing.T) {
singleStack := api.IPFamilyPolicySingleStack
preferDualStack := api.IPFamilyPolicyPreferDualStack
requireDualStack := api.IPFamilyPolicyRequireDualStack
// takes in REST and modify it for a specific config
fnMakeSingleStackIPv4Allocator := func(rest *REST) {
rest.defaultServiceIPFamily = api.IPv4Protocol
rest.serviceIPAllocatorsByFamily = map[api.IPFamily]ipallocator.Interface{api.IPv4Protocol: rest.serviceIPAllocatorsByFamily[api.IPv4Protocol]}
}
fnMakeSingleStackIPv6Allocator := func(rest *REST) {
rest.defaultServiceIPFamily = api.IPv6Protocol
rest.serviceIPAllocatorsByFamily = map[api.IPFamily]ipallocator.Interface{api.IPv6Protocol: rest.serviceIPAllocatorsByFamily[api.IPv6Protocol]}
}
fnMakeDualStackStackIPv4IPv6Allocator := func(rest *REST) {
rest.defaultServiceIPFamily = api.IPv4Protocol
rest.serviceIPAllocatorsByFamily = map[api.IPFamily]ipallocator.Interface{
api.IPv6Protocol: rest.serviceIPAllocatorsByFamily[api.IPv6Protocol],
api.IPv4Protocol: rest.serviceIPAllocatorsByFamily[api.IPv4Protocol],
}
}
fnMakeDualStackStackIPv6IPv4Allocator := func(rest *REST) {
rest.defaultServiceIPFamily = api.IPv6Protocol
rest.serviceIPAllocatorsByFamily = map[api.IPFamily]ipallocator.Interface{
api.IPv6Protocol: rest.serviceIPAllocatorsByFamily[api.IPv6Protocol],
api.IPv4Protocol: rest.serviceIPAllocatorsByFamily[api.IPv4Protocol],
}
}
testCases := []struct {
name string
modifyRest func(rest *REST)
oldSvc *api.Service
svc *api.Service
expectedIPFamilyPolicy *api.IPFamilyPolicyType
expectedIPFamilies []api.IPFamily
expectError bool
}{
////////////////////////////
// cluster configured as single stack v4
////////////////////////////
{
name: "[singlestack:v4] set: externalname on a single stack - v4",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo", svctest.SetTypeExternalName),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: false,
},
{
name: "[singlestack:v4] set: nothing",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo"),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol},
expectError: false,
},
{
name: "[singlestack:v4] set: v4Cluster IPSet",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("10.0.0.4")),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol},
expectError: false,
},
{
name: "[singlestack:v4] set: v4IPFamilySet",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilies(api.IPv4Protocol)),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol},
expectError: false,
},
{
name: "[singlestack:v4] set: v4IPFamilySet",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("10.0.0.4"),
svctest.SetIPFamilies(api.IPv4Protocol)),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol},
expectError: false,
},
{
name: "[singlestack:v4] set: PreferDualStack",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol},
expectError: false,
},
{
name: "[singlestack:v4] set: PreferDualStack + v4ClusterIPSet",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack),
svctest.SetClusterIPs("10.0.0.4")),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol},
expectError: false,
},
{
name: "[singlestack:v4] set: PreferDualStack + v4ClusterIPSet + v4FamilySet",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack),
svctest.SetIPFamilies(api.IPv4Protocol),
svctest.SetClusterIPs("10.0.0.4")),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol},
expectError: false,
},
{
name: "[singlestack:v4] set: v6IPSet",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("2000::1")),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
{
name: "[singlestack:v4] set: v6IPFamily",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilies(api.IPv6Protocol)),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
{
name: "[singlestack:v4] set: RequireDualStack",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack)),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
{
name: "[singlestack:v4] set: RequireDualStack + family",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetIPFamilies(api.IPv4Protocol)),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
// selector less
{
name: "[singlestack:v4] set: selectorless, families are ignored",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("None"),
svctest.SetIPFamilies(api.IPv6Protocol, api.IPv4Protocol),
func(s *api.Service) { s.Spec.Selector = nil }),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol},
expectError: false,
},
{
name: "[singlestack:v4] set: selectorless, no families",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("None"),
func(s *api.Service) { s.Spec.Selector = nil }),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[singlestack:v4] set: selectorless, user selected",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("None"),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicySingleStack),
func(s *api.Service) { s.Spec.Selector = nil }),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol},
expectError: false,
},
{
name: "[singlestack:v4] set: selectorless, user set to preferDualStack",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("None"),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack),
func(s *api.Service) { s.Spec.Selector = nil }),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
// tests incorrect setting for IPFamilyPolicy
{
name: "[singlestack:v4] set: multifamily set to preferDualStack",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilies(api.IPv4Protocol, api.IPv6Protocol),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
{
name: "[singlestack:v4] set: multifamily set to singleStack",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilies(api.IPv4Protocol, api.IPv6Protocol),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicySingleStack)),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
{
name: "[singlestack:v4] set: mult clusterips set to preferDualStack",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("1.1.1.1", "2001::1"),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
{
name: "[singlestack:v4] set: multi clusterips set to singleStack",
modifyRest: fnMakeSingleStackIPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("1.1.1.1", "2001::1"),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicySingleStack)),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
////////////////////////////
// cluster configured as single stack v6
////////////////////////////
{
name: "[singlestack:v6] set: externalname on a single stack - v4",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo", svctest.SetTypeExternalName),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: false,
},
{
name: "[singlestack:v6] set: nothing",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo"),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol},
expectError: false,
},
{
name: "[singlestack:v6] set: v6Cluster IPSet",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("2000::1")),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol},
expectError: false,
},
{
name: "[singlestack:v6] set: v4IPFamilySet",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilies(api.IPv6Protocol)),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol},
expectError: false,
},
{
name: "[singlestack:v6] set: v6IPFamilySet",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("2000::1"),
svctest.SetIPFamilies(api.IPv6Protocol)),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol},
expectError: false,
},
{
name: "[singlestack:v6] set: PreferDualStack",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol},
expectError: false,
},
{
name: "[singlestack:v6] set: PreferDualStack + v6ClusterIPSet",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack),
svctest.SetClusterIPs("2000::1")),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol},
expectError: false,
},
{
name: "[singlestack:v6] set: PreferDualStack + v6ClusterIPSet + v6FamilySet",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack),
svctest.SetIPFamilies(api.IPv6Protocol),
svctest.SetClusterIPs("2000::1")),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol},
expectError: false,
},
{
name: "[singlestack:v6] set: v4IPSet",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("10.0.0.10")),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
{
name: "[singlestack:v6] set: v4IPFamily",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilies(api.IPv4Protocol)),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
{
name: "[singlestack:v6] set: RequireDualStack (on single stack ipv6 cluster)",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack)),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
{
name: "[singlestack:v6] set: RequireDualStack + family (on single stack ipv6 cluster)",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetIPFamilies(api.IPv4Protocol)),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
// selector less
{
name: "[singlestack:v6] set: selectorless, families are ignored",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("None"),
svctest.SetIPFamilies(api.IPv6Protocol, api.IPv4Protocol),
func(s *api.Service) { s.Spec.Selector = nil }),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol},
expectError: false,
},
{
name: "[singlestack:v6] set: selectorless, no families",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("None"),
func(s *api.Service) { s.Spec.Selector = nil }),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol},
expectError: false,
},
{
name: "[singlestack:v6] set: selectorless, user selected",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("None"),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicySingleStack),
func(s *api.Service) { s.Spec.Selector = nil }),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol},
expectError: false,
},
{
name: "[singlestack:v6] set: selectorless, user set to preferDualStack",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("None"),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack),
func(s *api.Service) { s.Spec.Selector = nil }),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol},
expectError: false,
},
// tests incorrect setting for IPFamilyPolicy
{
name: "[singlestack:v6] set: multifamily set to preferDualStack",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilies(api.IPv4Protocol, api.IPv6Protocol),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
{
name: "[singlestack:v6] set: multifamily set to singleStack",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilies(api.IPv4Protocol, api.IPv6Protocol),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicySingleStack)),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
{
name: "[singlestack:v6] set: mult clusterips set to preferDualStack",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("1.1.1.1", "2001::1"),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
{
name: "[singlestack:v6] set: multi clusterips set to singleStack",
modifyRest: fnMakeSingleStackIPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("1.1.1.1", "2001::1"),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicySingleStack)),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
////////////////////////////
// cluster configured as dual stack v4,6
////////////////////////////
{
name: "[dualstack:v4,v6] set: externalname on a dual stack - v4,v6",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo", svctest.SetTypeExternalName),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: false,
},
{
name: "[dualstack:v4,v6] set: nothing",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo"),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v4,v6] set: v4ClusterIPSet",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("10.0.0.4")),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v4,v6] set: v4IPFamilySet",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilies(api.IPv4Protocol)),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v4,v6] set: v4IPFamilySet",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("10.0.0.4"),
svctest.SetIPFamilies(api.IPv4Protocol)),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v4,v6] set: v6ClusterIPSet",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("2000::1")),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v4,v6] set: v6IPFamilySet",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilies(api.IPv6Protocol)),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v4,v6] set: v6IPFamilySet",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("2000::1"),
svctest.SetIPFamilies(api.IPv6Protocol)),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol},
expectError: false,
},
// prefer dual stack
{
name: "[dualstack:v4,v6] set: PreferDualStack.",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v4,v6] set: PreferDualStack + v4ClusterIPSet",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack),
svctest.SetClusterIPs("10.0.0.4")),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v4,v6] set: PreferDualStack + v4ClusterIPSet + v4FamilySet",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack),
svctest.SetIPFamilies(api.IPv4Protocol),
svctest.SetClusterIPs("10.0.0.4")),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
// require dual stack
{
name: "[dualstack:v4,v6] set: RequireDualStack",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack)),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v4,v6] set: RequireDualStack + family v4",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetIPFamilies(api.IPv4Protocol)),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v4,v6] set: RequireDualStack + family v6",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetIPFamilies(api.IPv6Protocol)),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v4,v6] set: RequireDualStack + family +ip v4",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetClusterIPs("10.0.0.10"),
svctest.SetIPFamilies(api.IPv4Protocol)),
//
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v4,v6] set: RequireDualStack + family +ip v6",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetClusterIPs("2000::1"),
svctest.SetIPFamilies(api.IPv6Protocol)),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v4,v6] set: RequireDualStack + ip v6",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetClusterIPs("2000::1")),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v4,v6] set: RequireDualStack + ip v4",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetClusterIPs("10.0.0.10")),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v4,v6] set: RequireDualStack + ips",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetClusterIPs("10.0.0.10", "2000::1")),
//
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v4,v6] set: RequireDualStack + ips",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetClusterIPs("2000::1", "10.0.0.10")),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v4,v6] set: RequireDualStack + ips + families v6,v4",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetClusterIPs("2000::1", "10.0.0.10"),
svctest.SetIPFamilies(api.IPv6Protocol, api.IPv4Protocol)),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: RequireDualStack + ips + families v4,v6",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetClusterIPs("10.0.0.10", "2000::1"),
svctest.SetIPFamilies(api.IPv4Protocol, api.IPv6Protocol)),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v4,v6] set: selectorless, no families",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("None"),
func(s *api.Service) { s.Spec.Selector = nil }),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v4,6] set: selectorless, user selected",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("None"),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicySingleStack)),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v4,6] set: selectorless, user set to prefer",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("None"),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
// tests incorrect setting for IPFamilyPolicy
{
name: "[duakstack:v4,6] set: multifamily set to preferDualStack",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilies(api.IPv4Protocol, api.IPv6Protocol),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v4,6] set: multifamily set to singleStack",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilies(api.IPv4Protocol, api.IPv6Protocol),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicySingleStack)),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
{
name: "[dualstack:v4,6] set: mult clusterips set to preferDualStack",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("1.1.1.1", "2001::1"),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v4,6] set: multi clusterips set to singleStack",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("1.1.1.1", "2001::1"),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicySingleStack)),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
////////////////////////////
// cluster configured as dual stack v6,4
////////////////////////////
{
name: "[dualstack:v6,v4] set: externalname on a dual stack - v6,v4",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo", svctest.SetTypeExternalName),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: false,
},
{
name: "[dualstack:v6,v4] set: nothing",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo"),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: v4ClusterIPSet",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("10.0.0.4")),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: v4IPFamilySet",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilies(api.IPv4Protocol)),
//
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: v4IPFamilySet",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("10.0.0.4"),
svctest.SetIPFamilies(api.IPv4Protocol)),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: v6ClusterIPSet",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("2000::1")),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: v6IPFamilySet",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilies(api.IPv6Protocol)),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: v6IPFamilySet",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("2000::1"),
svctest.SetIPFamilies(api.IPv6Protocol)),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol},
expectError: false,
},
// prefer dual stack
{
name: "[dualstack:v6,v4] set: PreferDualStack.",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: PreferDualStack + v4ClusterIPSet",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack),
svctest.SetClusterIPs("10.0.0.4")),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: PreferDualStack + v4ClusterIPSet + v4FamilySet",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack),
svctest.SetIPFamilies(api.IPv4Protocol),
svctest.SetClusterIPs("10.0.0.4")),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
// require dual stack
{
name: "[dualstack:v6,v4] set: RequireDualStack",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack)),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: RequireDualStack + family v4",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetIPFamilies(api.IPv4Protocol)),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: RequireDualStack + family v6",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetIPFamilies(api.IPv6Protocol)),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: RequireDualStack + family +ip v4",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetClusterIPs("10.0.0.10"),
svctest.SetIPFamilies(api.IPv4Protocol)),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: RequireDualStack + family +ip v6",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetClusterIPs("2000::1"),
svctest.SetIPFamilies(api.IPv6Protocol)),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: RequireDualStack + ip v6",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetClusterIPs("2000::1")),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: RequireDualStack + ip v4",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetClusterIPs("10.0.0.10")),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: RequireDualStack + ip v4",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetClusterIPs("10.0.0.10")),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: RequireDualStack + ips",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetClusterIPs("10.0.0.10", "2000::1")),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: RequireDualStack + ips",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetClusterIPs("2000::1", "10.0.0.10")),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: RequireDualStack + ips + families v6,v4",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetClusterIPs("2000::1", "10.0.0.10"),
svctest.SetIPFamilies(api.IPv6Protocol, api.IPv4Protocol)),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: RequireDualStack + ips + families v4,v6",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack),
svctest.SetClusterIPs("10.0.0.10", "2000::1"),
svctest.SetIPFamilies(api.IPv4Protocol, api.IPv6Protocol)),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: selectorless, no families",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("None"),
func(s *api.Service) { s.Spec.Selector = nil }),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: selectorless, user selected",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("None"),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicySingleStack)),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v6,v4] set: selectorless, user set to prefer",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("None"),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv6Protocol, api.IPv4Protocol},
expectError: false,
},
// tests incorrect setting for IPFamilyPolicy
{
name: "[duakstack:v6,5] set: multifamily set to preferDualStack",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilies(api.IPv4Protocol, api.IPv6Protocol),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v4,6] set: multifamily set to singleStack",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetIPFamilies(api.IPv4Protocol, api.IPv6Protocol),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicySingleStack)),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
{
name: "[dualstack:v6,4] set: mult clusterips set to preferDualStack",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("1.1.1.1", "2001::1"),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "[dualstack:v6,4] set: multi clusterips set to singleStack",
modifyRest: fnMakeDualStackStackIPv6IPv4Allocator,
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("1.1.1.1", "2001::1"),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicySingleStack)),
expectedIPFamilyPolicy: nil,
expectedIPFamilies: nil,
expectError: true,
},
// preferDualStack services should not be updated
// to match cluster config if the user didn't change any
// ClusterIPs related fields
{
name: "unchanged preferDualStack-1-ClusterUpgraded",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
oldSvc: svctest.MakeService("foo",
svctest.SetClusterIPs("1.1.1.1"),
svctest.SetIPFamilies(api.IPv4Protocol),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("1.1.1.1"),
svctest.SetIPFamilies(api.IPv4Protocol),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol},
expectError: false,
},
{
name: "unchanged preferDualStack-2-ClusterDowngraded",
modifyRest: fnMakeSingleStackIPv4Allocator,
oldSvc: svctest.MakeService("foo",
svctest.SetClusterIPs("1.1.1.1", "2001::1"),
svctest.SetIPFamilies(api.IPv4Protocol, api.IPv6Protocol),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("1.1.1.1", "2001::1"),
svctest.SetIPFamilies(api.IPv4Protocol, api.IPv6Protocol),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
expectedIPFamilyPolicy: &preferDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "changed preferDualStack-1 (cluster upgraded)",
modifyRest: fnMakeDualStackStackIPv4IPv6Allocator,
oldSvc: svctest.MakeService("foo",
svctest.SetClusterIPs("1.1.1.1"),
svctest.SetIPFamilies(api.IPv4Protocol),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
svc: svctest.MakeService("foo",
svctest.SetIPFamilies(api.IPv4Protocol),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyRequireDualStack)),
expectedIPFamilyPolicy: &requireDualStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol},
expectError: false,
},
{
name: "changed preferDualStack-2-ClusterDowngraded",
modifyRest: fnMakeSingleStackIPv4Allocator,
oldSvc: svctest.MakeService("foo",
svctest.SetClusterIPs("1.1.1.1", "2001::1"),
svctest.SetIPFamilies(api.IPv4Protocol, api.IPv6Protocol),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicyPreferDualStack)),
svc: svctest.MakeService("foo",
svctest.SetClusterIPs("1.1.1.1"),
svctest.SetIPFamilies(api.IPv4Protocol),
svctest.SetIPFamilyPolicy(api.IPFamilyPolicySingleStack)),
expectedIPFamilyPolicy: &singleStack,
expectedIPFamilies: []api.IPFamily{api.IPv4Protocol},
expectError: false,
},
}
// This func only runs when feature gate is on
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.IPv6DualStack, true)()
storage, server := NewTestREST(t, []api.IPFamily{api.IPv4Protocol, api.IPv6Protocol})
defer server.Terminate(t)
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
// reset to defaults
fnMakeDualStackStackIPv4IPv6Allocator(storage)
// optionally apply test-specific changes
if testCase.modifyRest != nil {
testCase.modifyRest(storage)
}
err := storage.tryDefaultValidateServiceClusterIPFields(testCase.oldSvc, testCase.svc)
if err != nil && !testCase.expectError {
t.Fatalf("error %v was not expected", err)
}
if err == nil && testCase.expectError {
t.Fatalf("error was expected, but no error returned")
}
if err != nil {
t.Logf("test concluded successfully with terminal error %v", err)
return
}
// IPFamily Policy
if (testCase.expectedIPFamilyPolicy == nil && testCase.svc.Spec.IPFamilyPolicy != nil) ||
(testCase.expectedIPFamilyPolicy != nil && testCase.svc.Spec.IPFamilyPolicy == nil) {
t.Fatalf("ipFamilyPolicy expected:%v got %v", testCase.expectedIPFamilyPolicy, testCase.svc.Spec.IPFamilyPolicy)
}
if testCase.expectedIPFamilyPolicy != nil {
if *testCase.expectedIPFamilyPolicy != *testCase.svc.Spec.IPFamilyPolicy {
t.Fatalf("ipFamilyPolicy expected:%s got %s", *testCase.expectedIPFamilyPolicy, *testCase.svc.Spec.IPFamilyPolicy)
}
}
if len(testCase.expectedIPFamilies) != len(testCase.svc.Spec.IPFamilies) {
t.Fatalf("expected len of IPFamilies %v got %v", len(testCase.expectedIPFamilies), len(testCase.svc.Spec.IPFamilies))
}
// match families
for i, family := range testCase.expectedIPFamilies {
if testCase.svc.Spec.IPFamilies[i] != family {
t.Fatalf("expected ip family %v at %v got %v", family, i, testCase.svc.Spec.IPFamilies)
}
}
})
}
}
// validates that the service created, updated by REST
// has correct ClusterIPs related fields
// isValidClusterIPFields validates that a service created or updated through
// REST carries consistent ClusterIP-related fields (ClusterIP, ClusterIPs,
// IPFamilies, IPFamilyPolicy), for both the gate-on and gate-off scenarios of
// the IPv6DualStack feature gate. `pre` is the service as submitted by the
// caller; `post` is the service as returned by the storage layer.
func isValidClusterIPFields(t *testing.T, storage *REST, pre *api.Service, post *api.Service) {
	// valid for gate off/on scenarios
	// ClusterIP
	if len(post.Spec.ClusterIP) == 0 {
		t.Fatalf("service must have clusterIP : %+v", post)
	}
	// cluster IPs
	if len(post.Spec.ClusterIPs) == 0 {
		t.Fatalf("new service must have at least one IP: %+v", post)
	}
	if post.Spec.ClusterIP != post.Spec.ClusterIPs[0] {
		t.Fatalf("clusterIP does not match ClusterIPs[0]: %+v", post)
	}
	// if the feature gate is not enabled then we need to ignore the new fields
	if !utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) {
		if post.Spec.IPFamilyPolicy != nil {
			t.Fatalf("service must be set to nil for IPFamilyPolicy: %+v", post)
		}
		if len(post.Spec.IPFamilies) != 0 {
			t.Fatalf("service must be set to nil for IPFamilies: %+v", post)
		}
		return
	}
	// for gate on scenarios
	// prefer dual stack field
	if post.Spec.IPFamilyPolicy == nil {
		t.Fatalf("service must not have nil for IPFamilyPolicy: %+v", post)
	}
	if pre.Spec.IPFamilyPolicy != nil && *(pre.Spec.IPFamilyPolicy) != *(post.Spec.IPFamilyPolicy) {
		t.Fatalf("new service must not change PreferDualStack if it was set by user pre: %v post: %v", *(pre.Spec.IPFamilyPolicy), *(post.Spec.IPFamilyPolicy))
	}
	if pre.Spec.IPFamilyPolicy == nil && *(post.Spec.IPFamilyPolicy) != api.IPFamilyPolicySingleStack {
		t.Fatalf("new services with prefer dual stack nil must be set to false (prefer dual stack) %+v", post)
	}
	// external name or headless services offer no more ClusterIPs field validation
	if post.Spec.ClusterIPs[0] == api.ClusterIPNone {
		return
	}
	// len of ClusterIPs can not be more than Families
	// and for providedIPs it needs to match
	// if families are provided then it shouldn't be changed
	// this applies on first entry on
	if len(pre.Spec.IPFamilies) > 0 {
		if len(post.Spec.IPFamilies) == 0 {
			t.Fatalf("allocator shouldn't remove ipfamilies[0] pre:%+v, post:%+v", pre.Spec.IPFamilies, post.Spec.IPFamilies)
		}
		if pre.Spec.IPFamilies[0] != post.Spec.IPFamilies[0] {
			t.Fatalf("allocator shouldn't change post.Spec.IPFamilies[0] pre:%+v post:%+v", pre.Spec.IPFamilies, post.Spec.IPFamilies)
		}
	}
	// if two families are assigned, then they must be dual stack
	if len(post.Spec.IPFamilies) == 2 {
		if post.Spec.IPFamilies[0] == post.Spec.IPFamilies[1] {
			t.Fatalf("allocator assigned two of the same family %+v", post)
		}
	}
	// ips must match families, index by index, in both directions
	for i, ip := range post.Spec.ClusterIPs {
		// guard against indexing past IPFamilies (the comment above says
		// ClusterIPs can never outnumber families; fail instead of panicking)
		if i >= len(post.Spec.IPFamilies) {
			t.Fatalf("more ClusterIPs than IPFamilies %+v %+v", post.Spec.ClusterIPs, post.Spec.IPFamilies)
		}
		isIPv6 := netutil.IsIPv6String(ip)
		if isIPv6 && post.Spec.IPFamilies[i] != api.IPv6Protocol {
			t.Fatalf("ips does not match assigned families %+v %+v", post.Spec.ClusterIPs, post.Spec.IPFamilies)
		}
		// previously only the IPv6 direction was checked, so an IPv4 address
		// paired with an IPv6 family slipped through
		if !isIPv6 && post.Spec.IPFamilies[i] != api.IPv4Protocol {
			t.Fatalf("ips does not match assigned families %+v %+v", post.Spec.ClusterIPs, post.Spec.IPFamilies)
		}
	}
}
| apache-2.0 |
graydon/rust | src/test/ui/const-generics/auxiliary/generics_of_parent.rs | 491 | #![feature(generic_const_exprs)]
#![allow(incomplete_features)]
// library portion of regression test for #87674

/// Tuple struct whose field type `[(); N + 1]` forces evaluation of the
/// generic const expression `N + 1`; the where-bound repeats the array type
/// so the expression is considered well-formed by the caller.
pub struct Foo<const N: usize>([(); N + 1])
where
    [(); N + 1]: ;

// library portion of regression test for #87603

/// Wrapper around an array whose length is the generic const expression
/// `N * 2`; the `Sized` where-clause is what makes that expression usable.
pub struct S<T: Copy + Default, const N: usize>
where
    [T; N * 2]: Sized,
{
    pub s: [T; N * 2],
}

impl<T: Default + Copy, const N: usize> S<T, N>
where
    [T; N * 2]: Sized,
{
    /// Builds an `S` whose backing array is filled with `T::default()`.
    pub fn test() -> Self {
        S { s: [T::default(); N * 2] }
    }
}
| apache-2.0 |
robin13/elasticsearch | server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java | 11548 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.search.sort;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.join.ToChildBlockJoinQuery;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.mapper.ObjectMapper;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.SearchExecutionContext;
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.index.query.Rewriteable;
import org.elasticsearch.search.DocValueFormat;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import static org.elasticsearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder;
import static org.elasticsearch.search.sort.NestedSortBuilder.FILTER_FIELD;
public abstract class SortBuilder<T extends SortBuilder<T>> implements NamedWriteable, ToXContentObject, Rewriteable<SortBuilder<?>> {

    // Shared sort order; every concrete builder defaults to ascending.
    protected SortOrder order = SortOrder.ASC;

    // parse fields common to more than one SortBuilder
    public static final ParseField ORDER_FIELD = new ParseField("order");

    // Element-name -> parser registry for the non-plain-field sort variants.
    private static final Map<String, Parser<?>> PARSERS = Map.of(
        ScriptSortBuilder.NAME, ScriptSortBuilder::fromXContent,
        GeoDistanceSortBuilder.NAME, GeoDistanceSortBuilder::fromXContent,
        GeoDistanceSortBuilder.ALTERNATIVE_NAME, GeoDistanceSortBuilder::fromXContent,
        // TODO: this can deadlock as it might access the ScoreSortBuilder (subclass) initializer from the SortBuilder initializer!!!
        ScoreSortBuilder.NAME, ScoreSortBuilder::fromXContent);

    /**
     * Create a {@linkplain SortFieldAndFormat} from this builder.
     */
    protected abstract SortFieldAndFormat build(SearchExecutionContext context) throws IOException;

    /**
     * Create a {@linkplain BucketedSort} which is useful for sorting inside of aggregations.
     */
    public abstract BucketedSort buildBucketedSort(
        SearchExecutionContext context,
        BigArrays bigArrays,
        int bucketSize,
        BucketedSort.ExtraData extra
    ) throws IOException;

    /**
     * Set the order of sorting.
     *
     * @param order the non-null sort order
     * @return this builder, typed as the concrete subclass
     */
    @SuppressWarnings("unchecked")
    public T order(SortOrder order) {
        Objects.requireNonNull(order, "sort order cannot be null.");
        this.order = order;
        return (T) this;
    }

    /**
     * Return the {@link SortOrder} used for this {@link SortBuilder}.
     */
    public SortOrder order() {
        return this.order;
    }

    /**
     * Parse the "sort" section of a search request. Accepts a single string
     * (interpreted as a field name or "_score"), a single object, or an
     * array mixing both forms; anything else is rejected.
     *
     * @param parser parser positioned on the sort value
     * @return the parsed sort builders, in request order
     * @throws IOException on XContent parsing failure
     */
    public static List<SortBuilder<?>> fromXContent(XContentParser parser) throws IOException {
        List<SortBuilder<?>> sortFields = new ArrayList<>(2);
        XContentParser.Token token = parser.currentToken();
        if (token == XContentParser.Token.START_ARRAY) {
            while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                if (token == XContentParser.Token.START_OBJECT) {
                    parseCompoundSortField(parser, sortFields);
                } else if (token == XContentParser.Token.VALUE_STRING) {
                    String fieldName = parser.text();
                    sortFields.add(fieldOrScoreSort(fieldName));
                } else {
                    throw new IllegalArgumentException("malformed sort format, "
                        + "within the sort array, an object, or an actual string are allowed");
                }
            }
        } else if (token == XContentParser.Token.VALUE_STRING) {
            String fieldName = parser.text();
            sortFields.add(fieldOrScoreSort(fieldName));
        } else if (token == XContentParser.Token.START_OBJECT) {
            parseCompoundSortField(parser, sortFields);
        } else {
            throw new IllegalArgumentException("malformed sort format, either start with array, object, or an actual string");
        }
        return sortFields;
    }

    /**
     * Interpret a bare string as either a score sort ("_score") or a sort on
     * the named field.
     */
    private static SortBuilder<?> fieldOrScoreSort(String fieldName) {
        if (fieldName.equals(ScoreSortBuilder.NAME)) {
            return new ScoreSortBuilder();
        } else {
            return new FieldSortBuilder(fieldName);
        }
    }

    /**
     * Parse one object-form sort entry, e.g. {"field": "asc"} or
     * {"_geo_distance": {...}}, appending the result to {@code sortFields}.
     */
    private static void parseCompoundSortField(XContentParser parser, List<SortBuilder<?>> sortFields)
        throws IOException {
        XContentParser.Token token;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                String fieldName = parser.currentName();
                token = parser.nextToken();
                if (token == XContentParser.Token.VALUE_STRING) {
                    // shorthand form: the value is just the order, e.g. {"field": "desc"}
                    SortOrder order = SortOrder.fromString(parser.text());
                    sortFields.add(fieldOrScoreSort(fieldName).order(order));
                } else {
                    // object form: dispatch to the special parsers first,
                    // otherwise fall back to a field sort
                    if (PARSERS.containsKey(fieldName)) {
                        sortFields.add(PARSERS.get(fieldName).fromXContent(parser, fieldName));
                    } else {
                        sortFields.add(FieldSortBuilder.fromXContent(parser, fieldName));
                    }
                }
            }
        }
    }

    /**
     * Turn a list of sort builders into a Lucene {@link Sort} plus the
     * {@link DocValueFormat}s needed to render the sort values. Returns
     * {@link Optional#empty()} when no explicit sorting is needed: either the
     * list is empty, or it is a single non-reversed score sort, which is
     * already Lucene's default.
     */
    public static Optional<SortAndFormats> buildSort(List<SortBuilder<?>> sortBuilders, SearchExecutionContext context) throws IOException {
        List<SortField> sortFields = new ArrayList<>(sortBuilders.size());
        List<DocValueFormat> sortFormats = new ArrayList<>(sortBuilders.size());
        for (SortBuilder<?> builder : sortBuilders) {
            SortFieldAndFormat sf = builder.build(context);
            sortFields.add(sf.field);
            sortFormats.add(sf.format);
        }
        if (sortFields.isEmpty() == false) {
            // optimize if we just sort on score non reversed, we don't really
            // need sorting
            boolean sort;
            if (sortFields.size() > 1) {
                sort = true;
            } else {
                SortField sortField = sortFields.get(0);
                if (sortField.getType() == SortField.Type.SCORE && sortField.getReverse() == false) {
                    sort = false;
                } else {
                    sort = true;
                }
            }
            if (sort) {
                return Optional.of(new SortAndFormats(
                    new Sort(sortFields.toArray(new SortField[sortFields.size()])),
                    sortFormats.toArray(new DocValueFormat[sortFormats.size()])));
            }
        }
        return Optional.empty();
    }

    /**
     * Resolve the {@link Nested} setup for a nested sort, or return null when
     * the sort is not nested. The parent filter is the non-nested filter
     * unless we are already inside a nested scope.
     */
    protected static Nested resolveNested(SearchExecutionContext context, NestedSortBuilder nestedSort) throws IOException {
        final Query childQuery = resolveNestedQuery(context, nestedSort, null);
        if (childQuery == null) {
            return null;
        }
        final ObjectMapper objectMapper = context.nestedScope().getObjectMapper();
        final Query parentQuery;
        if (objectMapper == null) {
            parentQuery = Queries.newNonNestedFilter();
        } else {
            parentQuery = objectMapper.nestedTypeFilter();
        }
        return new Nested(context.bitsetFilter(parentQuery), childQuery, nestedSort, context.searcher());
    }

    /**
     * Recursively build the child query for a (possibly multi-level) nested
     * sort, pushing and popping the context's nested scope around each level.
     * Returns null when there is nothing nested to resolve.
     */
    private static Query resolveNestedQuery(SearchExecutionContext context,
                                            NestedSortBuilder nestedSort,
                                            Query parentQuery) throws IOException {
        if (nestedSort == null || nestedSort.getPath() == null) {
            return null;
        }
        String nestedPath = nestedSort.getPath();
        QueryBuilder nestedFilter = nestedSort.getFilter();
        NestedSortBuilder nestedNestedSort = nestedSort.getNestedSort();
        // verify our nested path
        ObjectMapper nestedObjectMapper = context.getObjectMapper(nestedPath);
        if (nestedObjectMapper == null) {
            throw new QueryShardException(context, "[nested] failed to find nested object under path [" + nestedPath + "]");
        }
        if (nestedObjectMapper.nested().isNested() == false) {
            throw new QueryShardException(context, "[nested] nested object under path [" + nestedPath + "] is not of nested type");
        }
        ObjectMapper objectMapper = context.nestedScope().getObjectMapper();
        // get our child query, potentially applying a users filter
        Query childQuery;
        try {
            context.nestedScope().nextLevel(nestedObjectMapper);
            if (nestedFilter != null) {
                assert nestedFilter == Rewriteable.rewrite(nestedFilter, context) : "nested filter is not rewritten";
                if (parentQuery == null) {
                    // this is for back-compat, original single level nested sorting never applied a nested type filter
                    childQuery = nestedFilter.toQuery(context);
                } else {
                    childQuery = Queries.filtered(nestedObjectMapper.nestedTypeFilter(), nestedFilter.toQuery(context));
                }
            } else {
                childQuery = nestedObjectMapper.nestedTypeFilter();
            }
        } finally {
            // always restore the scope, even if filter parsing throws
            context.nestedScope().previousLevel();
        }
        // apply filters from the previous nested level
        if (parentQuery != null) {
            if (objectMapper != null) {
                childQuery = Queries.filtered(childQuery,
                    new ToChildBlockJoinQuery(parentQuery, context.bitsetFilter(objectMapper.nestedTypeFilter())));
            }
        }
        // wrap up our parent and child and either process the next level of nesting or return
        if (nestedNestedSort != null) {
            try {
                context.nestedScope().nextLevel(nestedObjectMapper);
                return resolveNestedQuery(context, nestedNestedSort, childQuery);
            } finally {
                context.nestedScope().previousLevel();
            }
        } else {
            return childQuery;
        }
    }

    /**
     * Parse the "filter" element of a nested sort into a query builder,
     * wrapping any failure in a {@link ParsingException} carrying the
     * parser's current location.
     */
    protected static QueryBuilder parseNestedFilter(XContentParser parser) {
        try {
            return parseInnerQueryBuilder(parser);
        } catch (Exception e) {
            throw new ParsingException(parser.getTokenLocation(), "Expected " + FILTER_FIELD.getPreferredName() + " element.", e);
        }
    }

    // Internal hook matching the per-type fromXContent(parser, elementName)
    // static factories registered in PARSERS.
    @FunctionalInterface
    private interface Parser<T extends SortBuilder<?>> {
        T fromXContent(XContentParser parser, String elementName) throws IOException;
    }

    @Override
    public String toString() {
        return Strings.toString(this, true, true);
    }
}
| apache-2.0 |
jimma/xerces | src/org/apache/xerces/dom/RangeExceptionImpl.java | 1192 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.xerces.dom;
import org.w3c.dom.ranges.RangeException;
/**
 * Concrete {@link RangeException} subclass used by the DOM Range
 * implementation; carries a Range error code and a detail message.
 *
 * @xerces.internal
 *
 * @version $Id$
 */
public class RangeExceptionImpl extends RangeException {

    /** Serialization version. */
    static final long serialVersionUID = -9058052627467240856L;

    /**
     * @param code one of the {@link RangeException} error codes
     * @param message the detail message
     */
    public RangeExceptionImpl(short code, String message) {
        super(code,message);
    }
}
| apache-2.0 |
adammulligan/new-dawn | cookbooks/nginx/test/integration/default/serverspec/default_spec.rb | 336 | require 'serverspec'
include Serverspec::Helper::Exec
include Serverspec::Helper::DetectOS
# Serverspec coverage for the nginx cookbook's default recipe.
describe 'nginx::default' do
  # the recipe must have installed the nginx package
  it 'installed nginx' do
    expect(package('nginx')).to be_installed
  end

  # the service must be enabled at boot and currently running
  it 'has the service up and running' do
    nginx_service = service('nginx')
    expect(nginx_service).to be_enabled
    expect(nginx_service).to be_running
  end
end
| apache-2.0 |
dvizh/shop-skeleton | common/modules/dektrium/yii2-rbac/widgets/Menu.php | 2255 | <?php
/*
* This file is part of the Dektrium project.
*
* (c) Dektrium project <http://github.com/dektrium>
*
* For the full copyright and license information, please view the LICENSE.md
* file that was distributed with this source code.
*/
namespace dektrium\rbac\widgets;
use yii\bootstrap\Nav;
/**
 * Menu widget.
 *
 * Renders the RBAC module navigation as a Bootstrap nav-tabs menu. Entries
 * that point into the Dektrium user module are only shown when that module
 * is installed under the "user" module id.
 *
 * @author Dmitry Erofeev <[email protected]>
 */
class Menu extends Nav
{
    /**
     * @inheritdoc
     */
    public $options = [
        'class' => 'nav-tabs'
    ];

    /**
     * @inheritdoc
     */
    public function init()
    {
        parent::init();

        // Only link to user management when the Dektrium user module is mounted.
        $userModuleClass = 'dektrium\user\Module';
        $isUserModuleInstalled = \Yii::$app->getModule('user') instanceof $userModuleClass;

        $this->items = [
            [
                'label' => \Yii::t('rbac', 'Users'),
                'url' => ['/user/admin/index'],
                'visible' => $isUserModuleInstalled,
            ],
            [
                'label' => \Yii::t('rbac', 'Roles'),
                'url' => ['/rbac/role/index'],
            ],
            [
                'label' => \Yii::t('rbac', 'Permissions'),
                'url' => ['/rbac/permission/index'],
            ],
            [
                'label' => \Yii::t('rbac', 'Rules'),
                'url' => ['/rbac/rule/index'],
            ],
            [
                'label' => \Yii::t('rbac', 'Create'),
                'items' => [
                    [
                        // fixed: was "=>\ Yii::t(...)" — the namespace separator
                        // must be attached to the class name (parse error on PHP 8)
                        'label' => \Yii::t('rbac', 'New user'),
                        'url' => ['/user/admin/create'],
                        'visible' => $isUserModuleInstalled,
                    ],
                    [
                        'label' => \Yii::t('rbac', 'New role'),
                        'url' => ['/rbac/role/create']
                    ],
                    [
                        'label' => \Yii::t('rbac', 'New permission'),
                        'url' => ['/rbac/permission/create']
                    ],
                    [
                        'label' => \Yii::t('rbac', 'New rule'),
                        'url' => ['/rbac/rule/create']
                    ]
                ]
            ],
        ];
    }
}
Jedwondle/openstorefront | server/openstorefront/openstorefront-core/service/src/main/java/edu/usu/sdl/openstorefront/service/manager/model/JiraFieldInfoModel.java | 3275 | /*
* Copyright 2014 Space Dynamics Laboratory - Utah State University Research Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.usu.sdl.openstorefront.service.manager.model;
import com.atlassian.jira.rest.client.api.domain.CimFieldInfo;
import com.atlassian.jira.rest.client.api.domain.CustomFieldOption;
import java.util.ArrayList;
import java.util.List;
/**
*
* @author jlaw
*/
/**
 * View model describing a single JIRA field: its key in the metadata map,
 * its id, display name, and the list of values the field allows.
 *
 * @author jlaw
 */
public class JiraFieldInfoModel
{
    private String key;
    private String id;
    private String name;
    private List<JiraFieldAllowedValues> allowedValues;

    public JiraFieldInfoModel()
    {
    }

    /**
     * Builds a view from a JIRA create-issue-metadata field description.
     *
     * @param key the key this field was registered under
     * @param info the field info returned by the JIRA REST client
     * @return the populated view, or null when the field reports allowed
     * values that are empty or whose first entry is not a CustomFieldOption
     * (i.e. the field is not an option-based custom field)
     */
    public static JiraFieldInfoModel toView(String key, CimFieldInfo info)
    {
        JiraFieldInfoModel view = new JiraFieldInfoModel();
        view.setKey(key);
        view.setId(info.getId());
        view.setName(info.getName());
        List<JiraFieldAllowedValues> values = new ArrayList<>();
        if (info.getAllowedValues() != null) {
            // NOTE(review): unchecked cast — assumes the client hands back a
            // List; only the first element's type is used to classify the field
            List<Object> allowedValues = ((List<Object>) info.getAllowedValues());
            if (allowedValues.size() > 0 && allowedValues.get(0) instanceof CustomFieldOption) {
                for (Object allowedValue : allowedValues) {
                    if (allowedValue instanceof CustomFieldOption) {
                        values.add(JiraFieldAllowedValues.toView((CustomFieldOption) allowedValue));
                    }
                }
            } else {
                // not an option-based field (or no options) — callers treat null as "skip"
                return null;
            }
        }
        view.setAllowedValues(values);
        return view;
    }

    /**
     * Builds a "Status" pseudo-field view from an issue type, exposing the
     * issue type's statuses as the field's allowed values.
     *
     * @param temp the issue type to read statuses from
     * @return the populated view, or null when the issue type carries an
     * empty (but non-null) status list
     */
    public static JiraFieldInfoModel toView(JiraIssueType temp)
    {
        JiraFieldInfoModel view = new JiraFieldInfoModel();
        view.setKey(temp.toString());
        view.setId(temp.getId());
        view.setName("Status");
        List<JiraFieldAllowedValues> values = new ArrayList<>();
        if (temp.getStatuses() != null) {
            List<JiraStatus> statuses = temp.getStatuses();
            if (statuses.size() > 0) {
                for (JiraStatus status : statuses) {
                    values.add(JiraFieldAllowedValues.toView(status));
                }
            } else {
                return null;
            }
        }
        view.setAllowedValues(values);
        return view;
    }

    /**
     * @return the id
     */
    public String getId()
    {
        return id;
    }

    /**
     * @param id the id to set
     */
    public void setId(String id)
    {
        this.id = id;
    }

    /**
     * @return the name
     */
    public String getName()
    {
        return name;
    }

    /**
     * @param name the name to set
     */
    public void setName(String name)
    {
        this.name = name;
    }

    /**
     * @return the allowedValues
     */
    public List<JiraFieldAllowedValues> getAllowedValues()
    {
        return allowedValues;
    }

    /**
     * @param allowedValues the allowedValues to set
     */
    public void setAllowedValues(List<JiraFieldAllowedValues> allowedValues)
    {
        this.allowedValues = allowedValues;
    }

    /**
     * @return the key
     */
    public String getKey()
    {
        return key;
    }

    /**
     * @param key the key to set
     */
    public void setKey(String key)
    {
        this.key = key;
    }
}
| apache-2.0 |
weiwl/closure-compiler | src/com/google/javascript/jscomp/SimpleDefinitionFinder.java | 16190 | /*
* Copyright 2009 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.javascript.jscomp;
import com.google.common.base.Preconditions;
import com.google.common.collect.LinkedHashMultimap;
import com.google.common.collect.Multimap;
import com.google.javascript.jscomp.DefinitionsRemover.Definition;
import com.google.javascript.jscomp.DefinitionsRemover.ExternalNameOnlyDefinition;
import com.google.javascript.jscomp.DefinitionsRemover.UnknownDefinition;
import com.google.javascript.jscomp.NodeTraversal.AbstractPostOrderCallback;
import com.google.javascript.jscomp.NodeTraversal.Callback;
import com.google.javascript.rhino.JSDocInfo;
import com.google.javascript.rhino.Node;
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
/**
* Simple name-based definition gatherer that implements
* {@link DefinitionProvider}.
*
* It treats all variable writes as happening in the global scope and
* treats all objects as capable of having the same set of properties.
* The current implementation only handles definitions whose right
* hand side is an immutable value or function expression. All
* complex definitions are treated as unknowns.
*
*/
class SimpleDefinitionFinder implements CompilerPass, DefinitionProvider {
private final AbstractCompiler compiler;
private final Map<Node, DefinitionSite> definitionSiteMap;
private final Multimap<String, Definition> nameDefinitionMultimap;
private final Multimap<String, UseSite> nameUseSiteMultimap;
public SimpleDefinitionFinder(AbstractCompiler compiler) {
this.compiler = compiler;
this.definitionSiteMap = new LinkedHashMap<>();
this.nameDefinitionMultimap = LinkedHashMultimap.create();
this.nameUseSiteMultimap = LinkedHashMultimap.create();
}
/**
* Returns the collection of definition sites found during traversal.
*
* @return definition site collection.
*/
public Collection<DefinitionSite> getDefinitionSites() {
return definitionSiteMap.values();
}
private DefinitionSite getDefinitionAt(Node node) {
return definitionSiteMap.get(node);
}
DefinitionSite getDefinitionForFunction(Node function) {
Preconditions.checkState(function.isFunction());
return getDefinitionAt(getNameNodeFromFunctionNode(function));
}
@Override
public Collection<Definition> getDefinitionsReferencedAt(Node useSite) {
if (definitionSiteMap.containsKey(useSite)) {
return null;
}
if (useSite.isGetProp()) {
String propName = useSite.getLastChild().getString();
if (propName.equals("apply") || propName.equals("call")) {
useSite = useSite.getFirstChild();
}
}
String name = getSimplifiedName(useSite);
if (name != null) {
Collection<Definition> defs = nameDefinitionMultimap.get(name);
if (!defs.isEmpty()) {
return defs;
} else {
return null;
}
} else {
return null;
}
}
@Override
public void process(Node externs, Node source) {
NodeTraversal.traverse(
compiler, externs, new DefinitionGatheringCallback(true));
NodeTraversal.traverse(
compiler, source, new DefinitionGatheringCallback(false));
NodeTraversal.traverse(
compiler, source, new UseSiteGatheringCallback());
}
/**
* Returns a collection of use sites that may refer to provided
* definition. Returns an empty collection if the definition is not
* used anywhere.
*
* @param definition Definition of interest.
* @return use site collection.
*/
Collection<UseSite> getUseSites(Definition definition) {
String name = getSimplifiedName(definition.getLValue());
return nameUseSiteMultimap.get(name);
}
/**
* Extract a name from a node. In the case of GETPROP nodes,
* replace the namespace or object expression with "this" for
* simplicity and correctness at the expense of inefficiencies due
* to higher chances of name collisions.
*
* TODO(user) revisit. it would be helpful to at least use fully
* qualified names in the case of namespaces. Might not matter as
* much if this pass runs after "collapsing properties".
*/
private static String getSimplifiedName(Node node) {
if (node.isName()) {
String name = node.getString();
if (name != null && !name.isEmpty()) {
return name;
} else {
return null;
}
} else if (node.isGetProp()) {
return "this." + node.getLastChild().getString();
}
return null;
}
private class DefinitionGatheringCallback implements Callback {
private boolean inExterns;
DefinitionGatheringCallback(boolean inExterns) {
this.inExterns = inExterns;
}
@Override
public boolean shouldTraverse(NodeTraversal t, Node n, Node parent) {
if (inExterns) {
if (n.isFunction() && !n.getFirstChild().isName()) {
// No need to crawl functions in JSDoc
return false;
}
if (parent != null
&& parent.isFunction() && n != parent.getFirstChild()) {
// Arguments of external functions should not count as name
// definitions. They are placeholder names for documentation
// purposes only which are not reachable from anywhere.
return false;
}
}
return true;
}
    @Override
    public void visit(NodeTraversal traversal, Node node, Node parent) {
      // In externs, JSDoc type expressions can themselves reference names;
      // traverse the type ASTs so those references are gathered too.
      if (inExterns && node.getJSDocInfo() != null) {
        for (Node typeRoot : node.getJSDocInfo().getTypeNodes()) {
          traversal.traverse(typeRoot);
        }
      }
      Definition def =
          DefinitionsRemover.getDefinition(node, inExterns);
      if (def != null) {
        String name = getSimplifiedName(def.getLValue());
        if (name != null) {
          Node rValue = def.getRValue();
          // A right-hand side that is neither an immutable value nor a
          // function is too complex to reason about; downgrade it to an
          // UnknownDefinition so later passes treat it conservatively.
          if ((rValue != null) &&
              !NodeUtil.isImmutableValue(rValue) &&
              !rValue.isFunction()) {
            // Unhandled complex expression
            Definition unknownDef =
                new UnknownDefinition(def.getLValue(), inExterns);
            def = unknownDef;
          }
          // TODO(johnlenz) : remove this stub dropping code if it becomes
          // illegal to have untyped stubs in the externs definitions.
          if (inExterns) {
            // We need special handling of untyped externs stubs here:
            // the stub should be dropped if the name is provided elsewhere.
            // Collect first, remove after the loop, to avoid mutating the
            // multimap while iterating over its values.
            List<Definition> stubsToRemove = new ArrayList<>();
            // If there is no qualified name for this, then there will be
            // no stubs to remove. This will happen if node is an object
            // literal key.
            if (node.isQualifiedName()) {
              for (Definition prevDef : nameDefinitionMultimap.get(name)) {
                if (prevDef instanceof ExternalNameOnlyDefinition
                    && !jsdocContainsDeclarations(node)) {
                  if (node.matchesQualifiedName(prevDef.getLValue())) {
                    // Drop this stub, there is a real definition.
                    stubsToRemove.add(prevDef);
                  }
                }
              }
              for (Definition prevDef : stubsToRemove) {
                nameDefinitionMultimap.remove(name, prevDef);
              }
            }
          }
          nameDefinitionMultimap.put(name, def);
          definitionSiteMap.put(node,
              new DefinitionSite(node,
                  def,
                  traversal.getModule(),
                  traversal.inGlobalScope(),
                  inExterns));
        }
      }
      // Externs expression statements (e.g. "foo.bar;") act as name-only
      // stubs; record them unless a real definition already exists.
      if (inExterns && (parent != null) && parent.isExprResult()) {
        String name = getSimplifiedName(node);
        if (name != null) {
          // TODO(johnlenz) : remove this code if it becomes illegal to have
          // stubs in the externs definitions.
          // We need special handling of untyped externs stubs here:
          // the stub should be dropped if the name is provided elsewhere.
          // We can't just drop the stub now as it needs to be used as the
          // externs definition if no other definition is provided.
          boolean dropStub = false;
          if (!jsdocContainsDeclarations(node) && node.isQualifiedName()) {
            for (Definition prevDef : nameDefinitionMultimap.get(name)) {
              if (node.matchesQualifiedName(prevDef.getLValue())) {
                dropStub = true;
                break;
              }
            }
          }
          if (!dropStub) {
            // Incomplete definition
            Definition definition = new ExternalNameOnlyDefinition(node);
            nameDefinitionMultimap.put(name, definition);
            definitionSiteMap.put(node,
                new DefinitionSite(node,
                    definition,
                    traversal.getModule(),
                    traversal.inGlobalScope(),
                    inExterns));
          }
        }
      }
    }
/**
* @return Whether the node has a JSDoc that actually declares something.
*/
private boolean jsdocContainsDeclarations(Node node) {
JSDocInfo info = node.getJSDocInfo();
return (info != null && info.containsDeclaration());
}
}
  /**
   * Records, for every node that references a known definition, a UseSite
   * keyed by the definition's simplified name.
   */
  private class UseSiteGatheringCallback extends AbstractPostOrderCallback {
    @Override
    public void visit(NodeTraversal traversal, Node node, Node parent) {
      Collection<Definition> defs = getDefinitionsReferencedAt(node);
      if (defs == null) {
        return;
      }
      // All definitions for a node share the same simplified name, so the
      // first one suffices for the multimap key.
      Definition first = defs.iterator().next();
      String name = getSimplifiedName(first.getLValue());
      Preconditions.checkNotNull(name);
      nameUseSiteMultimap.put(
          name,
          new UseSite(node, traversal.getScope(), traversal.getModule()));
    }
  }
/**
* @param use A use site to check.
* @return Whether the use is a call or new.
*/
static boolean isCallOrNewSite(UseSite use) {
Node call = use.node.getParent();
if (call == null) {
// The node has been removed from the AST.
return false;
}
// We need to make sure we're dealing with a call to the function we're
// optimizing. If the the first child of the parent is not the site, this
// is a nested call and it's a call to another function.
return NodeUtil.isCallOrNew(call) && call.getFirstChild() == use.node;
}
  /**
   * @return Whether it is safe for optimization passes to rewrite this
   *     definition (not exported, has uses, and every use resolves uniquely
   *     to this definition).
   */
  boolean canModifyDefinition(Definition definition) {
    if (isExported(definition)) {
      return false;
    }
    // Don't modify unused definitions for two reasons:
    // 1) It causes unnecessary churn
    // 2) Other definitions might be used to reflect on this one using
    //    goog.reflect.object (the check for definitions with uses is below).
    Collection<UseSite> useSites = getUseSites(definition);
    if (useSites.isEmpty()) {
      return false;
    }
    for (UseSite site : useSites) {
      // This catches the case where an object literal in goog.reflect.object
      // and a prototype method have the same property name.
      // NOTE(nicksantos): Maps and trogedit both do this by different
      // mechanisms.
      Node nameNode = site.node;
      Collection<Definition> singleSiteDefinitions =
          getDefinitionsReferencedAt(nameNode);
      // More than one candidate definition means the use is ambiguous.
      if (singleSiteDefinitions.size() > 1) {
        return false;
      }
      Preconditions.checkState(!singleSiteDefinitions.isEmpty());
      Preconditions.checkState(singleSiteDefinitions.contains(definition));
    }
    return true;
  }
/**
* @return Whether the definition is directly exported.
*/
private boolean isExported(Definition definition) {
// Assume an exported method result is used.
Node lValue = definition.getLValue();
if (lValue == null) {
return true;
}
String partialName;
if (lValue.isGetProp()) {
partialName = lValue.getLastChild().getString();
} else if (lValue.isName()) {
partialName = lValue.getString();
} else {
// GETELEM is assumed to be an export or other expression are unknown
// uses.
return true;
}
CodingConvention codingConvention = compiler.getCodingConvention();
return codingConvention.isExported(partialName);
}
  /**
   * @return Whether the function is defined in a non-aliasing expression
   *     (a plain declaration or a simple assignment to a name).
   */
  static boolean isSimpleFunctionDeclaration(Node fn) {
    Node parent = fn.getParent();
    Node gramps = parent.getParent();
    // Simple definition finder doesn't provide useful results in some
    // cases, specifically:
    //  - functions with recursive definitions
    //  - functions defined in object literals
    //  - functions defined in array literals
    // Here we defined a set of known function declaration that are 'ok'.
    // Some projects seem to actually define "JSCompiler_renameProperty"
    // rather than simply having an extern definition.  Don't mess with it.
    Node nameNode = SimpleDefinitionFinder.getNameNodeFromFunctionNode(fn);
    if (nameNode != null
        && nameNode.isName()) {
      String name = nameNode.getString();
      if (name.equals(NodeUtil.JSC_PROPERTY_NAME_FN) ||
          name.equals(
             ObjectPropertyStringPreprocess.EXTERN_OBJECT_PROPERTY_STRING)) {
        return false;
      }
    }
    // example: function a(){};
    if (NodeUtil.isFunctionDeclaration(fn)) {
      return true;
    }
    // example: a = function(){};
    // example: var a = function(){};
    // An anonymous function expression assigned to a single name does not
    // alias; a named function expression could be referenced recursively.
    return fn.getFirstChild().getString().isEmpty()
        && (NodeUtil.isExprAssign(gramps) || parent.isName());
  }
/**
* @return the node defining the name for this function (if any).
*/
static Node getNameNodeFromFunctionNode(Node function) {
Preconditions.checkState(function.isFunction());
if (NodeUtil.isFunctionDeclaration(function)) {
return function.getFirstChild();
} else {
Node parent = function.getParent();
if (NodeUtil.isVarDeclaration(parent)) {
return parent;
} else if (parent.isAssign()) {
return parent.getFirstChild();
} else if (NodeUtil.isObjectLitKey(parent)) {
return parent;
}
}
return null;
}
/**
* Traverse a node and its children and remove any references to from
* the structures.
*/
void removeReferences(Node node) {
if (DefinitionsRemover.isDefinitionNode(node)) {
DefinitionSite defSite = definitionSiteMap.get(node);
if (defSite != null) {
Definition def = defSite.definition;
String name = getSimplifiedName(def.getLValue());
if (name != null) {
this.definitionSiteMap.remove(node);
this.nameDefinitionMultimap.remove(name, node);
}
}
} else {
Node useSite = node;
if (useSite.isGetProp()) {
String propName = useSite.getLastChild().getString();
if (propName.equals("apply") || propName.equals("call")) {
useSite = useSite.getFirstChild();
}
}
String name = getSimplifiedName(useSite);
if (name != null) {
this.nameUseSiteMultimap.remove(name, new UseSite(useSite, null, null));
}
}
for (Node child : node.children()) {
removeReferences(child);
}
}
}
| apache-2.0 |
anindoasaha/php_nginx | php-5.5.16/ext/gd/tests/imagecolordeallocate_basic.phpt | 420 | --TEST--
Testing imagecolordeallocate() of GD library
--CREDITS--
Rafael Dohms <rdohms [at] gmail [dot] com>
#testfest PHPSP on 2009-06-20
--SKIPIF--
<?php
if (!extension_loaded("gd")) die("skip GD not present");
?>
--FILE--
<?php
// Create a 180x30 truecolor canvas to exercise imagecolordeallocate().
$image = imagecreatetruecolor(180, 30);
// Allocate white so there is a color value to deallocate.
$white = imagecolorallocate($image, 255, 255, 255);
// Deallocation is expected to succeed and return true (see --EXPECT--).
$result = imagecolordeallocate($image, $white);
var_dump($result);
?>
--EXPECT--
bool(true)
| apache-2.0 |
0x73/rust | src/test/compile-fail/issue-17718-const-naming.rs | 611 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// A lower-case, unused `const` must trigger both the naming lint and the
// dead-code lint once warnings are denied. The `//~^` markers below point
// (relatively) at the `const` line, so no comments are inserted between it
// and its error annotations.
#[deny(warnings)]
const foo: int = 3;
//~^ ERROR: should have an uppercase name such as
//~^^ ERROR: constant item is never used
fn main() {}
| apache-2.0 |
graydon/rust | src/test/ui/parser/float-literals.rs | 557 | // build-pass
// ignore-tidy-linelength
// Regression test for #31109 and #31407.
pub fn main() {
    // Literal with far more digits than an f64 can represent: must round
    // cleanly instead of crashing the parser (issue #31109).
    let _: f64 = 0.3333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333;
    // Exponent below f64's minimum: should underflow, not abort (issue #31407).
    let _: f64 = 1234567890123456789012345678901234567890e-340;
}
| apache-2.0 |
GunoH/intellij-community | plugins/hg4idea/src/org/zmlx/hg4idea/repo/HgRepositoryImpl.java | 8482 | // Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package org.zmlx.hg4idea.repo;
import com.intellij.dvcs.ignore.VcsIgnoredHolderUpdateListener;
import com.intellij.dvcs.repo.RepositoryImpl;
import com.intellij.openapi.Disposable;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.progress.util.BackgroundTaskUtil;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.Disposer;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.openapi.vcs.AbstractVcs;
import com.intellij.openapi.vcs.FilePath;
import com.intellij.openapi.vcs.changes.ChangeListManagerImpl;
import com.intellij.openapi.vcs.changes.VcsManagedFilesHolder;
import com.intellij.openapi.vfs.VfsUtilCore;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.util.containers.ContainerUtil;
import com.intellij.vcs.log.Hash;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.zmlx.hg4idea.HgNameWithHashInfo;
import org.zmlx.hg4idea.HgVcs;
import org.zmlx.hg4idea.command.HgBranchesCommand;
import org.zmlx.hg4idea.execution.HgCommandResult;
import org.zmlx.hg4idea.provider.HgLocalIgnoredHolder;
import org.zmlx.hg4idea.util.HgUtil;
import java.util.*;
/**
 * Default {@link HgRepository} implementation. Caches repository state in an
 * immutable {@link HgRepoInfo} snapshot and refreshes it on {@link #update()},
 * notifying {@link HgVcs#STATUS_TOPIC} listeners only when the state changed.
 */
public final class HgRepositoryImpl extends RepositoryImpl implements HgRepository {
  private static final Logger LOG = Logger.getInstance(HgRepositoryImpl.class);
  @NotNull private final HgVcs myVcs;
  @NotNull private final HgRepositoryReader myReader;
  @NotNull private final VirtualFile myHgDir;
  // volatile: read from arbitrary threads, replaced wholesale in update().
  @NotNull private volatile HgRepoInfo myInfo;
  @NotNull private Set<String> myOpenedBranches = Collections.emptySet();
  @NotNull private volatile HgConfig myConfig;
  private final HgLocalIgnoredHolder myLocalIgnoredHolder;
  @SuppressWarnings("ConstantConditions")
  private HgRepositoryImpl(@NotNull VirtualFile rootDir, @NotNull HgVcs vcs,
                           @NotNull Disposable parentDisposable) {
    super(vcs.getProject(), rootDir, parentDisposable);
    myVcs = vcs;
    myHgDir = rootDir.findChild(HgUtil.DOT_HG);
    assert myHgDir != null : ".hg directory wasn't found under " + rootDir.getPresentableUrl();
    myReader = new HgRepositoryReader(vcs, VfsUtilCore.virtualToIoFile(myHgDir));
    myConfig = HgConfig.getInstance(getProject(), rootDir);
    myLocalIgnoredHolder = new HgLocalIgnoredHolder(this, HgUtil.getRepositoryManager(getProject()));
    myLocalIgnoredHolder.setupListeners();
    Disposer.register(this, myLocalIgnoredHolder);
    myLocalIgnoredHolder.addUpdateStateListener(new MyIgnoredHolderAsyncListener(getProject()));
    update();
  }
  /**
   * Factory method: creates the repository and starts its updater and the
   * initial ignored-files rescan.
   */
  @NotNull
  public static HgRepository getInstance(@NotNull VirtualFile root, @NotNull Project project,
                                         @NotNull Disposable parentDisposable) {
    HgVcs vcs = HgVcs.getInstance(project);
    if (vcs == null) {
      throw new IllegalArgumentException("Vcs not found for project " + project);
    }
    HgRepositoryImpl repository = new HgRepositoryImpl(root, vcs, parentDisposable);
    repository.setupUpdater();
    return repository;
  }
  private void setupUpdater() {
    HgRepositoryUpdater updater = new HgRepositoryUpdater(this);
    Disposer.register(this, updater);
    myLocalIgnoredHolder.startRescan();
  }
  @NotNull
  @Override
  public VirtualFile getHgDir() {
    return myHgDir;
  }
  @NotNull
  @Override
  public State getState() {
    return myInfo.getState();
  }
  /**
   * Return active bookmark name if exist or heavy branch name otherwise
   */
  @Nullable
  @Override
  public String getCurrentBranchName() {
    String branchOrBookMarkName = getCurrentBookmark();
    if (StringUtil.isEmptyOrSpaces(branchOrBookMarkName)) {
      branchOrBookMarkName = getCurrentBranch();
    }
    return branchOrBookMarkName;
  }
  @NotNull
  @Override
  public AbstractVcs getVcs() {
    return myVcs;
  }
  @Override
  @NotNull
  public String getCurrentBranch() {
    return myInfo.getCurrentBranch();
  }
  @Override
  @Nullable
  public String getCurrentRevision() {
    return myInfo.getCurrentRevision();
  }
  @Override
  @Nullable
  public String getTipRevision() {
    return myInfo.getTipRevision();
  }
  @Override
  @NotNull
  public Map<String, LinkedHashSet<Hash>> getBranches() {
    return myInfo.getBranches();
  }
  @Override
  @NotNull
  public Set<String> getOpenedBranches() {
    return myOpenedBranches;
  }
  @NotNull
  @Override
  public Collection<HgNameWithHashInfo> getBookmarks() {
    return myInfo.getBookmarks();
  }
  @Nullable
  @Override
  public String getCurrentBookmark() {
    return myInfo.getCurrentBookmark();
  }
  @NotNull
  @Override
  public Collection<HgNameWithHashInfo> getTags() {
    return myInfo.getTags();
  }
  @NotNull
  @Override
  public Collection<HgNameWithHashInfo> getLocalTags() {
    return myInfo.getLocalTags();
  }
  @NotNull
  @Override
  public HgConfig getRepositoryConfig() {
    return myConfig;
  }
  @Override
  public boolean hasSubrepos() {
    return myInfo.hasSubrepos();
  }
  @Override
  @NotNull
  public Collection<HgNameWithHashInfo> getSubrepos() {
    return myInfo.getSubrepos();
  }
  @NotNull
  @Override
  public List<HgNameWithHashInfo> getMQAppliedPatches() {
    return myInfo.getMQApplied();
  }
  @NotNull
  @Override
  public List<String> getAllPatchNames() {
    return myInfo.getMqPatchNames();
  }
  /** @return MQ patch names that are known but not currently applied. */
  @NotNull
  @Override
  public List<String> getUnappliedPatchNames() {
    final List<String> appliedPatches = HgUtil.getNamesWithoutHashes(getMQAppliedPatches());
    return ContainerUtil.filter(getAllPatchNames(), s -> !appliedPatches.contains(s));
  }
  @Override
  public void update() {
    HgRepoInfo currentInfo = readRepoInfo();
    // update only if something changed!!! Otherwise the log would be
    // refreshed on every call, causing flicker and wasted work.
    final Project project = getProject();
    if (!project.isDisposed() && !currentInfo.equals(myInfo)) {
      myInfo = currentInfo;
      HgCommandResult branchCommandResult = new HgBranchesCommand(project, getRoot()).collectBranches();
      if (branchCommandResult == null || branchCommandResult.getExitValue() != 0) {
        // hg executable is not valid; fall back to treating all branches as open.
        LOG.warn("Could not collect hg opened branches.");
        myOpenedBranches = myInfo.getBranches().keySet();
      }
      else {
        myOpenedBranches = HgBranchesCommand.collectNames(branchCommandResult);
      }
      // Notify listeners off the current thread.
      BackgroundTaskUtil.executeOnPooledThread(this, ()
        -> BackgroundTaskUtil.syncPublisher(project, HgVcs.STATUS_TOPIC).update(project, getRoot()));
    }
  }
  @NonNls
  @NotNull
  @Override
  public String toLogString() {
    return "HgRepository " + getRoot() + " : " + myInfo;
  }
  @NotNull
  private HgRepoInfo readRepoInfo() {
    //in GitRepositoryImpl there are temporary state object for reader fields storing! Todo Check;
    return
      new HgRepoInfo(myReader.readCurrentBranch(), myReader.readCurrentRevision(), myReader.readCurrentTipRevision(), myReader.readState(),
                     myReader.readBranches(),
                     myReader.readBookmarks(), myReader.readCurrentBookmark(), myReader.readTags(), myReader.readLocalTags(),
                     myReader.readSubrepos(), myReader.readMQAppliedPatches(), myReader.readMqPatchNames());
  }
  @Override
  public void updateConfig() {
    myConfig = HgConfig.getInstance(getProject(), getRoot());
  }
  @NotNull
  @Override
  public HgLocalIgnoredHolder getIgnoredFilesHolder() {
    return myLocalIgnoredHolder;
  }
  /** Republishes ignored-holder state changes on the project message bus. */
  private static class MyIgnoredHolderAsyncListener implements VcsIgnoredHolderUpdateListener {
    @NotNull private final Project myProject;
    MyIgnoredHolderAsyncListener(@NotNull Project project) {
      myProject = project;
    }
    @Override
    public void updateStarted() {
      BackgroundTaskUtil.syncPublisher(myProject, VcsManagedFilesHolder.TOPIC).updatingModeChanged();
    }
    @Override
    public void updateFinished(@NotNull Collection<FilePath> ignoredPaths, boolean isFullRescan) {
      if(myProject.isDisposed()) return;
      BackgroundTaskUtil.syncPublisher(myProject, VcsManagedFilesHolder.TOPIC).updatingModeChanged();
      ChangeListManagerImpl.getInstanceImpl(myProject).notifyUnchangedFileStatusChanged();
    }
  }
}
| apache-2.0 |
westmisfit/origin | Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/config.go | 11298 | /*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"fmt"
"reflect"
"sync"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/config"
utilerrors "github.com/GoogleCloudPlatform/kubernetes/pkg/util/errors"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/fielderrors"
"github.com/golang/glog"
)
// PodConfigNotificationMode describes how changes are sent to the update channel.
type PodConfigNotificationMode int
const (
	// PodConfigNotificationSnapshot delivers the full configuration as a SET whenever
	// any change occurs.
	PodConfigNotificationSnapshot = iota
	// PodConfigNotificationSnapshotAndUpdates delivers an UPDATE message whenever pods are
	// changed, and a SET message if there are any additions or removals.
	PodConfigNotificationSnapshotAndUpdates
	// PodConfigNotificationIncremental delivers ADD, UPDATE, and REMOVE to the update channel.
	PodConfigNotificationIncremental
)
// PodConfig is a configuration mux that merges many sources of pod configuration into a single
// consistent structure, and then delivers incremental change notifications to listeners
// in order.
type PodConfig struct {
	pods *podStorage
	mux  *config.Mux
	// the channel of denormalized changes passed to listeners
	updates chan kubelet.PodUpdate
	// contains the list of all configured sources; guarded by sourcesLock
	sourcesLock sync.Mutex
	sources     util.StringSet
}
// NewPodConfig creates an object that can merge many configuration sources into a stream
// of normalized updates to a pod configuration. The updates channel is buffered
// (50 entries) so sources are not blocked by a slow listener.
func NewPodConfig(mode PodConfigNotificationMode, recorder record.EventRecorder) *PodConfig {
	updates := make(chan kubelet.PodUpdate, 50)
	storage := newPodStorage(updates, mode, recorder)
	return &PodConfig{
		pods:    storage,
		mux:     config.NewMux(storage),
		updates: updates,
		sources: util.StringSet{},
	}
}
// Channel creates or returns a config source channel.  The channel
// only accepts PodUpdates. The source name is also recorded so that
// SeenAllSources can later check completeness.
func (c *PodConfig) Channel(source string) chan<- interface{} {
	c.sourcesLock.Lock()
	defer c.sourcesLock.Unlock()
	c.sources.Insert(source)
	return c.mux.Channel(source)
}
// SeenAllSources returns true if this config has received a SET
// message from all configured sources, false otherwise.
func (c *PodConfig) SeenAllSources() bool {
	if c.pods == nil {
		return false
	}
	glog.V(6).Infof("Looking for %v, have seen %v", c.sources.List(), c.pods.sourcesSeen)
	return c.pods.seenSources(c.sources.List()...)
}
// Updates returns a channel of updates to the configuration, properly denormalized.
func (c *PodConfig) Updates() <-chan kubelet.PodUpdate {
	return c.updates
}
// Sync requests the full configuration be delivered to the update channel.
func (c *PodConfig) Sync() {
	c.pods.Sync()
}
// podStorage manages the current pod state at any point in time and ensures updates
// to the channel are delivered in order.  Note that this object is an in-memory source of
// "truth" and on creation contains zero entries.  Once all previously read sources are
// available, then this object should be considered authoritative.
type podStorage struct {
	podLock sync.RWMutex
	// map of source name to pod name to pod reference; guarded by podLock
	pods map[string]map[string]*api.Pod
	mode PodConfigNotificationMode
	// ensures that updates are delivered in strict order
	// on the updates channel
	updateLock sync.Mutex
	updates    chan<- kubelet.PodUpdate
	// contains the set of all sources that have sent at least one SET
	sourcesSeenLock sync.Mutex
	sourcesSeen     util.StringSet
	// the EventRecorder to use
	recorder record.EventRecorder
}
// TODO: PodConfigNotificationMode could be handled by a listener to the updates channel
// in the future, especially with multiple listeners.
// TODO: allow initialization of the current state of the store with snapshotted version.
func newPodStorage(updates chan<- kubelet.PodUpdate, mode PodConfigNotificationMode, recorder record.EventRecorder) *podStorage {
	return &podStorage{
		pods:        make(map[string]map[string]*api.Pod),
		mode:        mode,
		updates:     updates,
		sourcesSeen: util.StringSet{},
		recorder:    recorder,
	}
}
// Merge normalizes a set of incoming changes from different sources into a map of all Pods
// and ensures that redundant changes are filtered out, and then pushes zero or more minimal
// updates onto the update channel.  Ensures that updates are delivered in order.
func (s *podStorage) Merge(source string, change interface{}) error {
	// updateLock serializes deliveries so listeners observe changes in order.
	s.updateLock.Lock()
	defer s.updateLock.Unlock()
	adds, updates, deletes := s.merge(source, change)
	// deliver update notifications according to the configured mode
	switch s.mode {
	case PodConfigNotificationIncremental:
		// Deletes are sent before adds so a rename is not briefly duplicated.
		if len(deletes.Pods) > 0 {
			s.updates <- *deletes
		}
		if len(adds.Pods) > 0 {
			s.updates <- *adds
		}
		if len(updates.Pods) > 0 {
			s.updates <- *updates
		}
	case PodConfigNotificationSnapshotAndUpdates:
		if len(updates.Pods) > 0 {
			s.updates <- *updates
		}
		// Adds/removals trigger a full snapshot instead of incremental ops.
		if len(deletes.Pods) > 0 || len(adds.Pods) > 0 {
			s.updates <- kubelet.PodUpdate{s.MergedState().([]api.Pod), kubelet.SET, source}
		}
	case PodConfigNotificationSnapshot:
		if len(updates.Pods) > 0 || len(deletes.Pods) > 0 || len(adds.Pods) > 0 {
			s.updates <- kubelet.PodUpdate{s.MergedState().([]api.Pod), kubelet.SET, source}
		}
	default:
		panic(fmt.Sprintf("unsupported PodConfigNotificationMode: %#v", s.mode))
	}
	return nil
}
// merge applies one change from a source to the per-source pod map and
// classifies every affected pod into adds, updates, or deletes.
// Caller must hold updateLock; podLock is taken here.
func (s *podStorage) merge(source string, change interface{}) (adds, updates, deletes *kubelet.PodUpdate) {
	s.podLock.Lock()
	defer s.podLock.Unlock()
	adds = &kubelet.PodUpdate{Op: kubelet.ADD}
	updates = &kubelet.PodUpdate{Op: kubelet.UPDATE}
	deletes = &kubelet.PodUpdate{Op: kubelet.REMOVE}
	pods := s.pods[source]
	if pods == nil {
		pods = make(map[string]*api.Pod)
	}
	update := change.(kubelet.PodUpdate)
	switch update.Op {
	case kubelet.ADD, kubelet.UPDATE:
		if update.Op == kubelet.ADD {
			glog.V(4).Infof("Adding new pods from source %s : %v", source, update.Pods)
		} else {
			glog.V(4).Infof("Updating pods from source %s : %v", source, update.Pods)
		}
		filtered := filterInvalidPods(update.Pods, source, s.recorder)
		for _, ref := range filtered {
			name := kubecontainer.GetPodFullName(ref)
			if existing, found := pods[name]; found {
				// Only a spec change counts as an update; identical specs are no-ops.
				if !reflect.DeepEqual(existing.Spec, ref.Spec) {
					// this is an update
					existing.Spec = ref.Spec
					updates.Pods = append(updates.Pods, *existing)
					continue
				}
				// this is a no-op
				continue
			}
			// this is an add
			if ref.Annotations == nil {
				ref.Annotations = make(map[string]string)
			}
			// Tag the pod with its originating source for later bookkeeping.
			ref.Annotations[kubelet.ConfigSourceAnnotationKey] = source
			pods[name] = ref
			adds.Pods = append(adds.Pods, *ref)
		}
	case kubelet.REMOVE:
		glog.V(4).Infof("Removing a pod %v", update)
		for _, value := range update.Pods {
			name := kubecontainer.GetPodFullName(&value)
			if existing, found := pods[name]; found {
				// this is a delete
				delete(pods, name)
				deletes.Pods = append(deletes.Pods, *existing)
				continue
			}
			// this is a no-op
		}
	case kubelet.SET:
		glog.V(4).Infof("Setting pods for source %s : %v", source, update)
		s.markSourceSet(source)
		// Clear the old map entries by just creating a new map
		oldPods := pods
		pods = make(map[string]*api.Pod)
		filtered := filterInvalidPods(update.Pods, source, s.recorder)
		for _, ref := range filtered {
			name := kubecontainer.GetPodFullName(ref)
			if existing, found := oldPods[name]; found {
				pods[name] = existing
				if !reflect.DeepEqual(existing.Spec, ref.Spec) {
					// this is an update
					existing.Spec = ref.Spec
					updates.Pods = append(updates.Pods, *existing)
					continue
				}
				// this is a no-op
				continue
			}
			if ref.Annotations == nil {
				ref.Annotations = make(map[string]string)
			}
			ref.Annotations[kubelet.ConfigSourceAnnotationKey] = source
			pods[name] = ref
			adds.Pods = append(adds.Pods, *ref)
		}
		// Anything left only in the old map was dropped by this SET.
		for name, existing := range oldPods {
			if _, found := pods[name]; !found {
				// this is a delete
				deletes.Pods = append(deletes.Pods, *existing)
			}
		}
	default:
		glog.Warningf("Received invalid update type: %v", update)
	}
	s.pods[source] = pods
	return adds, updates, deletes
}
// markSourceSet records that the given source has delivered at least one SET.
func (s *podStorage) markSourceSet(source string) {
	s.sourcesSeenLock.Lock()
	defer s.sourcesSeenLock.Unlock()
	s.sourcesSeen.Insert(source)
}
// seenSources reports whether every named source has already sent a SET.
func (s *podStorage) seenSources(sources ...string) bool {
	s.sourcesSeenLock.Lock()
	seen := s.sourcesSeen.HasAll(sources...)
	s.sourcesSeenLock.Unlock()
	return seen
}
// filterInvalidPods validates each pod and drops (with a logged warning and
// an event) any pod that fails validation or duplicates an earlier pod's
// full name within the same batch.
func filterInvalidPods(pods []api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) {
	names := util.StringSet{}
	for i := range pods {
		pod := &pods[i]
		var errlist []error
		if errs := validation.ValidatePod(pod); len(errs) != 0 {
			errlist = append(errlist, errs...)
			// If validation fails, don't trust it any further -
			// even Name could be bad.
		} else {
			name := kubecontainer.GetPodFullName(pod)
			if names.Has(name) {
				errlist = append(errlist, fielderrors.NewFieldDuplicate("name", pod.Name))
			} else {
				names.Insert(name)
			}
		}
		if len(errlist) > 0 {
			name := bestPodIdentString(pod)
			err := utilerrors.NewAggregate(errlist)
			glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
			recorder.Eventf(pod, "failedValidation", "Error validating pod %s from %s, ignoring: %v", name, source, err)
			continue
		}
		filtered = append(filtered, pod)
	}
	return
}
// Sync sends a copy of the current state through the update channel as a
// single SET attributed to AllSource.
func (s *podStorage) Sync() {
	s.updateLock.Lock()
	defer s.updateLock.Unlock()
	s.updates <- kubelet.PodUpdate{s.MergedState().([]api.Pod), kubelet.SET, kubelet.AllSource}
}
// MergedState implements config.Accessor: returns deep copies of all pods
// from all sources, flattened into a single []api.Pod.
func (s *podStorage) MergedState() interface{} {
	s.podLock.RLock()
	defer s.podLock.RUnlock()
	pods := make([]api.Pod, 0)
	for _, sourcePods := range s.pods {
		for _, podRef := range sourcePods {
			pod, err := api.Scheme.Copy(podRef)
			if err != nil {
				// BUG FIX: on a copy error pod is nil; dereferencing it below
				// panicked. Log and skip the pod instead.
				glog.Errorf("unable to copy pod: %v", err)
				continue
			}
			pods = append(pods, *pod.(*api.Pod))
		}
	}
	return pods
}
// bestPodIdentString builds a "name.namespace" label for log messages,
// substituting placeholders for empty fields.
func bestPodIdentString(pod *api.Pod) string {
	name, namespace := pod.Name, pod.Namespace
	if name == "" {
		name = "<empty-name>"
	}
	if namespace == "" {
		namespace = "<empty-namespace>"
	}
	return fmt.Sprintf("%s.%s", name, namespace)
}
| apache-2.0 |
zlamalp/perun | perun-base/src/main/java/cz/metacentrum/perun/core/api/exceptions/DestinationNotExistsException.java | 931 | package cz.metacentrum.perun.core.api.exceptions;
/**
* Exception is thrown when the destination does not exist
*
* @author Michal Prochazka
*/
/**
 * Exception thrown when the requested destination does not exist.
 *
 * @author Michal Prochazka
 */
public class DestinationNotExistsException extends EntityNotExistsException {
	static final long serialVersionUID = 0;
	/**
	 * Simple constructor with a message
	 * @param message message with details about the cause
	 */
	public DestinationNotExistsException(String message) {
		super(message);
	}
	/**
	 * Constructor with a message and Throwable object
	 * @param message message with details about the cause
	 * @param cause Throwable that caused throwing of this exception
	 */
	public DestinationNotExistsException(String message, Throwable cause) {
		super(message, cause);
	}
	/**
	 * Constructor with a Throwable object
	 * @param cause Throwable that caused throwing of this exception
	 */
	public DestinationNotExistsException(Throwable cause) {
		super(cause);
	}
}
| bsd-2-clause |
yacon/koala-framework | tests/Kwc/Trl/InheritContent/Test/Component.php | 409 | <?php
/**
 * Test component for content inheritance: registers a static child page
 * generator named "test2" on top of the parent settings.
 */
class Kwc_Trl_InheritContent_Test_Component extends Kwc_Abstract
{
    public static function getSettings()
    {
        $ret = parent::getSettings();
        // Add a "test2" child page backed by the Test2 component.
        $ret['generators']['test2'] = array(
            'class' => 'Kwf_Component_Generator_Page_Static',
            'component' => 'Kwc_Trl_InheritContent_Test_Test2_Component',
            'name' => 'test2'
        );
        return $ret;
    }
}
| bsd-2-clause |
mk0x9/go | src/database/sql/sql_test.go | 48361 | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sql
import (
"database/sql/driver"
"errors"
"fmt"
"math/rand"
"reflect"
"runtime"
"strings"
"sync"
"testing"
"time"
)
// init installs a putConnHook that detects double-frees of driver
// connections: each freed conn records the stack that freed it, and a
// second free of the same conn panics with both stacks.
func init() {
	type dbConn struct {
		db *DB
		c  *driverConn
	}
	freedFrom := make(map[dbConn]string)
	putConnHook = func(db *DB, c *driverConn) {
		idx := -1
		for i, v := range db.freeConn {
			if v == c {
				idx = i
				break
			}
		}
		if idx >= 0 {
			// print before panic, as panic may get lost due to conflicting panic
			// (all goroutines asleep) elsewhere, since we might not unlock
			// the mutex in freeConn here.
			println("double free of conn. conflicts are:\nA) " + freedFrom[dbConn{db, c}] + "\n\nand\nB) " + stack())
			panic("double free of conn.")
		}
		freedFrom[dbConn{db, c}] = stack()
	}
}
// fakeDBName is the DSN handed to the "test" fake driver.
const fakeDBName = "foo"
// chrisBirthday is a fixed timestamp used to exercise datetime round-trips.
var chrisBirthday = time.Unix(123456789, 0)
// newTestDB opens a fresh fake-driver DB, wipes it, and optionally seeds
// the well-known "people" or "magicquery" fixture tables.
func newTestDB(t testing.TB, name string) *DB {
	db, err := Open("test", fakeDBName)
	if err != nil {
		t.Fatalf("Open: %v", err)
	}
	if _, err := db.Exec("WIPE"); err != nil {
		t.Fatalf("exec wipe: %v", err)
	}
	if name == "people" {
		exec(t, db, "CREATE|people|name=string,age=int32,photo=blob,dead=bool,bdate=datetime")
		exec(t, db, "INSERT|people|name=Alice,age=?,photo=APHOTO", 1)
		exec(t, db, "INSERT|people|name=Bob,age=?,photo=BPHOTO", 2)
		exec(t, db, "INSERT|people|name=Chris,age=?,photo=CPHOTO,bdate=?", 3, chrisBirthday)
	}
	if name == "magicquery" {
		// Magic table name and column, known by fakedb_test.go.
		exec(t, db, "CREATE|magicquery|op=string,millis=int32")
		exec(t, db, "INSERT|magicquery|op=sleep,millis=10")
	}
	return db
}
// exec runs the query against db and fails the test immediately on error.
func exec(t testing.TB, db *DB, query string, args ...interface{}) {
	if _, err := db.Exec(query, args...); err != nil {
		t.Fatalf("Exec of %q: %v", query, err)
	}
}
// closeDB closes db and verifies clean shutdown: no panics in flight, no
// errors closing fake conns, no leaked open statements on idle conns, and
// zero connections still open afterwards.
func closeDB(t testing.TB, db *DB) {
	if e := recover(); e != nil {
		fmt.Printf("Panic: %v\n", e)
		panic(e)
	}
	defer setHookpostCloseConn(nil)
	setHookpostCloseConn(func(_ *fakeConn, err error) {
		if err != nil {
			t.Errorf("Error closing fakeConn: %v", err)
		}
	})
	for i, dc := range db.freeConn {
		if n := len(dc.openStmt); n > 0 {
			// Just a sanity check. This is legal in
			// general, but if we make the tests clean up
			// their statements first, then we can safely
			// verify this is always zero here, and any
			// other value is a leak.
			t.Errorf("while closing db, freeConn %d/%d had %d open stmts; want 0", i, len(db.freeConn), n)
		}
	}
	err := db.Close()
	if err != nil {
		t.Fatalf("error closing DB: %v", err)
	}
	db.mu.Lock()
	count := db.numOpen
	db.mu.Unlock()
	if count != 0 {
		t.Fatalf("%d connections still open after closing DB", db.numOpen)
	}
}
// numPrepares assumes that db has exactly 1 idle conn and returns
// its count of calls to Prepare
func numPrepares(t *testing.T, db *DB) int {
	if n := len(db.freeConn); n != 1 {
		t.Fatalf("free conns = %d; want 1", n)
	}
	return db.freeConn[0].ci.(*fakeConn).numPrepare
}
// numDeps returns the current number of tracked dependencies under db.mu.
func (db *DB) numDeps() int {
	db.mu.Lock()
	n := len(db.dep)
	db.mu.Unlock()
	return n
}
// Dependencies are closed via a goroutine, so this polls waiting for
// numDeps to fall to want, waiting up to d.
func (db *DB) numDepsPollUntil(want int, d time.Duration) int {
	deadline := time.Now().Add(d)
	for {
		n := db.numDeps()
		// Return the last observed count whether or not the target was hit.
		if n <= want || time.Now().After(deadline) {
			return n
		}
		time.Sleep(50 * time.Millisecond)
	}
}
// numFreeConns returns the number of idle connections under db.mu.
func (db *DB) numFreeConns() int {
	db.mu.Lock()
	n := len(db.freeConn)
	db.mu.Unlock()
	return n
}
// dumpDeps logs the whole dependency graph, one tree per root closer.
func (db *DB) dumpDeps(t *testing.T) {
	for fc := range db.dep {
		db.dumpDep(t, 0, fc, map[finalCloser]bool{})
	}
}
func (db *DB) dumpDep(t *testing.T, depth int, dep finalCloser, seen map[finalCloser]bool) {
seen[dep] = true
indent := strings.Repeat(" ", depth)
ds := db.dep[dep]
for k := range ds {
t.Logf("%s%T (%p) waiting for -> %T (%p)", indent, dep, dep, k, k)
if fc, ok := k.(finalCloser); ok {
if !seen[fc] {
db.dumpDep(t, depth+1, fc, seen)
}
}
}
}
// TestQuery exercises the basic Query/Next/Scan loop and verifies that
// hitting EOF returns the connection to the free pool after exactly
// one Prepare call.
func TestQuery(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	prepares0 := numPrepares(t, db)
	rows, err := db.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatalf("Query: %v", err)
	}
	type row struct {
		age  int
		name string
	}
	got := []row{}
	for rows.Next() {
		var r row
		err = rows.Scan(&r.age, &r.name)
		if err != nil {
			t.Fatalf("Scan: %v", err)
		}
		got = append(got, r)
	}
	err = rows.Err()
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	want := []row{
		{age: 1, name: "Alice"},
		{age: 2, name: "Bob"},
		{age: 3, name: "Chris"},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("mismatch.\n got: %#v\nwant: %#v", got, want)
	}
	// And verify that the final rows.Next() call, which hit EOF,
	// also closed the rows connection.
	if n := db.numFreeConns(); n != 1 {
		t.Fatalf("free conns after query hitting EOF = %d; want 1", n)
	}
	if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
		t.Errorf("executed %d Prepare statements; want 1", prepares)
	}
}
// TestByteOwnership checks []byte vs RawBytes scan semantics: RawBytes
// aliases driver-owned memory (expected to be clobbered after the rows
// advance), and scanning into RawBytes via QueryRow must error.
func TestByteOwnership(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	rows, err := db.Query("SELECT|people|name,photo|")
	if err != nil {
		t.Fatalf("Query: %v", err)
	}
	type row struct {
		name  []byte
		photo RawBytes
	}
	got := []row{}
	for rows.Next() {
		var r row
		err = rows.Scan(&r.name, &r.photo)
		if err != nil {
			t.Fatalf("Scan: %v", err)
		}
		got = append(got, r)
	}
	// Every RawBytes field is expected to alias the same (since
	// overwritten) driver buffer by the time we compare.
	corruptMemory := []byte("\xffPHOTO")
	want := []row{
		{name: []byte("Alice"), photo: corruptMemory},
		{name: []byte("Bob"), photo: corruptMemory},
		{name: []byte("Chris"), photo: corruptMemory},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("mismatch.\n got: %#v\nwant: %#v", got, want)
	}
	var photo RawBytes
	err = db.QueryRow("SELECT|people|photo|name=?", "Alice").Scan(&photo)
	if err == nil {
		t.Error("want error scanning into RawBytes from QueryRow")
	}
}
// TestRowsColumns verifies Rows.Columns reports the selected column
// names in query order.
func TestRowsColumns(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	rows, err := db.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatalf("Query: %v", err)
	}
	cols, err := rows.Columns()
	if err != nil {
		t.Fatalf("Columns: %v", err)
	}
	want := []string{"age", "name"}
	if !reflect.DeepEqual(cols, want) {
		t.Errorf("got %#v; want %#v", cols, want)
	}
	if err := rows.Close(); err != nil {
		t.Errorf("error closing rows: %s", err)
	}
}
// TestQueryRow covers QueryRow+Scan happy paths (int, string, time,
// []byte destinations) and the error for a destination-count mismatch.
func TestQueryRow(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	var name string
	var age int
	var birthday time.Time
	// Two columns selected, one destination: must fail.
	err := db.QueryRow("SELECT|people|age,name|age=?", 3).Scan(&age)
	if err == nil || !strings.Contains(err.Error(), "expected 2 destination arguments") {
		t.Errorf("expected error from wrong number of arguments; actually got: %v", err)
	}
	err = db.QueryRow("SELECT|people|bdate|age=?", 3).Scan(&birthday)
	if err != nil || !birthday.Equal(chrisBirthday) {
		t.Errorf("chris birthday = %v, err = %v; want %v", birthday, err, chrisBirthday)
	}
	err = db.QueryRow("SELECT|people|age,name|age=?", 2).Scan(&age, &name)
	if err != nil {
		t.Fatalf("age QueryRow+Scan: %v", err)
	}
	if name != "Bob" {
		t.Errorf("expected name Bob, got %q", name)
	}
	if age != 2 {
		t.Errorf("expected age 2, got %d", age)
	}
	err = db.QueryRow("SELECT|people|age,name|name=?", "Alice").Scan(&age, &name)
	if err != nil {
		t.Fatalf("name QueryRow+Scan: %v", err)
	}
	if name != "Alice" {
		t.Errorf("expected name Alice, got %q", name)
	}
	if age != 1 {
		t.Errorf("expected age 1, got %d", age)
	}
	var photo []byte
	err = db.QueryRow("SELECT|people|photo|name=?", "Alice").Scan(&photo)
	if err != nil {
		t.Fatalf("photo QueryRow+Scan: %v", err)
	}
	want := []byte("APHOTO")
	if !reflect.DeepEqual(photo, want) {
		t.Errorf("photo = %q; want %q", photo, want)
	}
}
// TestStatementErrorAfterClose verifies that using a Stmt after
// Stmt.Close returns an error rather than succeeding or panicking.
func TestStatementErrorAfterClose(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	stmt, err := db.Prepare("SELECT|people|age|name=?")
	if err != nil {
		t.Fatalf("Prepare: %v", err)
	}
	err = stmt.Close()
	if err != nil {
		t.Fatalf("Close: %v", err)
	}
	var name string
	err = stmt.QueryRow("foo").Scan(&name)
	if err == nil {
		t.Errorf("expected error from QueryRow.Scan after Stmt.Close")
	}
}
// TestStatementQueryRow runs a prepared query repeatedly with different
// bind parameters and checks each scanned result.
func TestStatementQueryRow(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	stmt, err := db.Prepare("SELECT|people|age|name=?")
	if err != nil {
		t.Fatalf("Prepare: %v", err)
	}
	defer stmt.Close()
	var age int
	// Table-driven: each name should map to its known age.
	for n, tt := range []struct {
		name string
		want int
	}{
		{"Alice", 1},
		{"Bob", 2},
		{"Chris", 3},
	} {
		if err := stmt.QueryRow(tt.name).Scan(&age); err != nil {
			t.Errorf("%d: on %q, QueryRow/Scan: %v", n, tt.name, err)
		} else if age != tt.want {
			t.Errorf("%d: age=%d, want %d", n, age, tt.want)
		}
	}
}
// golang.org/issue/3734
// TestStatementQueryRowConcurrent runs the same prepared statement from
// many goroutines at once; every call must succeed with the right value.
func TestStatementQueryRowConcurrent(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	stmt, err := db.Prepare("SELECT|people|age|name=?")
	if err != nil {
		t.Fatalf("Prepare: %v", err)
	}
	defer stmt.Close()
	const n = 10
	ch := make(chan error, n)
	for i := 0; i < n; i++ {
		go func() {
			var age int
			err := stmt.QueryRow("Alice").Scan(&age)
			if err == nil && age != 1 {
				err = fmt.Errorf("unexpected age %d", age)
			}
			ch <- err
		}()
	}
	// Collect one result per goroutine; buffered channel avoids leaks.
	for i := 0; i < n; i++ {
		if err := <-ch; err != nil {
			t.Error(err)
		}
	}
}
// just a test of fakedb itself
// TestBogusPreboundParameters checks that preparing a statement with an
// invalid prebound conversion fails with fakedb's specific error text.
func TestBogusPreboundParameters(t *testing.T) {
	db := newTestDB(t, "foo")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	_, err := db.Prepare("INSERT|t1|name=?,age=bogusconversion")
	if err == nil {
		t.Fatalf("expected error")
	}
	if err.Error() != `fakedb: invalid conversion to int32 from "bogusconversion"` {
		t.Errorf("unexpected error: %v", err)
	}
}
// TestExec is a table-driven test of Stmt.Exec argument conversion:
// valid conversions, overflow/parse failures, and wrong arg counts.
func TestExec(t *testing.T) {
	db := newTestDB(t, "foo")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Errorf("Stmt, err = %v, %v", stmt, err)
	}
	defer stmt.Close()
	// wantErr == "" means the Exec is expected to succeed.
	type execTest struct {
		args    []interface{}
		wantErr string
	}
	execTests := []execTest{
		// Okay:
		{[]interface{}{"Brad", 31}, ""},
		{[]interface{}{"Brad", int64(31)}, ""},
		{[]interface{}{"Bob", "32"}, ""},
		{[]interface{}{7, 9}, ""},
		// Invalid conversions:
		{[]interface{}{"Brad", int64(0xFFFFFFFF)}, "sql: converting argument #1's type: sql/driver: value 4294967295 overflows int32"},
		{[]interface{}{"Brad", "strconv fail"}, "sql: converting argument #1's type: sql/driver: value \"strconv fail\" can't be converted to int32"},
		// Wrong number of args:
		{[]interface{}{}, "sql: expected 2 arguments, got 0"},
		{[]interface{}{1, 2, 3}, "sql: expected 2 arguments, got 3"},
	}
	for n, et := range execTests {
		_, err := stmt.Exec(et.args...)
		errStr := ""
		if err != nil {
			errStr = err.Error()
		}
		if errStr != et.wantErr {
			t.Errorf("stmt.Execute #%d: for %v, got error %q, want error %q",
				n, et.args, errStr, et.wantErr)
		}
	}
}
// TestTxPrepare verifies Tx.Prepare + Exec + Commit works and that
// Commit closes the transaction-scoped statement.
func TestTxPrepare(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("Begin = %v", err)
	}
	stmt, err := tx.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Fatalf("Stmt, err = %v, %v", stmt, err)
	}
	defer stmt.Close()
	_, err = stmt.Exec("Bobby", 7)
	if err != nil {
		t.Fatalf("Exec = %v", err)
	}
	err = tx.Commit()
	if err != nil {
		t.Fatalf("Commit = %v", err)
	}
	// Commit() should have closed the statement
	if !stmt.closed {
		t.Fatal("Stmt not closed after Commit")
	}
}
// TestTxStmt verifies Tx.Stmt re-binds a DB-level prepared statement to
// a transaction, and that Commit closes the tx-scoped copy.
func TestTxStmt(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Fatalf("Stmt, err = %v, %v", stmt, err)
	}
	defer stmt.Close()
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("Begin = %v", err)
	}
	txs := tx.Stmt(stmt)
	defer txs.Close()
	_, err = txs.Exec("Bobby", 7)
	if err != nil {
		t.Fatalf("Exec = %v", err)
	}
	err = tx.Commit()
	if err != nil {
		t.Fatalf("Commit = %v", err)
	}
	// Commit() should have closed the statement
	if !txs.closed {
		t.Fatal("Stmt not closed after Commit")
	}
}
// Issue: http://golang.org/issue/2784
// This test didn't fail before because we got lucky with the fakedb driver.
// It was failing, and now not, in github.com/bradfitz/go-sql-test
// TestTxQuery runs a query inside a transaction and scans one row.
func TestTxQuery(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	exec(t, db, "INSERT|t1|name=Alice")
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	defer tx.Rollback()
	r, err := tx.Query("SELECT|t1|name|")
	if err != nil {
		t.Fatal(err)
	}
	defer r.Close()
	if !r.Next() {
		// Distinguish an iteration error from an empty result set.
		if r.Err() != nil {
			t.Fatal(r.Err())
		}
		t.Fatal("expected one row")
	}
	var x string
	err = r.Scan(&x)
	if err != nil {
		t.Fatal(err)
	}
}
// TestTxQueryInvalid verifies that querying a non-existent table inside
// a transaction returns an error.
func TestTxQueryInvalid(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	defer tx.Rollback()
	_, err = tx.Query("SELECT|t1|name|")
	if err == nil {
		t.Fatal("Error expected")
	}
}
// Tests fix for issue 4433, that retries in Begin happen when
// conn.Begin() returns ErrBadConn
func TestTxErrBadConn(t *testing.T) {
	// ";badConn" makes the fakedb driver return bad connections,
	// forcing the retry path in Begin.
	db, err := Open("test", fakeDBName+";badConn")
	if err != nil {
		t.Fatalf("Open: %v", err)
	}
	if _, err := db.Exec("WIPE"); err != nil {
		t.Fatalf("exec wipe: %v", err)
	}
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	stmt, err := db.Prepare("INSERT|t1|name=?,age=?")
	if err != nil {
		t.Fatalf("Stmt, err = %v, %v", stmt, err)
	}
	defer stmt.Close()
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("Begin = %v", err)
	}
	txs := tx.Stmt(stmt)
	defer txs.Close()
	_, err = txs.Exec("Bobby", 7)
	if err != nil {
		t.Fatalf("Exec = %v", err)
	}
	err = tx.Commit()
	if err != nil {
		t.Fatalf("Commit = %v", err)
	}
}
// Tests fix for issue 2542, that we release a lock when querying on
// a closed connection.
func TestIssue2542Deadlock(t *testing.T) {
	db := newTestDB(t, "people")
	closeDB(t, db)
	// Two iterations: the second would deadlock if the first failed
	// to release the lock.
	for i := 0; i < 2; i++ {
		_, err := db.Query("SELECT|people|age,name|")
		if err == nil {
			t.Fatalf("expected error")
		}
	}
}
// From golang.org/issue/3865
// TestCloseStmtBeforeRows verifies that closing a Stmt while its Rows
// are still open is safe.
func TestCloseStmtBeforeRows(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	s, err := db.Prepare("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	r, err := s.Query()
	if err != nil {
		s.Close()
		t.Fatal(err)
	}
	// Close the statement first, then the rows.
	err = s.Close()
	if err != nil {
		t.Fatal(err)
	}
	r.Close()
}
// Tests fix for issue 2788, that we bind nil to a []byte if the
// value in the column is sql null
func TestNullByteSlice(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t|id=int32,name=nullstring")
	exec(t, db, "INSERT|t|id=10,name=?", nil)
	var name []byte
	err := db.QueryRow("SELECT|t|name|id=?", 10).Scan(&name)
	if err != nil {
		t.Fatal(err)
	}
	// NULL column must scan as a nil slice, not an empty one.
	if name != nil {
		t.Fatalf("name []byte should be nil for null column value, got: %#v", name)
	}
	exec(t, db, "INSERT|t|id=11,name=?", "bob")
	err = db.QueryRow("SELECT|t|name|id=?", 11).Scan(&name)
	if err != nil {
		t.Fatal(err)
	}
	if string(name) != "bob" {
		t.Fatalf("name []byte should be bob, got: %q", string(name))
	}
}
// TestPointerParamsAndScans verifies *string works both as a bind
// parameter (nil meaning NULL) and as a Scan destination.
func TestPointerParamsAndScans(t *testing.T) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t|id=int32,name=nullstring")
	bob := "bob"
	var name *string
	name = &bob
	exec(t, db, "INSERT|t|id=10,name=?", name)
	name = nil
	exec(t, db, "INSERT|t|id=20,name=?", name)
	err := db.QueryRow("SELECT|t|name|id=?", 10).Scan(&name)
	if err != nil {
		t.Fatalf("querying id 10: %v", err)
	}
	if name == nil {
		t.Errorf("id 10's name = nil; want bob")
	} else if *name != "bob" {
		t.Errorf("id 10's name = %q; want bob", *name)
	}
	err = db.QueryRow("SELECT|t|name|id=?", 20).Scan(&name)
	if err != nil {
		t.Fatalf("querying id 20: %v", err)
	}
	// NULL row should scan back as a nil pointer.
	if name != nil {
		t.Errorf("id 20 = %q; want nil", *name)
	}
}
// TestQueryRowClosingStmt checks that QueryRow's implicit statement is
// closed: the fakeConn must report as many closed stmts as made.
func TestQueryRowClosingStmt(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	var name string
	var age int
	err := db.QueryRow("SELECT|people|age,name|age=?", 3).Scan(&age, &name)
	if err != nil {
		t.Fatal(err)
	}
	if len(db.freeConn) != 1 {
		t.Fatalf("expected 1 free conn")
	}
	fakeConn := db.freeConn[0].ci.(*fakeConn)
	if made, closed := fakeConn.stmtsMade, fakeConn.stmtsClosed; made != closed {
		t.Errorf("statement close mismatch: made %d, closed %d", made, closed)
	}
}
// Test issue 6651
// TestIssue6651 verifies that errors reported by the driver from
// Rows.Next and from Rows.Close are surfaced by QueryRow(...).Scan.
func TestIssue6651(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	var v string
	want := "error in rows.Next"
	rowsCursorNextHook = func(dest []driver.Value) error {
		// errors.New, not fmt.Errorf(want): want is a variable, and
		// passing it as a format string trips vet's printf check and
		// would mangle the message if it ever contained a '%'.
		return errors.New(want)
	}
	defer func() { rowsCursorNextHook = nil }()
	err := db.QueryRow("SELECT|people|name|").Scan(&v)
	if err == nil || err.Error() != want {
		t.Errorf("error = %q; want %q", err, want)
	}
	rowsCursorNextHook = nil
	want = "error in rows.Close"
	rowsCloseHook = func(rows *Rows, err *error) {
		*err = errors.New(want)
	}
	defer func() { rowsCloseHook = nil }()
	err = db.QueryRow("SELECT|people|name|").Scan(&v)
	if err == nil || err.Error() != want {
		t.Errorf("error = %q; want %q", err, want)
	}
}
// nullTestRow is one row of a nullable-type round-trip test:
// the value bound into the nullable column, the value bound into the
// non-nullable column, and the value expected back from Scan.
type nullTestRow struct {
	nullParam    interface{}
	notNullParam interface{}
	scanNullVal  interface{}
}
// nullTestSpec names the nullable/non-nullable fakedb column types and
// carries the six rows driven through nullTestRun.
type nullTestSpec struct {
	nullType    string
	notNullType string
	rows        [6]nullTestRow
}
// TestNullStringParam round-trips NullString values through nullTestRun.
func TestNullStringParam(t *testing.T) {
	spec := nullTestSpec{"nullstring", "string", [6]nullTestRow{
		{NullString{"aqua", true}, "", NullString{"aqua", true}},
		{NullString{"brown", false}, "", NullString{"", false}},
		{"chartreuse", "", NullString{"chartreuse", true}},
		{NullString{"darkred", true}, "", NullString{"darkred", true}},
		{NullString{"eel", false}, "", NullString{"", false}},
		// Last row: valid nullParam but NULL notNullParam — must fail.
		{"foo", NullString{"black", false}, nil},
	}}
	nullTestRun(t, spec)
}
// TestNullInt64Param round-trips NullInt64 values through nullTestRun.
func TestNullInt64Param(t *testing.T) {
	spec := nullTestSpec{"nullint64", "int64", [6]nullTestRow{
		{NullInt64{31, true}, 1, NullInt64{31, true}},
		{NullInt64{-22, false}, 1, NullInt64{0, false}},
		{22, 1, NullInt64{22, true}},
		{NullInt64{33, true}, 1, NullInt64{33, true}},
		{NullInt64{222, false}, 1, NullInt64{0, false}},
		{0, NullInt64{31, false}, nil},
	}}
	nullTestRun(t, spec)
}
// TestNullFloat64Param round-trips NullFloat64 values through nullTestRun.
func TestNullFloat64Param(t *testing.T) {
	spec := nullTestSpec{"nullfloat64", "float64", [6]nullTestRow{
		{NullFloat64{31.2, true}, 1, NullFloat64{31.2, true}},
		{NullFloat64{13.1, false}, 1, NullFloat64{0, false}},
		{-22.9, 1, NullFloat64{-22.9, true}},
		{NullFloat64{33.81, true}, 1, NullFloat64{33.81, true}},
		{NullFloat64{222, false}, 1, NullFloat64{0, false}},
		{10, NullFloat64{31.2, false}, nil},
	}}
	nullTestRun(t, spec)
}
// TestNullBoolParam round-trips NullBool values through nullTestRun.
func TestNullBoolParam(t *testing.T) {
	spec := nullTestSpec{"nullbool", "bool", [6]nullTestRow{
		{NullBool{false, true}, true, NullBool{false, true}},
		{NullBool{true, false}, false, NullBool{false, false}},
		{true, true, NullBool{true, true}},
		{NullBool{true, true}, false, NullBool{true, true}},
		{NullBool{true, false}, true, NullBool{false, false}},
		{true, NullBool{true, false}, nil},
	}}
	nullTestRun(t, spec)
}
// nullTestRun drives a nullTestSpec: inserts the first five rows via
// Exec and a prepared statement, checks the sixth (NULL into a
// non-null column) fails, then scans each nullable value back and
// compares it with reflect.DeepEqual.
func nullTestRun(t *testing.T, spec nullTestSpec) {
	db := newTestDB(t, "")
	defer closeDB(t, db)
	exec(t, db, fmt.Sprintf("CREATE|t|id=int32,name=string,nullf=%s,notnullf=%s", spec.nullType, spec.notNullType))
	// Inserts with db.Exec:
	exec(t, db, "INSERT|t|id=?,name=?,nullf=?,notnullf=?", 1, "alice", spec.rows[0].nullParam, spec.rows[0].notNullParam)
	exec(t, db, "INSERT|t|id=?,name=?,nullf=?,notnullf=?", 2, "bob", spec.rows[1].nullParam, spec.rows[1].notNullParam)
	// Inserts with a prepared statement:
	stmt, err := db.Prepare("INSERT|t|id=?,name=?,nullf=?,notnullf=?")
	if err != nil {
		t.Fatalf("prepare: %v", err)
	}
	defer stmt.Close()
	if _, err := stmt.Exec(3, "chris", spec.rows[2].nullParam, spec.rows[2].notNullParam); err != nil {
		t.Errorf("exec insert chris: %v", err)
	}
	if _, err := stmt.Exec(4, "dave", spec.rows[3].nullParam, spec.rows[3].notNullParam); err != nil {
		t.Errorf("exec insert dave: %v", err)
	}
	if _, err := stmt.Exec(5, "eleanor", spec.rows[4].nullParam, spec.rows[4].notNullParam); err != nil {
		t.Errorf("exec insert eleanor: %v", err)
	}
	// Can't put null val into non-null col
	if _, err := stmt.Exec(6, "bob", spec.rows[5].nullParam, spec.rows[5].notNullParam); err == nil {
		t.Errorf("expected error inserting nil val with prepared statement Exec")
	}
	_, err = db.Exec("INSERT|t|id=?,name=?,nullf=?", 999, nil, nil)
	if err == nil {
		// TODO: this test fails, but it's just because
		// fakeConn implements the optional Execer interface,
		// so arguably this is the correct behavior. But
		// maybe I should flesh out the fakeConn.Exec
		// implementation so this properly fails.
		// t.Errorf("expected error inserting nil name with Exec")
	}
	// Scan each nullable value into a freshly allocated value of the
	// same concrete type as the first row's nullParam.
	paramtype := reflect.TypeOf(spec.rows[0].nullParam)
	bindVal := reflect.New(paramtype).Interface()
	for i := 0; i < 5; i++ {
		id := i + 1
		if err := db.QueryRow("SELECT|t|nullf|id=?", id).Scan(bindVal); err != nil {
			t.Errorf("id=%d Scan: %v", id, err)
		}
		bindValDeref := reflect.ValueOf(bindVal).Elem().Interface()
		if !reflect.DeepEqual(bindValDeref, spec.rows[i].scanNullVal) {
			t.Errorf("id=%d got %#v, want %#v", id, bindValDeref, spec.rows[i].scanNullVal)
		}
	}
}
// golang.org/issue/4859
// TestQueryRowNilScanDest verifies Scan into a nil pointer destination
// returns a descriptive error rather than panicking.
func TestQueryRowNilScanDest(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	var name *string // nil pointer
	err := db.QueryRow("SELECT|people|name|").Scan(name)
	want := "sql: Scan error on column index 0: destination pointer is nil"
	if err == nil || err.Error() != want {
		t.Errorf("error = %q; want %q", err.Error(), want)
	}
}
// TestIssue4902 checks that repeated Prepare/Close cycles reuse the
// pooled connection instead of opening a new driver conn each time.
func TestIssue4902(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	driver := db.driver.(*fakeDriver)
	opens0 := driver.openCount
	var stmt *Stmt
	var err error
	for i := 0; i < 10; i++ {
		stmt, err = db.Prepare("SELECT|people|name|")
		if err != nil {
			t.Fatal(err)
		}
		err = stmt.Close()
		if err != nil {
			t.Fatal(err)
		}
	}
	// Ten prepare/close cycles should have cost at most one open.
	opens := driver.openCount - opens0
	if opens > 1 {
		t.Errorf("opens = %d; want <= 1", opens)
		t.Logf("db = %#v", db)
		t.Logf("driver = %#v", driver)
		t.Logf("stmt = %#v", stmt)
	}
}
// Issue 3857
// This used to deadlock.
func TestSimultaneousQueries(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	defer tx.Rollback()
	// Two open result sets on the same transaction at once.
	r1, err := tx.Query("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	defer r1.Close()
	r2, err := tx.Query("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	defer r2.Close()
}
// TestMaxIdleConns verifies SetMaxIdleConns(0) both drops the current
// idle conns and prevents future conns from being retained.
func TestMaxIdleConns(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	tx.Commit()
	if got := len(db.freeConn); got != 1 {
		t.Errorf("freeConns = %d; want 1", got)
	}
	// Setting the limit to zero should evict the idle conn...
	db.SetMaxIdleConns(0)
	if got := len(db.freeConn); got != 0 {
		t.Errorf("freeConns after set to zero = %d; want 0", got)
	}
	tx, err = db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	tx.Commit()
	// ...and keep later conns from being pooled.
	if got := len(db.freeConn); got != 0 {
		t.Errorf("freeConns = %d; want 0", got)
	}
}
// TestMaxOpenConns hammers a 10-conn-limited pool with 100 slow queries
// and verifies the driver never sees more than 10 opens, then checks
// that lowering MaxOpenConns/MaxIdleConns shrinks the pool and its
// dependency graph accordingly.
func TestMaxOpenConns(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	defer setHookpostCloseConn(nil)
	setHookpostCloseConn(func(_ *fakeConn, err error) {
		if err != nil {
			t.Errorf("Error closing fakeConn: %v", err)
		}
	})
	db := newTestDB(t, "magicquery")
	defer closeDB(t, db)
	driver := db.driver.(*fakeDriver)
	// Force the number of open connections to 0 so we can get an accurate
	// count for the test
	db.SetMaxIdleConns(0)
	if g, w := db.numFreeConns(), 0; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(0, time.Second); n > 0 {
		t.Errorf("number of dependencies = %d; expected 0", n)
		db.dumpDeps(t)
	}
	driver.mu.Lock()
	opens0 := driver.openCount
	closes0 := driver.closeCount
	driver.mu.Unlock()
	db.SetMaxIdleConns(10)
	db.SetMaxOpenConns(10)
	stmt, err := db.Prepare("SELECT|magicquery|op|op=?,millis=?")
	if err != nil {
		t.Fatal(err)
	}
	// Start 50 parallel slow queries.
	const (
		nquery      = 50
		sleepMillis = 25
		nbatch      = 2
	)
	var wg sync.WaitGroup
	for batch := 0; batch < nbatch; batch++ {
		for i := 0; i < nquery; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				var op string
				if err := stmt.QueryRow("sleep", sleepMillis).Scan(&op); err != nil && err != ErrNoRows {
					t.Error(err)
				}
			}()
		}
		// Sleep for twice the expected length of time for the
		// batch of 50 queries above to finish before starting
		// the next round.
		time.Sleep(2 * sleepMillis * time.Millisecond)
	}
	wg.Wait()
	if g, w := db.numFreeConns(), 10; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(20, time.Second); n > 20 {
		t.Errorf("number of dependencies = %d; expected <= 20", n)
		db.dumpDeps(t)
	}
	driver.mu.Lock()
	opens := driver.openCount - opens0
	closes := driver.closeCount - closes0
	driver.mu.Unlock()
	if opens > 10 {
		t.Logf("open calls = %d", opens)
		t.Logf("close calls = %d", closes)
		t.Errorf("db connections opened = %d; want <= 10", opens)
		db.dumpDeps(t)
	}
	if err := stmt.Close(); err != nil {
		t.Fatal(err)
	}
	if g, w := db.numFreeConns(), 10; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(10, time.Second); n > 10 {
		t.Errorf("number of dependencies = %d; expected <= 10", n)
		db.dumpDeps(t)
	}
	db.SetMaxOpenConns(5)
	if g, w := db.numFreeConns(), 5; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	// Message matches the condition (n > 5), consistent with the
	// "expected <= N" wording used above.
	if n := db.numDepsPollUntil(5, time.Second); n > 5 {
		t.Errorf("number of dependencies = %d; expected <= 5", n)
		db.dumpDeps(t)
	}
	// MaxOpenConns(0) means unlimited; pool should stay at 5.
	db.SetMaxOpenConns(0)
	if g, w := db.numFreeConns(), 5; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(5, time.Second); n > 5 {
		t.Errorf("number of dependencies = %d; expected <= 5", n)
		db.dumpDeps(t)
	}
	db.SetMaxIdleConns(0)
	if g, w := db.numFreeConns(), 0; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(0, time.Second); n > 0 {
		t.Errorf("number of dependencies = %d; expected 0", n)
		db.dumpDeps(t)
	}
}
// Issue 9453: tests that SetMaxOpenConns can be lowered at runtime
// and affects the subsequent release of connections.
func TestMaxOpenConnsOnBusy(t *testing.T) {
	defer setHookpostCloseConn(nil)
	setHookpostCloseConn(func(_ *fakeConn, err error) {
		if err != nil {
			t.Errorf("Error closing fakeConn: %v", err)
		}
	})
	db := newTestDB(t, "magicquery")
	defer closeDB(t, db)
	db.SetMaxOpenConns(3)
	// Check out all three permitted connections.
	conn0, err := db.conn(cachedOrNewConn)
	if err != nil {
		t.Fatalf("db open conn fail: %v", err)
	}
	conn1, err := db.conn(cachedOrNewConn)
	if err != nil {
		t.Fatalf("db open conn fail: %v", err)
	}
	conn2, err := db.conn(cachedOrNewConn)
	if err != nil {
		t.Fatalf("db open conn fail: %v", err)
	}
	if g, w := db.numOpen, 3; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	// Lowering the limit below the in-use count takes effect only
	// as busy connections are released.
	db.SetMaxOpenConns(2)
	if g, w := db.numOpen, 3; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	conn0.releaseConn(nil)
	conn1.releaseConn(nil)
	if g, w := db.numOpen, 2; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	conn2.releaseConn(nil)
	if g, w := db.numOpen, 2; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
}
// TestSingleOpenConn verifies that with MaxOpenConns(1), back-to-back
// queries succeed once the prior Rows are closed (no deadlock).
func TestSingleOpenConn(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	db.SetMaxOpenConns(1)
	rows, err := db.Query("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	if err = rows.Close(); err != nil {
		t.Fatal(err)
	}
	// shouldn't deadlock
	rows, err = db.Query("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	if err = rows.Close(); err != nil {
		t.Fatal(err)
	}
}
// TestStats checks DB.Stats reports open connections before and after
// the database is closed.
func TestStats(t *testing.T) {
	db := newTestDB(t, "people")
	stats := db.Stats()
	if got := stats.OpenConnections; got != 1 {
		t.Errorf("stats.OpenConnections = %d; want 1", got)
	}
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	tx.Commit()
	closeDB(t, db)
	stats = db.Stats()
	if got := stats.OpenConnections; got != 0 {
		t.Errorf("stats.OpenConnections = %d; want 0", got)
	}
}
// golang.org/issue/5323
// TestStmtCloseDeps hammers one prepared statement from 100 goroutines
// and then checks that the pool, the driver's open/close counts, and
// the statement's per-conn list (stmt.css) stay bounded, and that
// Stmt.Close plus SetMaxIdleConns(0) tears everything down.
func TestStmtCloseDeps(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	defer setHookpostCloseConn(nil)
	setHookpostCloseConn(func(_ *fakeConn, err error) {
		if err != nil {
			t.Errorf("Error closing fakeConn: %v", err)
		}
	})
	db := newTestDB(t, "magicquery")
	defer closeDB(t, db)
	driver := db.driver.(*fakeDriver)
	driver.mu.Lock()
	opens0 := driver.openCount
	closes0 := driver.closeCount
	driver.mu.Unlock()
	openDelta0 := opens0 - closes0
	stmt, err := db.Prepare("SELECT|magicquery|op|op=?,millis=?")
	if err != nil {
		t.Fatal(err)
	}
	// Start 50 parallel slow queries.
	const (
		nquery      = 50
		sleepMillis = 25
		nbatch      = 2
	)
	var wg sync.WaitGroup
	for batch := 0; batch < nbatch; batch++ {
		for i := 0; i < nquery; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				var op string
				if err := stmt.QueryRow("sleep", sleepMillis).Scan(&op); err != nil && err != ErrNoRows {
					t.Error(err)
				}
			}()
		}
		// Sleep for twice the expected length of time for the
		// batch of 50 queries above to finish before starting
		// the next round.
		time.Sleep(2 * sleepMillis * time.Millisecond)
	}
	wg.Wait()
	if g, w := db.numFreeConns(), 2; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(4, time.Second); n > 4 {
		t.Errorf("number of dependencies = %d; expected <= 4", n)
		db.dumpDeps(t)
	}
	driver.mu.Lock()
	opens := driver.openCount - opens0
	closes := driver.closeCount - closes0
	openDelta := (driver.openCount - driver.closeCount) - openDelta0
	driver.mu.Unlock()
	if openDelta > 2 {
		t.Logf("open calls = %d", opens)
		t.Logf("close calls = %d", closes)
		t.Logf("open delta = %d", openDelta)
		t.Errorf("db connections opened = %d; want <= 2", openDelta)
		db.dumpDeps(t)
	}
	if len(stmt.css) > nquery {
		t.Errorf("len(stmt.css) = %d; want <= %d", len(stmt.css), nquery)
	}
	if err := stmt.Close(); err != nil {
		t.Fatal(err)
	}
	if g, w := db.numFreeConns(), 2; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(2, time.Second); n > 2 {
		t.Errorf("number of dependencies = %d; expected <= 2", n)
		db.dumpDeps(t)
	}
	db.SetMaxIdleConns(0)
	if g, w := db.numFreeConns(), 0; g != w {
		t.Errorf("free conns = %d; want %d", g, w)
	}
	if n := db.numDepsPollUntil(0, time.Second); n > 0 {
		t.Errorf("number of dependencies = %d; expected 0", n)
		db.dumpDeps(t)
	}
}
// golang.org/issue/5046
// TestCloseConnBeforeStmts closes the DB while a prepared statement is
// still open: the driver conn must close and drop its statement list,
// and the later Stmt.Close must still succeed.
func TestCloseConnBeforeStmts(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	defer setHookpostCloseConn(nil)
	setHookpostCloseConn(func(_ *fakeConn, err error) {
		if err != nil {
			t.Errorf("Error closing fakeConn: %v; from %s", err, stack())
			db.dumpDeps(t)
			t.Errorf("DB = %#v", db)
		}
	})
	stmt, err := db.Prepare("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	if len(db.freeConn) != 1 {
		t.Fatalf("expected 1 freeConn; got %d", len(db.freeConn))
	}
	dc := db.freeConn[0]
	if dc.closed {
		t.Errorf("conn shouldn't be closed")
	}
	if n := len(dc.openStmt); n != 1 {
		t.Errorf("driverConn num openStmt = %d; want 1", n)
	}
	err = db.Close()
	if err != nil {
		t.Errorf("db Close = %v", err)
	}
	if !dc.closed {
		t.Errorf("after db.Close, driverConn should be closed")
	}
	if n := len(dc.openStmt); n != 0 {
		t.Errorf("driverConn num openStmt = %d; want 0", n)
	}
	// Closing the statement after the DB is closed must not error.
	err = stmt.Close()
	if err != nil {
		t.Errorf("Stmt close = %v", err)
	}
	if !dc.closed {
		t.Errorf("conn should be closed")
	}
	if dc.ci != nil {
		t.Errorf("after Stmt Close, driverConn's Conn interface should be nil")
	}
}
// golang.org/issue/5283: don't release the Rows' connection in Close
// before calling Stmt.Close.
func TestRowsCloseOrder(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	// No idle pool + strict close checking surfaces ordering bugs.
	db.SetMaxIdleConns(0)
	setStrictFakeConnClose(t)
	defer setStrictFakeConnClose(nil)
	rows, err := db.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatal(err)
	}
	err = rows.Close()
	if err != nil {
		t.Fatal(err)
	}
}
// TestRowsImplicitClose injects a cursor error mid-iteration and checks
// Next stops, Err reports the injected error, and the cursor is closed.
func TestRowsImplicitClose(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	rows, err := db.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatal(err)
	}
	// Arrange for the fake cursor to fail at row index 2.
	want, fail := 2, errors.New("fail")
	r := rows.rowsi.(*rowsCursor)
	r.errPos, r.err = want, fail
	got := 0
	for rows.Next() {
		got++
	}
	if got != want {
		t.Errorf("got %d rows, want %d", got, want)
	}
	if err := rows.Err(); err != fail {
		t.Errorf("got error %v, want %v", err, fail)
	}
	if !r.closed {
		t.Errorf("r.closed is false, want true")
	}
}
// TestStmtCloseOrder checks that querying a non-existent table fails
// cleanly under strict conn-close checking with no idle conns cached.
func TestStmtCloseOrder(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	db.SetMaxIdleConns(0)
	setStrictFakeConnClose(t)
	defer setStrictFakeConnClose(nil)
	_, err := db.Query("SELECT|non_existent|name|")
	if err == nil {
		// Fixed typo: "Quering" -> "Querying".
		t.Fatal("Querying non-existent table should fail")
	}
}
// Test cases where there's more than maxBadConnRetries bad connections in the
// pool (issue 8834)
func TestManyErrBadConn(t *testing.T) {
	// manyErrBadConnSetup returns a DB whose entire idle pool
	// (maxBadConnRetries+1 conns) is marked sticky-bad, so every
	// retry initially draws a bad connection.
	manyErrBadConnSetup := func() *DB {
		db := newTestDB(t, "people")
		nconn := maxBadConnRetries + 1
		db.SetMaxIdleConns(nconn)
		db.SetMaxOpenConns(nconn)
		// open enough connections
		func() {
			for i := 0; i < nconn; i++ {
				rows, err := db.Query("SELECT|people|age,name|")
				if err != nil {
					t.Fatal(err)
				}
				defer rows.Close()
			}
		}()
		if db.numOpen != nconn {
			t.Fatalf("unexpected numOpen %d (was expecting %d)", db.numOpen, nconn)
		} else if len(db.freeConn) != nconn {
			t.Fatalf("unexpected len(db.freeConn) %d (was expecting %d)", len(db.freeConn), nconn)
		}
		for _, conn := range db.freeConn {
			conn.ci.(*fakeConn).stickyBad = true
		}
		return db
	}
	// Each entry point (Query, Exec, Begin, Prepare) must survive an
	// all-bad pool by eventually opening a fresh connection.
	// Query
	db := manyErrBadConnSetup()
	defer closeDB(t, db)
	rows, err := db.Query("SELECT|people|age,name|")
	if err != nil {
		t.Fatal(err)
	}
	if err = rows.Close(); err != nil {
		t.Fatal(err)
	}
	// Exec
	db = manyErrBadConnSetup()
	defer closeDB(t, db)
	_, err = db.Exec("INSERT|people|name=Julia,age=19")
	if err != nil {
		t.Fatal(err)
	}
	// Begin
	db = manyErrBadConnSetup()
	defer closeDB(t, db)
	tx, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	if err = tx.Rollback(); err != nil {
		t.Fatal(err)
	}
	// Prepare
	db = manyErrBadConnSetup()
	defer closeDB(t, db)
	stmt, err := db.Prepare("SELECT|people|age,name|")
	if err != nil {
		t.Fatal(err)
	}
	if err = stmt.Close(); err != nil {
		t.Fatal(err)
	}
}
// golang.org/issue/5781
// TestErrBadConnReconnect simulates an ErrBadConn on the first attempt
// of each operation (via fakedb hooks) and verifies the operation is
// retried successfully without leaking connections.
func TestErrBadConnReconnect(t *testing.T) {
	db := newTestDB(t, "foo")
	defer closeDB(t, db)
	exec(t, db, "CREATE|t1|name=string,age=int32,dead=bool")
	// simulateBadConn installs hook so the first call reports a bad
	// connection, then runs op and checks it succeeded via a retry.
	simulateBadConn := func(name string, hook *func() bool, op func() error) {
		broken, retried := false, false
		numOpen := db.numOpen
		// simulate a broken connection on the first try
		*hook = func() bool {
			if !broken {
				broken = true
				return true
			}
			retried = true
			return false
		}
		if err := op(); err != nil {
			t.Errorf(name+": %v", err)
			return
		}
		if !broken || !retried {
			t.Error(name + ": Failed to simulate broken connection")
		}
		*hook = nil
		if numOpen != db.numOpen {
			t.Errorf(name+": leaked %d connection(s)!", db.numOpen-numOpen)
			// NOTE(review): this assignment is never read again;
			// it looks vestigial.
			numOpen = db.numOpen
		}
	}
	// db.Exec
	dbExec := func() error {
		_, err := db.Exec("INSERT|t1|name=?,age=?,dead=?", "Gordon", 3, true)
		return err
	}
	simulateBadConn("db.Exec prepare", &hookPrepareBadConn, dbExec)
	simulateBadConn("db.Exec exec", &hookExecBadConn, dbExec)
	// db.Query
	dbQuery := func() error {
		rows, err := db.Query("SELECT|t1|age,name|")
		if err == nil {
			err = rows.Close()
		}
		return err
	}
	simulateBadConn("db.Query prepare", &hookPrepareBadConn, dbQuery)
	simulateBadConn("db.Query query", &hookQueryBadConn, dbQuery)
	// db.Prepare
	simulateBadConn("db.Prepare", &hookPrepareBadConn, func() error {
		stmt, err := db.Prepare("INSERT|t1|name=?,age=?,dead=?")
		if err != nil {
			return err
		}
		stmt.Close()
		return nil
	})
	// Provide a way to force a re-prepare of a statement on next execution
	forcePrepare := func(stmt *Stmt) {
		stmt.css = nil
	}
	// stmt.Exec
	stmt1, err := db.Prepare("INSERT|t1|name=?,age=?,dead=?")
	if err != nil {
		t.Fatalf("prepare: %v", err)
	}
	defer stmt1.Close()
	// make sure we must prepare the stmt first
	forcePrepare(stmt1)
	stmtExec := func() error {
		_, err := stmt1.Exec("Gopher", 3, false)
		return err
	}
	simulateBadConn("stmt.Exec prepare", &hookPrepareBadConn, stmtExec)
	simulateBadConn("stmt.Exec exec", &hookExecBadConn, stmtExec)
	// stmt.Query
	stmt2, err := db.Prepare("SELECT|t1|age,name|")
	if err != nil {
		t.Fatalf("prepare: %v", err)
	}
	defer stmt2.Close()
	// make sure we must prepare the stmt first
	forcePrepare(stmt2)
	stmtQuery := func() error {
		rows, err := stmt2.Query()
		if err == nil {
			err = rows.Close()
		}
		return err
	}
	simulateBadConn("stmt.Query prepare", &hookPrepareBadConn, stmtQuery)
	simulateBadConn("stmt.Query exec", &hookQueryBadConn, stmtQuery)
}
// concurrentTest is one scenario run by the concurrency stress tests:
// init sets up against db, test performs one operation (called from
// many goroutines), and finish releases resources.
type concurrentTest interface {
	init(t testing.TB, db *DB)
	finish(t testing.TB)
	test(t testing.TB) error
}
// concurrentDBQueryTest exercises DB.Query directly (no prepared
// statement, no transaction).
type concurrentDBQueryTest struct {
	db *DB
}

func (c *concurrentDBQueryTest) init(t testing.TB, db *DB) {
	c.db = db
}

func (c *concurrentDBQueryTest) finish(t testing.TB) {
	c.db = nil
}

// test runs one query and drains its rows. Scan errors are not checked:
// only concurrent access is being exercised here, not result contents.
func (c *concurrentDBQueryTest) test(t testing.TB) error {
	rows, err := c.db.Query("SELECT|people|name|")
	if err != nil {
		t.Error(err)
		return err
	}
	var name string
	for rows.Next() {
		rows.Scan(&name)
	}
	rows.Close()
	return nil
}
// concurrentDBExecTest exercises DB.Exec directly.
type concurrentDBExecTest struct {
	db *DB
}

func (c *concurrentDBExecTest) init(t testing.TB, db *DB) {
	c.db = db
}

func (c *concurrentDBExecTest) finish(t testing.TB) {
	c.db = nil
}

// test runs one fake-driver Exec ("NOSERT" inserts without persisting).
func (c *concurrentDBExecTest) test(t testing.TB) error {
	_, err := c.db.Exec("NOSERT|people|name=Chris,age=?,photo=CPHOTO,bdate=?", 3, chrisBirthday)
	if err != nil {
		t.Error(err)
		return err
	}
	return nil
}
// concurrentStmtQueryTest exercises Stmt.Query on a statement prepared
// once on the DB and shared by all goroutines.
type concurrentStmtQueryTest struct {
	db   *DB
	stmt *Stmt
}

func (c *concurrentStmtQueryTest) init(t testing.TB, db *DB) {
	c.db = db
	var err error
	c.stmt, err = db.Prepare("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
}

func (c *concurrentStmtQueryTest) finish(t testing.TB) {
	if c.stmt != nil {
		c.stmt.Close()
		c.stmt = nil
	}
	c.db = nil
}

// test runs one query on the shared statement and drains its rows.
func (c *concurrentStmtQueryTest) test(t testing.TB) error {
	rows, err := c.stmt.Query()
	if err != nil {
		t.Errorf("error on query: %v", err)
		return err
	}
	var name string
	for rows.Next() {
		rows.Scan(&name)
	}
	rows.Close()
	return nil
}
// concurrentStmtExecTest exercises Stmt.Exec on a shared prepared
// statement.
type concurrentStmtExecTest struct {
	db   *DB
	stmt *Stmt
}

func (c *concurrentStmtExecTest) init(t testing.TB, db *DB) {
	c.db = db
	var err error
	c.stmt, err = db.Prepare("NOSERT|people|name=Chris,age=?,photo=CPHOTO,bdate=?")
	if err != nil {
		t.Fatal(err)
	}
}

func (c *concurrentStmtExecTest) finish(t testing.TB) {
	if c.stmt != nil {
		c.stmt.Close()
		c.stmt = nil
	}
	c.db = nil
}

// test runs one Exec on the shared statement.
func (c *concurrentStmtExecTest) test(t testing.TB) error {
	_, err := c.stmt.Exec(3, chrisBirthday)
	if err != nil {
		t.Errorf("error on exec: %v", err)
		return err
	}
	return nil
}
// concurrentTxQueryTest runs queries while a transaction is open.
type concurrentTxQueryTest struct {
	db *DB
	tx *Tx
}

func (c *concurrentTxQueryTest) init(t testing.TB, db *DB) {
	c.db = db
	var err error
	c.tx, err = c.db.Begin()
	if err != nil {
		t.Fatal(err)
	}
}

func (c *concurrentTxQueryTest) finish(t testing.TB) {
	if c.tx != nil {
		c.tx.Rollback()
		c.tx = nil
	}
	c.db = nil
}

func (c *concurrentTxQueryTest) test(t testing.TB) error {
	// NOTE(review): this queries through c.db rather than c.tx, so the
	// transaction begun in init is only opened and rolled back, never
	// queried. Confirm whether c.tx.Query was intended (compare
	// concurrentTxExecTest, which does use c.tx).
	rows, err := c.db.Query("SELECT|people|name|")
	if err != nil {
		t.Error(err)
		return err
	}
	var name string
	for rows.Next() {
		rows.Scan(&name)
	}
	rows.Close()
	return nil
}
// concurrentTxExecTest exercises Tx.Exec on one shared transaction.
type concurrentTxExecTest struct {
	db *DB
	tx *Tx
}

func (c *concurrentTxExecTest) init(t testing.TB, db *DB) {
	c.db = db
	var err error
	c.tx, err = c.db.Begin()
	if err != nil {
		t.Fatal(err)
	}
}

func (c *concurrentTxExecTest) finish(t testing.TB) {
	if c.tx != nil {
		c.tx.Rollback()
		c.tx = nil
	}
	c.db = nil
}

// test runs one Exec inside the shared transaction.
func (c *concurrentTxExecTest) test(t testing.TB) error {
	_, err := c.tx.Exec("NOSERT|people|name=Chris,age=?,photo=CPHOTO,bdate=?", 3, chrisBirthday)
	if err != nil {
		t.Error(err)
		return err
	}
	return nil
}
// concurrentTxStmtQueryTest exercises Stmt.Query on a statement
// prepared inside one shared transaction.
type concurrentTxStmtQueryTest struct {
	db   *DB
	tx   *Tx
	stmt *Stmt
}

func (c *concurrentTxStmtQueryTest) init(t testing.TB, db *DB) {
	c.db = db
	var err error
	c.tx, err = c.db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	c.stmt, err = c.tx.Prepare("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
}

func (c *concurrentTxStmtQueryTest) finish(t testing.TB) {
	// Close the statement before rolling back its transaction.
	if c.stmt != nil {
		c.stmt.Close()
		c.stmt = nil
	}
	if c.tx != nil {
		c.tx.Rollback()
		c.tx = nil
	}
	c.db = nil
}

// test runs one query on the tx-scoped statement and drains its rows.
func (c *concurrentTxStmtQueryTest) test(t testing.TB) error {
	rows, err := c.stmt.Query()
	if err != nil {
		t.Errorf("error on query: %v", err)
		return err
	}
	var name string
	for rows.Next() {
		rows.Scan(&name)
	}
	rows.Close()
	return nil
}
// concurrentTxStmtExecTest exercises Stmt.Exec on a statement prepared
// inside one shared transaction.
type concurrentTxStmtExecTest struct {
	db   *DB
	tx   *Tx
	stmt *Stmt
}

func (c *concurrentTxStmtExecTest) init(t testing.TB, db *DB) {
	c.db = db
	var err error
	c.tx, err = c.db.Begin()
	if err != nil {
		t.Fatal(err)
	}
	c.stmt, err = c.tx.Prepare("NOSERT|people|name=Chris,age=?,photo=CPHOTO,bdate=?")
	if err != nil {
		t.Fatal(err)
	}
}

func (c *concurrentTxStmtExecTest) finish(t testing.TB) {
	// Close the statement before rolling back its transaction.
	if c.stmt != nil {
		c.stmt.Close()
		c.stmt = nil
	}
	if c.tx != nil {
		c.tx.Rollback()
		c.tx = nil
	}
	c.db = nil
}

// test runs one Exec on the tx-scoped statement.
func (c *concurrentTxStmtExecTest) test(t testing.TB) error {
	_, err := c.stmt.Exec(3, chrisBirthday)
	if err != nil {
		t.Errorf("error on exec: %v", err)
		return err
	}
	return nil
}
// concurrentRandomTest wraps all the other concurrent tests and, on
// each call, runs one of them picked at random, mixing access patterns
// within a single doConcurrentTest run.
type concurrentRandomTest struct {
	tests []concurrentTest
}

func (c *concurrentRandomTest) init(t testing.TB, db *DB) {
	c.tests = []concurrentTest{
		new(concurrentDBQueryTest),
		new(concurrentDBExecTest),
		new(concurrentStmtQueryTest),
		new(concurrentStmtExecTest),
		new(concurrentTxQueryTest),
		new(concurrentTxExecTest),
		new(concurrentTxStmtQueryTest),
		new(concurrentTxStmtExecTest),
	}
	for _, ct := range c.tests {
		ct.init(t, db)
	}
}

func (c *concurrentRandomTest) finish(t testing.TB) {
	for _, ct := range c.tests {
		ct.finish(t)
	}
}

// test delegates one operation to a randomly chosen sub-test.
func (c *concurrentRandomTest) test(t testing.TB) error {
	ct := c.tests[rand.Intn(len(c.tests))]
	return ct.test(t)
}
// doConcurrentTest runs ct.test numReqs times spread across 2*maxProcs
// worker goroutines against a fresh test DB, then waits for every
// request to complete. Failures are reported by ct.test itself on t.
func doConcurrentTest(t testing.TB, ct concurrentTest) {
	maxProcs, numReqs := 1, 500
	if testing.Short() {
		maxProcs, numReqs = 4, 50
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(maxProcs))

	db := newTestDB(t, "people")
	defer closeDB(t, db)

	ct.init(t, db)
	defer ct.finish(t)

	var wg sync.WaitGroup
	wg.Add(numReqs)

	reqs := make(chan bool)
	defer close(reqs)

	for i := 0; i < maxProcs*2; i++ {
		go func() {
			for range reqs {
				// ct.test reports its own failures on t; its error
				// return needs no extra handling here. (The previous
				// version branched on the error but performed an
				// identical wg.Done() on both paths.)
				_ = ct.test(t)
				wg.Done()
			}
		}()
	}
	for i := 0; i < numReqs; i++ {
		reqs <- true
	}
	wg.Wait()
}
// TestIssue6081 verifies that when Rows.Close reports driver.ErrBadConn,
// the bad connections are discarded (the driver sees roughly one open
// and one close per query) and len(stmt.css) stays bounded instead of
// accumulating an entry per dead connection.
func TestIssue6081(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)

	drv := db.driver.(*fakeDriver)
	drv.mu.Lock()
	opens0 := drv.openCount
	closes0 := drv.closeCount
	drv.mu.Unlock()

	stmt, err := db.Prepare("SELECT|people|name|")
	if err != nil {
		t.Fatal(err)
	}
	// Force every Rows.Close to report a bad connection so the conn
	// backing each query gets thrown away.
	rowsCloseHook = func(rows *Rows, err *error) {
		*err = driver.ErrBadConn
	}
	defer func() { rowsCloseHook = nil }()
	for i := 0; i < 10; i++ {
		rows, err := stmt.Query()
		if err != nil {
			t.Fatal(err)
		}
		rows.Close()
	}
	if n := len(stmt.css); n > 1 {
		t.Errorf("len(css slice) = %d; want <= 1", n)
	}
	stmt.Close()
	if n := len(stmt.css); n != 0 {
		t.Errorf("len(css slice) after Close = %d; want 0", n)
	}
	drv.mu.Lock()
	opens := drv.openCount - opens0
	closes := drv.closeCount - closes0
	drv.mu.Unlock()
	// Each discarded bad conn should have forced a fresh driver open
	// and a matching close.
	if opens < 9 {
		t.Errorf("opens = %d; want >= 9", opens)
	}
	if closes < 9 {
		t.Errorf("closes = %d; want >= 9", closes)
	}
}
// TestConcurrency runs every concurrent access pattern, plus the random
// mix, through doConcurrentTest.
func TestConcurrency(t *testing.T) {
	doConcurrentTest(t, new(concurrentDBQueryTest))
	doConcurrentTest(t, new(concurrentDBExecTest))
	doConcurrentTest(t, new(concurrentStmtQueryTest))
	doConcurrentTest(t, new(concurrentStmtExecTest))
	doConcurrentTest(t, new(concurrentTxQueryTest))
	doConcurrentTest(t, new(concurrentTxExecTest))
	doConcurrentTest(t, new(concurrentTxStmtQueryTest))
	doConcurrentTest(t, new(concurrentTxStmtExecTest))
	doConcurrentTest(t, new(concurrentRandomTest))
}
// TestConnectionLeak checks that a connection handed back to DB when the
// idle queue is already full and no request is pending gets closed
// rather than leaked.
func TestConnectionLeak(t *testing.T) {
	db := newTestDB(t, "people")
	defer closeDB(t, db)
	// Start by opening defaultMaxIdleConns
	rows := make([]*Rows, defaultMaxIdleConns)
	// We need to SetMaxOpenConns > MaxIdleConns, so the DB can open
	// a new connection and we can fill the idle queue with the released
	// connections.
	db.SetMaxOpenConns(len(rows) + 1)
	for ii := range rows {
		r, err := db.Query("SELECT|people|name|")
		if err != nil {
			t.Fatal(err)
		}
		r.Next()
		if err := r.Err(); err != nil {
			t.Fatal(err)
		}
		rows[ii] = r
	}
	// Now we have defaultMaxIdleConns busy connections. Open
	// a new one, but wait until the busy connections are released
	// before returning control to DB.
	drv := db.driver.(*fakeDriver)
	drv.waitCh = make(chan struct{}, 1)
	drv.waitingCh = make(chan struct{}, 1)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		r, err := db.Query("SELECT|people|name|")
		if err != nil {
			// t.Fatal must only be called from the goroutine running
			// the test function (it calls runtime.Goexit and would
			// only stop this goroutine); report with t.Error instead.
			t.Error(err)
			return
		}
		r.Close()
	}()
	// Wait until the goroutine we've just created has started waiting.
	<-drv.waitingCh
	// Now close the busy connections. This provides a connection for
	// the blocked goroutine and then fills up the idle queue.
	for _, v := range rows {
		v.Close()
	}
	// At this point we give the new connection to DB. This connection is
	// now useless, since the idle queue is full and there are no pending
	// requests. DB should deal with this situation without leaking the
	// connection.
	drv.waitCh <- struct{}{}
	wg.Wait()
}
// The benchmarks below reuse doConcurrentTest to measure each
// concurrent access pattern: plain Exec/Query, prepared statements,
// transactions, tx-scoped statements, and a random mix.

func BenchmarkConcurrentDBExec(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentDBExecTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}

func BenchmarkConcurrentStmtQuery(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentStmtQueryTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}

func BenchmarkConcurrentStmtExec(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentStmtExecTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}

func BenchmarkConcurrentTxQuery(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentTxQueryTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}

func BenchmarkConcurrentTxExec(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentTxExecTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}

func BenchmarkConcurrentTxStmtQuery(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentTxStmtQueryTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}

func BenchmarkConcurrentTxStmtExec(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentTxStmtExecTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}

func BenchmarkConcurrentRandom(b *testing.B) {
	b.ReportAllocs()
	ct := new(concurrentRandomTest)
	for i := 0; i < b.N; i++ {
		doConcurrentTest(b, ct)
	}
}
// BenchmarkManyConcurrentQueries stresses a single shared prepared
// statement from many parallel goroutines to surface lock contention
// inside database/sql.
func BenchmarkManyConcurrentQueries(b *testing.B) {
	b.ReportAllocs()
	// To see lock contention in Go 1.4, 16~ cores and 128~ goroutines are required.
	const parallelism = 16

	db := newTestDB(b, "magicquery")
	defer closeDB(b, db)
	// Keep enough idle conns so workers don't fight over reopening them.
	db.SetMaxIdleConns(runtime.GOMAXPROCS(0) * parallelism)

	stmt, err := db.Prepare("SELECT|magicquery|op|op=?,millis=?")
	if err != nil {
		b.Fatal(err)
	}
	defer stmt.Close()

	b.SetParallelism(parallelism)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			rows, err := stmt.Query("sleep", 1)
			if err != nil {
				b.Error(err)
				return
			}
			rows.Close()
		}
	})
}
| bsd-3-clause |
nwjs/chromium.src | tools/metrics/histograms/suffixes_to_variants.py | 14876 | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Migrates histogram_suffixes to patterned histograms"""
import argparse
import logging
import os
from xml.dom import minidom
import extract_histograms
import histogram_configuration_model
import histogram_paths
import path_util
HISTOGRAM_SUFFIXES_LIST_PATH = path_util.GetInputFile(
'tools/metrics/histograms/metadata/histogram_suffixes_list.xml')
def _ExtractObsoleteNode(node, recursive=True):
"""Extracts obsolete child from |node|. Returns None if not exists."""
if not recursive:
obsolete = [
element for element in node.getElementsByTagName('obsolete')
if element.parentNode == node
]
else:
obsolete = node.getElementsByTagName('obsolete')
if not obsolete:
return None
assert len(obsolete) == 1, (
'Node %s should at most contain one obsolete node.' %
node.getAttribute('name'))
return obsolete[0]
def _ExtractOwnerNodes(node):
  """Returns all <owner> elements in |node|'s subtree.

  Returns an empty NodeList (not None) when there are no owners.
  """
  return node.getElementsByTagName('owner')
def _RemoveSuffixesComment(node, histogram_suffixes_name):
"""Remove suffixes related comments from |node|."""
for child in node.childNodes:
if child.nodeType == minidom.Node.COMMENT_NODE:
if ('Name completed by' in child.data
and histogram_suffixes_name in child.data):
node.removeChild(child)
def _UpdateSummary(histogram, histogram_suffixes_name):
"""Appends a placeholder string to the |histogram|'s summary node."""
summary = histogram.getElementsByTagName('summary')
assert len(summary) == 1, 'A histogram should have a single summary node.'
summary = summary[0]
if summary.firstChild.nodeType != summary.TEXT_NODE:
raise ValueError('summary_node doesn\'t contain text.')
summary.firstChild.replaceWholeText(
'%s {%s}' % (summary.firstChild.data.strip(), histogram_suffixes_name))
def _AreAllAffectedHistogramsFound(affected_histograms, histograms):
"""Checks that are all affected histograms found in |histograms|."""
histogram_names = [histogram.getAttribute('name') for histogram in histograms]
return all(
affected_histogram.getAttribute('name') in histogram_names
for affected_histogram in affected_histograms)
def _GetSuffixesDict(nodes, all_histograms):
  """Gets a dict of simple histogram-suffixes to be used in the migration.
  Returns two dicts of histogram-suffixes to be migrated to the new patterned
  histograms syntax.
  The first dict: the keys are the histogram-suffixes' affected histogram name
  and the values are the histogram_suffixes nodes that have only one
  affected-histogram. These histograms-suffixes can be converted to inline
  patterned histograms.
  The second dict: the keys are the histogram_suffixes name and the values
  are the histogram_suffixes nodes whose affected-histograms are all present in
  the |all_histograms|. These histogram suffixes can be converted to out-of-line
  variants.
  Args:
    nodes: A Nodelist of histograms_suffixes nodes.
    all_histograms: A Nodelist of all chosen histograms.
  Returns:
    A dict of histograms-suffixes nodes keyed by their names.
  """
  single_affected = {}
  all_affected_found = {}
  for histogram_suffixes in nodes:
    affected_histograms = histogram_suffixes.getElementsByTagName(
        'affected-histogram')
    # Exactly one affected histogram -> the suffixes can be inlined.
    if len(affected_histograms) == 1:
      affected_histogram = affected_histograms[0].getAttribute('name')
      single_affected[affected_histogram] = histogram_suffixes
    # Only convert to out-of-line variants when every affected histogram
    # is among the chosen files; otherwise leave for manual migration.
    elif _AreAllAffectedHistogramsFound(affected_histograms, all_histograms):
      for affected_histogram in affected_histograms:
        affected_histogram = affected_histogram.getAttribute('name')
        # A histogram can only carry one token; a second suffixes node
        # targeting the same histogram must be migrated by hand.
        if affected_histogram in all_affected_found:
          logging.warning(
              'Histogram %s is already associated with other suffixes. '
              'Please manually migrate it.', affected_histogram)
          continue
        all_affected_found[affected_histogram] = histogram_suffixes
  return single_affected, all_affected_found
def _GetBaseVariant(doc, histogram):
  """Returns a <variant> node whose name is an empty string as the base variant.
  If histogram has attribute `base = True`, it means that the base histogram
  should be marked as obsolete.
  Args:
    doc: A Document object which is used to create a new <variant> node.
    histogram: The <histogram> node to check whether its base is true or not.
  Returns:
    A <variant> node.
  """
  is_base = False
  if histogram.hasAttribute('base'):
    is_base = histogram.getAttribute('base').lower() == 'true'
    # The attribute is consumed: the migrated histogram no longer
    # carries base="..." since the base variant expresses it instead.
    histogram.removeAttribute('base')
  base_variant = doc.createElement('variant')
  base_variant.setAttribute('name', '')
  if is_base:
    base_obsolete_node = doc.createElement('obsolete')
    base_obsolete_node.appendChild(
        doc.createTextNode(
            extract_histograms.DEFAULT_BASE_HISTOGRAM_OBSOLETE_REASON))
    base_variant.appendChild(base_obsolete_node)
  return base_variant
def _PopulateVariantsWithSuffixes(doc, node, histogram_suffixes):
  """Populates <variant> nodes to |node| from <suffix>.
  This function returns True if none of the suffixes contains 'base' attribute.
  If this function returns false, the caller's histogram node will not be
  updated. This is mainly because base suffix is a much more complicated case
  and thus it can not be automatically updated at least for now.
  Args:
    doc: A Document object which is used to create a new <variant> node.
    node: The node to be populated. it should be either <token> for inline
      variants or <variants> for out-of-line variants.
    histogram_suffixes: A <histogram_suffixes> node.
  Returns:
    True if the node can be updated automatically.
  """
  separator = histogram_suffixes.getAttribute('separator')
  suffixes_owners = _ExtractOwnerNodes(histogram_suffixes)
  suffixes_name = histogram_suffixes.getAttribute('name')
  # Check if <histogram_suffixes> node has its own <obsolete> node.
  obsolete_histogram_suffix_node = _ExtractObsoleteNode(histogram_suffixes,
                                                        False)
  for suffix in histogram_suffixes.getElementsByTagName('suffix'):
    # The base suffix is a much more complicated case. It might require
    # manual effort to migrate, so skip this case for now.
    suffix_name = suffix.getAttribute('name')
    if suffix.hasAttribute('base'):
      logging.warning(
          'suffix: %s in histogram_suffixes %s has base attribute. Please '
          'manually migrate it.', suffix_name, suffixes_name)
      return False
    # Suffix name might be empty. In this case, in order not to collide with
    # the base variant, the base variant would have to be removed first, so
    # bail out and leave these histograms for manual migration.
    if not suffix_name:
      logging.warning(
          'histogram suffixes: %s contains empty string suffix and thus we '
          'have to manually update the empty string variant in these base '
          'histograms: %s.', suffixes_name, ','.join(
              h.getAttribute('name') for h in
              histogram_suffixes.getElementsByTagName('affected-histogram')))
      return False
    variant = doc.createElement('variant')
    # 'ordering' means the token is inserted mid-name, so the separator
    # trails the suffix; otherwise the token is appended and it leads.
    if histogram_suffixes.hasAttribute('ordering'):
      variant.setAttribute('name', suffix_name + separator)
    else:
      variant.setAttribute('name', separator + suffix_name)
    if suffix.hasAttribute('label'):
      variant.setAttribute('summary', suffix.getAttribute('label'))
    # Copy the obsolete node from the suffix to the new variant. The per-
    # suffix obsolete node overrides the histogram_suffixes-level one,
    # if both exist.
    obsolete = _ExtractObsoleteNode(suffix) or obsolete_histogram_suffix_node
    if obsolete:
      variant.appendChild(obsolete.cloneNode(deep=True))
    # Populate owner's node from histogram suffixes to each new variant.
    for owner in suffixes_owners:
      variant.appendChild(owner.cloneNode(deep=True))
    node.appendChild(variant)
  return True
def _UpdateHistogramName(histogram, histogram_suffixes):
"""Adds histogram_suffixes's placeholder to the histogram name."""
histogram_name = histogram.getAttribute('name')
histogram_suffixes_name = histogram_suffixes.getAttribute('name')
ordering = histogram_suffixes.getAttribute('ordering')
if not ordering:
histogram.setAttribute('name',
'%s{%s}' % (histogram_name, histogram_suffixes_name))
else:
parts = ordering.split(',')
placement = 1
if len(parts) > 1:
placement = int(parts[1])
sections = histogram_name.split('.')
cluster = '.'.join(sections[0:placement]) + '.'
reminder = '.'.join(sections[placement:])
histogram.setAttribute(
'name', '%s{%s}%s' % (cluster, histogram_suffixes_name, reminder))
def MigrateToInlinePatterenedHistogram(doc, histogram, histogram_suffixes):
  """Migrates a single histogram_suffixes node to an inline patterned histogram."""
  # Keep a deep copy in case the |histogram| fails to be migrated.
  old_histogram = histogram.cloneNode(deep=True)
  # Update histogram's name with the histogram_suffixes' name.
  histogram_suffixes_name = histogram_suffixes.getAttribute('name')
  _UpdateHistogramName(histogram, histogram_suffixes)
  # Append |histogram_suffixes_name| placeholder string to the summary text.
  _UpdateSummary(histogram, histogram_suffixes_name)
  # Create an inline <token> node.
  token = doc.createElement('token')
  token.setAttribute('key', histogram_suffixes_name)
  token.appendChild(_GetBaseVariant(doc, histogram))
  # Populate <variant>s to the inline <token> node.
  if not _PopulateVariantsWithSuffixes(doc, token, histogram_suffixes):
    logging.warning('histogram_suffixes: %s needs manually effort',
                    histogram_suffixes_name)
    histograms = histogram.parentNode
    histograms.removeChild(histogram)
    # Restore the old histogram when the script fails to migrate it.
    histograms.appendChild(old_histogram)
  else:
    histogram.appendChild(token)
    histogram_suffixes.parentNode.removeChild(histogram_suffixes)
    # Remove obsolete comments from the histogram node.
    _RemoveSuffixesComment(histogram, histogram_suffixes_name)
def MigrateToOutOflinePatterenedHistogram(doc, histogram, histogram_suffixes):
  """Migrates a histogram_suffixes node to an out-of-line patterned histogram."""
  # Update histogram's name with the histogram_suffixes' name.
  histogram_suffixes_name = histogram_suffixes.getAttribute('name')
  _UpdateHistogramName(histogram, histogram_suffixes)
  # Append |histogram_suffixes_name| placeholder string to the summary text.
  _UpdateSummary(histogram, histogram_suffixes_name)
  # Create a <token> node that links to an out-of-line <variants> with the
  # same name; the <variants> itself is added by _MigrateOutOfLineVariants.
  token = doc.createElement('token')
  token.setAttribute('key', histogram_suffixes_name)
  token.setAttribute('variants', histogram_suffixes_name)
  token.appendChild(_GetBaseVariant(doc, histogram))
  histogram.appendChild(token)
  # Remove obsolete comments from the histogram node.
  _RemoveSuffixesComment(histogram, histogram_suffixes_name)
def _MigrateOutOfLineVariants(doc, histograms, suffixes_to_convert):
  """Converts each histogram-suffixes node to an out-of-line <variants>."""
  histograms_node = histograms.getElementsByTagName('histograms')
  assert len(histograms_node) == 1, (
      'Every histograms.xml should have only one <histograms> node.')
  for suffixes in suffixes_to_convert:
    histogram_suffixes_name = suffixes.getAttribute('name')
    variants = doc.createElement('variants')
    variants.setAttribute('name', histogram_suffixes_name)
    if not _PopulateVariantsWithSuffixes(doc, variants, suffixes):
      logging.warning('histogram_suffixes: %s needs manually effort',
                      histogram_suffixes_name)
    else:
      # Success: add the <variants> and drop the now-migrated
      # <histogram_suffixes> node from its own document.
      histograms_node[0].appendChild(variants)
      suffixes.parentNode.removeChild(suffixes)
def ChooseFiles(args):
  """Chooses a set of files to process so that we can migrate incrementally."""
  paths = []
  for path in sorted(histogram_paths.HISTOGRAMS_XMLS):
    if 'metadata' in path and path.endswith('histograms.xml'):
      # Filter by the first letter of the metadata directory name so runs
      # can be sliced alphabetically via --start/--end.
      name = os.path.basename(os.path.dirname(path))
      if args.start <= name[0] <= args.end:
        paths.append(path)
  # args.obsolete is truthy for any non-empty flag value (see __main__).
  if args.obsolete:
    paths.append(histogram_paths.OBSOLETE_XML)
  return paths
def SuffixesToVariantsMigration(args):
  """Migrates all histogram suffixes to patterned histograms.

  Rewrites each chosen histograms.xml in place and removes migrated
  <histogram_suffixes> entries from histogram_suffixes_list.xml.
  """
  # Use context managers so file handles are closed deterministically
  # instead of being leaked until garbage collection.
  with open(HISTOGRAM_SUFFIXES_LIST_PATH) as f:
    histogram_suffixes_list = minidom.parse(f)
  histogram_suffixes_nodes = histogram_suffixes_list.getElementsByTagName(
      'histogram_suffixes')
  doc = minidom.Document()
  for histograms_file in ChooseFiles(args):
    with open(histograms_file) as f:
      histograms = minidom.parse(f)
    single_affected, all_affected_found = _GetSuffixesDict(
        histogram_suffixes_nodes, histograms.getElementsByTagName('histogram'))
    suffixes_to_convert = set()
    for histogram in histograms.getElementsByTagName('histogram'):
      name = histogram.getAttribute('name')
      # Migrate inline patterned histograms.
      if name in single_affected.keys():
        MigrateToInlinePatterenedHistogram(doc, histogram,
                                           single_affected[name])
      elif name in all_affected_found.keys():
        suffixes_to_convert.add(all_affected_found[name])
        MigrateToOutOflinePatterenedHistogram(doc, histogram,
                                              all_affected_found[name])
    _MigrateOutOfLineVariants(doc, histograms, suffixes_to_convert)
    # Update histograms.xml with patterned histograms.
    with open(histograms_file, 'w') as f:
      pretty_xml_string = histogram_configuration_model.PrettifyTree(histograms)
      f.write(pretty_xml_string)
  # Remove histogram_suffixes that have already been migrated.
  with open(HISTOGRAM_SUFFIXES_LIST_PATH, 'w') as f:
    pretty_xml_string = histogram_configuration_model.PrettifyTree(
        histogram_suffixes_list)
    f.write(pretty_xml_string)
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--start',
      help='Start migration from a certain character (inclusive).',
      default='a')
  parser.add_argument('--end',
                      help='End migration at a certain character (inclusive).',
                      default='z')
  # NOTE(review): this is a plain string-valued flag with default=False,
  # so any supplied value (even '--obsolete false') is truthy in
  # ChooseFiles — consider action='store_true'; confirm before changing
  # the CLI.
  parser.add_argument('--obsolete',
                      help='Whether to migrate obsolete_histograms.xml',
                      default=False)
  args = parser.parse_args()
  assert len(args.start) == 1 and len(args.end) == 1, (
      'start and end flag should only contain a single letter.')
  SuffixesToVariantsMigration(args)
| bsd-3-clause |
nwjs/chromium.src | testing/chromoting/chromoting_test_driver_launcher.py | 4937 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility script to run chromoting test driver tests on the Chromoting bot."""
from __future__ import print_function
import argparse
from chromoting_test_utilities import GetJidFromHostLog
from chromoting_test_utilities import InitialiseTestMachineForLinux
from chromoting_test_utilities import MAX_RETRIES
from chromoting_test_utilities import PrintHostLogContents
from chromoting_test_utilities import PROD_DIR_ID
from chromoting_test_utilities import RunCommandInSubProcess
from chromoting_test_utilities import TestCaseSetup
from chromoting_test_utilities import TestMachineCleanup
TEST_ENVIRONMENT_TEAR_DOWN_INDICATOR = 'Global test environment tear-down'
FAILED_INDICATOR = '[ FAILED ]'
def LaunchCTDCommand(args, command):
  """Launches the specified chromoting test driver command.

  Args:
    args: Command line args, used for test-case startup tasks.
    command: Chromoting Test Driver command line.

  Returns:
    command, host_log_file_names: Tuple of:
    "command" if there was a test-environment failure, or any failing test,
    and list of host-log file-names.
  """
  host_log_file_names = []
  host_log_file_names.append(TestCaseSetup(args))
  # Parse the me2me host log to obtain the JID that the host registered.
  host_jid = GetJidFromHostLog(host_log_file_names[-1])

  if not host_jid:
    # Host-JID not found in log. Let's not attempt to run this test.
    print('Host-JID not found in log %s.' % host_log_file_names[-1])
    # Return the same (failure-string, log-files) tuple shape as all the
    # other paths; callers unpack two values.
    return '[Command failed]: %s' % command, host_log_file_names

  # In order to ensure the host is online with the expected JID, pass in the
  # jid obtained from the host-log as a command-line parameter. Build the
  # final command once, outside the retry loop, so retries don't keep
  # appending duplicate --hostjid flags.
  command = command.replace('\n', '') + ' --hostjid=%s' % host_jid

  retries = 0
  failed_tests_list = []
  # TODO(anandc): Remove this retry-logic once http://crbug/570840 is fixed.
  while retries <= MAX_RETRIES:
    results = RunCommandInSubProcess(command)

    tear_down_index = results.find(TEST_ENVIRONMENT_TEAR_DOWN_INDICATOR)
    if tear_down_index == -1:
      # The test environment did not tear down. Something went horribly wrong.
      return '[Command failed]: ' + command, host_log_file_names

    end_results_list = results[tear_down_index:].split('\n')
    test_failed = False
    for result in end_results_list:
      if result.startswith(FAILED_INDICATOR):
        test_failed = True
        if retries == MAX_RETRIES:
          # Test failed and we have no more retries left.
          failed_tests_list.append(result)

    if test_failed:
      retries += 1
    else:
      break

  if failed_tests_list:
    test_result = '[Command]: ' + command
    # Note: Skipping the first one is intentional.
    for i in range(1, len(failed_tests_list)):
      test_result += ' ' + failed_tests_list[i]
    return test_result, host_log_file_names

  # All tests passed!
  return '', host_log_file_names
def main(args):
  """Runs every command listed in args.commands_file on the test machine.

  Returns a (failed_tests, host_log_files) tuple: the accumulated failure
  strings (empty when everything passed) and all host-log file names.
  """
  InitialiseTestMachineForLinux(args.cfg_file)
  failed_tests = ''
  host_log_files = []
  with open(args.commands_file) as f:
    for line in f:
      # Replace the PROD_DIR value in the command-line with
      # the passed in value.
      line = line.replace(PROD_DIR_ID, args.prod_dir)
      # Launch specified command line for test.
      test_results, log_files = LaunchCTDCommand(args, line)
      failed_tests += test_results
      host_log_files.extend(log_files)
  # All tests completed. Include host-logs in the test results.
  PrintHostLogContents(host_log_files)
  return failed_tests, host_log_files
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument('-f', '--commands_file',
                      help='path to file listing commands to be launched.')
  parser.add_argument('-p', '--prod_dir',
                      help='path to folder having product and test binaries.')
  parser.add_argument('-c', '--cfg_file',
                      help='path to test host config file.')
  parser.add_argument('--me2me_manifest_file',
                      help='path to me2me host manifest file.')
  parser.add_argument('--it2me_manifest_file',
                      help='path to it2me host manifest file.')
  parser.add_argument(
      '-u', '--user_profile_dir',
      help='path to user-profile-dir, used by connect-to-host tests.')
  command_line_args = parser.parse_args()
  host_logs = ''
  failing_tests = ''
  try:
    failing_tests, host_logs = main(command_line_args)
    if failing_tests:
      # Surface failures prominently in the bot log, then raise so the
      # step is marked failed (cleanup still runs via finally).
      print('++++++++++FAILED TESTS++++++++++')
      print(failing_tests.rstrip('\n'))
      print('++++++++++++++++++++++++++++++++')
      raise Exception('At least one test failed.')
  finally:
    # Stop host and cleanup user-profile-dir.
    TestMachineCleanup(command_line_args.user_profile_dir, host_logs)
| bsd-3-clause |
chromium/chromium | chrome/installer/test/run_all_tests.cc | 311 | // Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/test/test_suite.h"
// Entry point for the installer test binary: delegate straight to the
// shared base::TestSuite runner and propagate its exit code.
int main(int argc, char** argv) {
  return base::TestSuite(argc, argv).Run();
}
| bsd-3-clause |
josenavas/american-gut-web | amgut/static/js/change_password.js | 2124 | function validatePassRequest() {
for(var i = 0; i < document.resetpass_request.length; i++)
{
document.resetpass_request[i].className = document.resetpass_request[i].className.replace(/(?:^|\s)highlight(?!\S)/ , '');
}
var valid = true;
if(!validateEmail(document.resetpass_request.email.value))
{
document.resetpass_request.email.className += " highlight"
valid = false;
}
if(document.resetpass_request.email.value == "")
{
document.resetpass_request.email.className += " highlight"
valid = false;
}
if(document.resetpass_request.kit_id.value == "")
{
document.resetpass_request.kit_id.className += " highlight"
valid = false;
}
if(valid)
$('#resetpass_request').submit();
}
// Submits the choose-new-password form only when the confirmation field
// matches the new password exactly; otherwise highlights the mismatch.
function validatePasswords()
{
    var form = document.reset_password;
    if (form.confirm_password.value == form.new_password.value) {
        $('#reset_password').submit();
    } else {
        form.confirm_password.className += " highlight";
    }
}
// Wire up the drop-down navigation once the DOM is ready.
$(document).ready(function () {
    // Add an empty <span> trigger next to each submenu; the drop-down
    // only appears when JavaScript is enabled.
    $("ul.subnav").parent().append("<span></span>");
    $("ul.topnav li span").click(function () {
        // Reveal this item's submenu when its trigger is clicked.
        var item = $(this).parent();
        item.find("ul.subnav").slideDown('fast').show();
        item.hover(function () {
        }, function () {
            // Slide the submenu back up when the mouse leaves.
            $(this).parent().find("ul.subnav").slideUp('slow');
        });
    }).hover(function () {
        // Mark the trigger while hovered so CSS can style it.
        $(this).addClass("subhover");
    }, function () {
        $(this).removeClass("subhover");
    });
});
pmarks-net/grpc | src/python/grpcio_tests/tests/health_check/_health_servicer_test.py | 4058 | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests of grpc_health.v1.health."""
import unittest
import grpc
from grpc.framework.foundation import logging_pool
from grpc_health.v1 import health
from grpc_health.v1 import health_pb2
from tests.unit.framework.common import test_constants
class HealthServicerTest(unittest.TestCase):
    """End-to-end tests for grpc_health.v1.health.HealthServicer.

    A real in-process gRPC server is started on an ephemeral port and
    queried through a HealthStub over an insecure channel.
    """

    def setUp(self):
        servicer = health.HealthServicer()
        # The empty service name conventionally reports overall server health.
        servicer.set('', health_pb2.HealthCheckResponse.SERVING)
        servicer.set('grpc.test.TestServiceServing',
                     health_pb2.HealthCheckResponse.SERVING)
        servicer.set('grpc.test.TestServiceUnknown',
                     health_pb2.HealthCheckResponse.UNKNOWN)
        servicer.set('grpc.test.TestServiceNotServing',
                     health_pb2.HealthCheckResponse.NOT_SERVING)
        server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
        self._server = grpc.server(server_pool)
        # Port 0 requests any free port; the actual port is returned so the
        # client channel below can connect to it.
        port = self._server.add_insecure_port('[::]:0')
        health_pb2.add_HealthServicer_to_server(servicer, self._server)
        self._server.start()
        channel = grpc.insecure_channel('localhost:%d' % port)
        self._stub = health_pb2.HealthStub(channel)

    def test_empty_service(self):
        request = health_pb2.HealthCheckRequest()
        resp = self._stub.Check(request)
        self.assertEqual(health_pb2.HealthCheckResponse.SERVING, resp.status)

    def test_serving_service(self):
        request = health_pb2.HealthCheckRequest(
            service='grpc.test.TestServiceServing')
        resp = self._stub.Check(request)
        self.assertEqual(health_pb2.HealthCheckResponse.SERVING, resp.status)

    def test_unknown_service(self):
        # Method name typo fixed (was "test_unknown_serivce"); unittest still
        # discovers it via the "test" prefix.
        request = health_pb2.HealthCheckRequest(
            service='grpc.test.TestServiceUnknown')
        resp = self._stub.Check(request)
        self.assertEqual(health_pb2.HealthCheckResponse.UNKNOWN, resp.status)

    def test_not_serving_service(self):
        request = health_pb2.HealthCheckRequest(
            service='grpc.test.TestServiceNotServing')
        resp = self._stub.Check(request)
        self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING,
                         resp.status)

    def test_not_found_service(self):
        # A service that was never registered must fail with NOT_FOUND.
        request = health_pb2.HealthCheckRequest(service='not-found')
        with self.assertRaises(grpc.RpcError) as context:
            # The call must raise; its return value is irrelevant (the unused
            # "resp =" binding was removed).
            self._stub.Check(request)
        self.assertEqual(grpc.StatusCode.NOT_FOUND, context.exception.code())
if __name__ == '__main__':
    # verbosity=2 prints each test method name as it runs.
    unittest.main(verbosity=2)
| bsd-3-clause |
chromium/chromium | android_webview/browser/gfx/child_frame.cc | 1891 | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "android_webview/browser/gfx/child_frame.h"
#include <utility>
#include "base/trace_event/trace_event.h"
#include "components/viz/common/frame_sinks/copy_output_request.h"
#include "components/viz/common/quads/compositor_frame.h"
namespace android_webview {
// Snapshot of the parameters for one child (renderer) compositor frame.
// The CompositorFrame itself is not available yet at construction time; it
// is fetched later from |frame_future| by WaitOnFutureIfNeeded().
ChildFrame::ChildFrame(
    scoped_refptr<content::SynchronousCompositor::FrameFuture> frame_future,
    const viz::FrameSinkId& frame_sink_id,
    const gfx::Size& viewport_size_for_tile_priority,
    const gfx::Transform& transform_for_tile_priority,
    bool offscreen_pre_raster,
    float device_scale_factor,
    CopyOutputRequestQueue copy_requests,
    bool did_invalidate)
    : frame_future(std::move(frame_future)),
      frame_sink_id(frame_sink_id),
      viewport_size_for_tile_priority(viewport_size_for_tile_priority),
      transform_for_tile_priority(transform_for_tile_priority),
      offscreen_pre_raster(offscreen_pre_raster),
      device_scale_factor(device_scale_factor),
      copy_requests(std::move(copy_requests)),
      did_invalidate(did_invalidate) {}
// Members release their own resources via their destructors; nothing to do
// explicitly here.
ChildFrame::~ChildFrame() {
}
// Resolves |frame_future| (blocking if the renderer has not produced the
// frame yet — TODO confirm blocking semantics of GetFrame()) and moves its
// contents into this object. No-op if the future was already consumed.
void ChildFrame::WaitOnFutureIfNeeded() {
  if (!frame_future)
    return;

  TRACE_EVENT0("android_webview", "GetFrame");
  DCHECK(!frame);
  // GetFrame() may return null, in which case no frame data is copied.
  auto frame_ptr = frame_future->GetFrame();
  if (frame_ptr) {
    layer_tree_frame_sink_id = frame_ptr->layer_tree_frame_sink_id;
    frame = std::move(frame_ptr->frame);
    local_surface_id = frame_ptr->local_surface_id;
    hit_test_region_list = std::move(frame_ptr->hit_test_region_list);
  }
  // Mark the future consumed; GetSurfaceId() DCHECKs on this.
  frame_future = nullptr;
}
// Returns the surface id for this frame. Only valid after
// WaitOnFutureIfNeeded() has run (enforced by the DCHECK on |frame_future|).
viz::SurfaceId ChildFrame::GetSurfaceId() const {
  DCHECK(!frame_future);
  return viz::SurfaceId(frame_sink_id, local_surface_id);
}
} // namespace android_webview
| bsd-3-clause |
chromium/chromium | chrome/test/data/extensions/api_test/service_worker/messaging/connect_to_worker/post_message/content_script.js | 444 | // Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Open a long-lived messaging channel to the extension and verify the
// round trip: this tab sends 'tab->worker', and the extension's service
// worker is expected to reply with 'tab->worker->tab'.
var port = chrome.runtime.connect();

port.onMessage.addListener(function(reply) {
  chrome.test.assertEq('tab->worker->tab', reply);
  chrome.test.succeed();
});

port.postMessage('tab->worker');
| bsd-3-clause |
joone/chromium-crosswalk | third_party/WebKit/Source/modules/serviceworkers/ServiceWorkerContainer.cpp | 17488 | /*
* Copyright (C) 2013 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "modules/serviceworkers/ServiceWorkerContainer.h"
#include "bindings/core/v8/ScriptPromise.h"
#include "bindings/core/v8/ScriptPromiseResolver.h"
#include "bindings/core/v8/ScriptState.h"
#include "bindings/core/v8/SerializedScriptValue.h"
#include "bindings/core/v8/SerializedScriptValueFactory.h"
#include "bindings/core/v8/V8ThrowException.h"
#include "core/dom/DOMException.h"
#include "core/dom/Document.h"
#include "core/dom/ExceptionCode.h"
#include "core/dom/ExecutionContext.h"
#include "core/dom/MessagePort.h"
#include "core/frame/LocalDOMWindow.h"
#include "core/frame/UseCounter.h"
#include "modules/EventTargetModules.h"
#include "modules/serviceworkers/ServiceWorker.h"
#include "modules/serviceworkers/ServiceWorkerContainerClient.h"
#include "modules/serviceworkers/ServiceWorkerError.h"
#include "modules/serviceworkers/ServiceWorkerMessageEvent.h"
#include "modules/serviceworkers/ServiceWorkerRegistration.h"
#include "platform/RuntimeEnabledFeatures.h"
#include "platform/weborigin/SchemeRegistry.h"
#include "public/platform/WebString.h"
#include "public/platform/WebURL.h"
#include "public/platform/modules/serviceworker/WebServiceWorker.h"
#include "public/platform/modules/serviceworker/WebServiceWorkerProvider.h"
#include "public/platform/modules/serviceworker/WebServiceWorkerRegistration.h"
namespace blink {
// Adapts the embedder's asynchronous registerServiceWorker() result into
// resolution/rejection of the ScriptPromiseResolver created for the call.
class RegistrationCallback : public WebServiceWorkerProvider::WebServiceWorkerRegistrationCallbacks {
public:
    explicit RegistrationCallback(ScriptPromiseResolver* resolver)
        : m_resolver(resolver) { }
    ~RegistrationCallback() override { }

    void onSuccess(WebPassOwnPtr<WebServiceWorkerRegistration::Handle> handle) override
    {
        // Bail out if the execution context is gone or shutting down;
        // resolving would touch a dead context.
        if (!m_resolver->executionContext() || m_resolver->executionContext()->activeDOMObjectsAreStopped())
            return;
        m_resolver->resolve(ServiceWorkerRegistration::getOrCreate(m_resolver->executionContext(), handle.release()));
    }

    void onError(const WebServiceWorkerError& error) override
    {
        if (!m_resolver->executionContext() || m_resolver->executionContext()->activeDOMObjectsAreStopped())
            return;
        m_resolver->reject(ServiceWorkerError::take(m_resolver.get(), error));
    }

private:
    Persistent<ScriptPromiseResolver> m_resolver;
    WTF_MAKE_NONCOPYABLE(RegistrationCallback);
};
// Resolves the promise for navigator.serviceWorker.getRegistration().
// A null handle means no registration matched; the promise resolves with
// undefined in that case.
class GetRegistrationCallback : public WebServiceWorkerProvider::WebServiceWorkerGetRegistrationCallbacks {
public:
    explicit GetRegistrationCallback(ScriptPromiseResolver* resolver)
        : m_resolver(resolver) { }
    ~GetRegistrationCallback() override { }

    void onSuccess(WebPassOwnPtr<WebServiceWorkerRegistration::Handle> webPassHandle) override
    {
        OwnPtr<WebServiceWorkerRegistration::Handle> handle = webPassHandle.release();
        // Bail out if the execution context is gone or shutting down.
        if (!m_resolver->executionContext() || m_resolver->executionContext()->activeDOMObjectsAreStopped())
            return;
        if (!handle) {
            // Resolve the promise with undefined.
            m_resolver->resolve();
            return;
        }
        m_resolver->resolve(ServiceWorkerRegistration::getOrCreate(m_resolver->executionContext(), handle.release()));
    }

    void onError(const WebServiceWorkerError& error) override
    {
        if (!m_resolver->executionContext() || m_resolver->executionContext()->activeDOMObjectsAreStopped())
            return;
        m_resolver->reject(ServiceWorkerError::take(m_resolver.get(), error));
    }

private:
    Persistent<ScriptPromiseResolver> m_resolver;
    WTF_MAKE_NONCOPYABLE(GetRegistrationCallback);
};
// Resolves the promise for navigator.serviceWorker.getRegistrations() with
// an array of ServiceWorkerRegistration objects. Takes ownership of each
// raw handle pointer received from the embedder before any early return.
class GetRegistrationsCallback : public WebServiceWorkerProvider::WebServiceWorkerGetRegistrationsCallbacks {
public:
    explicit GetRegistrationsCallback(ScriptPromiseResolver* resolver)
        : m_resolver(resolver) { }
    ~GetRegistrationsCallback() override { }

    void onSuccess(WebPassOwnPtr<WebVector<WebServiceWorkerRegistration::Handle*>> webPassRegistrations) override
    {
        Vector<OwnPtr<WebServiceWorkerRegistration::Handle>> handles;
        OwnPtr<WebVector<WebServiceWorkerRegistration::Handle*>> webRegistrations = webPassRegistrations.release();
        // Adopt every raw handle first so none leak if we bail out below.
        for (auto& handle : *webRegistrations) {
            handles.append(adoptPtr(handle));
        }

        if (!m_resolver->executionContext() || m_resolver->executionContext()->activeDOMObjectsAreStopped())
            return;
        m_resolver->resolve(ServiceWorkerRegistrationArray::take(m_resolver.get(), &handles));
    }

    void onError(const WebServiceWorkerError& error) override
    {
        if (!m_resolver->executionContext() || m_resolver->executionContext()->activeDOMObjectsAreStopped())
            return;
        m_resolver->reject(ServiceWorkerError::take(m_resolver.get(), error));
    }

private:
    Persistent<ScriptPromiseResolver> m_resolver;
    WTF_MAKE_NONCOPYABLE(GetRegistrationsCallback);
};
// Resolves the container's "ready" property once the embedder reports a
// registration for the current document. Unlike the other callbacks above,
// this one has no onError path; the ready promise is never rejected here.
class ServiceWorkerContainer::GetRegistrationForReadyCallback : public WebServiceWorkerProvider::WebServiceWorkerGetRegistrationForReadyCallbacks {
public:
    explicit GetRegistrationForReadyCallback(ReadyProperty* ready)
        : m_ready(ready) { }
    ~GetRegistrationForReadyCallback() override { }

    void onSuccess(WebPassOwnPtr<WebServiceWorkerRegistration::Handle> handle) override
    {
        // The ready property must not have been resolved already.
        ASSERT(m_ready->state() == ReadyProperty::Pending);

        if (m_ready->executionContext() && !m_ready->executionContext()->activeDOMObjectsAreStopped())
            m_ready->resolve(ServiceWorkerRegistration::getOrCreate(m_ready->executionContext(), handle.release()));
    }

private:
    Persistent<ReadyProperty> m_ready;
    WTF_MAKE_NONCOPYABLE(GetRegistrationForReadyCallback);
};
// Factory. |executionContext| may be null; the constructor tolerates that
// and simply leaves the container without a provider.
ServiceWorkerContainer* ServiceWorkerContainer::create(ExecutionContext* executionContext)
{
    return new ServiceWorkerContainer(executionContext);
}
ServiceWorkerContainer::~ServiceWorkerContainer()
{
    // willBeDetachedFromFrame() must have cleared the provider link before
    // destruction.
    ASSERT(!m_provider);
}
// Severs the two-way link with the embedder's provider when the frame is
// detached, so no further callbacks reach this container. Must run before
// destruction (the destructor ASSERTs that |m_provider| is null).
void ServiceWorkerContainer::willBeDetachedFromFrame()
{
    if (m_provider) {
        // Use nullptr rather than the literal 0 for pointer arguments, for
        // consistency with the rest of this file.
        m_provider->setClient(nullptr);
        m_provider = nullptr;
    }
}
// Garbage-collection tracing hook: marks the traced members reachable and
// delegates to both base classes.
DEFINE_TRACE(ServiceWorkerContainer)
{
    visitor->trace(m_controller);
    visitor->trace(m_ready);
    RefCountedGarbageCollectedEventTargetWithInlineData<ServiceWorkerContainer>::trace(visitor);
    ContextLifecycleObserver::trace(visitor);
}
// Implements navigator.serviceWorker.register(url, options). Validates the
// calling context, script URL and scope URL (secure context, supported
// schemes, same-origin) before delegating the actual registration to the
// embedder via |m_provider|. Returns a promise that the RegistrationCallback
// settles asynchronously.
ScriptPromise ServiceWorkerContainer::registerServiceWorker(ScriptState* scriptState, const String& url, const RegistrationOptions& options)
{
    ScriptPromiseResolver* resolver = ScriptPromiseResolver::create(scriptState);
    ScriptPromise promise = resolver->promise();

    // No provider means the document cannot host service workers.
    if (!m_provider) {
        resolver->reject(DOMException::create(InvalidStateError, "Failed to register a ServiceWorker: The document is in an invalid state."));
        return promise;
    }

    ExecutionContext* executionContext = scriptState->executionContext();
    // FIXME: May be null due to worker termination: http://crbug.com/413518.
    if (!executionContext)
        return ScriptPromise();

    RefPtr<SecurityOrigin> documentOrigin = executionContext->securityOrigin();
    String errorMessage;
    // Restrict to secure origins: https://w3c.github.io/webappsec/specs/powerfulfeatures/#settings-privileged
    if (!executionContext->isSecureContext(errorMessage)) {
        resolver->reject(DOMException::create(SecurityError, errorMessage));
        return promise;
    }

    // The page itself must live on a scheme that supports service workers.
    KURL pageURL = KURL(KURL(), documentOrigin->toString());
    if (!SchemeRegistry::shouldTreatURLSchemeAsAllowingServiceWorkers(pageURL.protocol())) {
        resolver->reject(DOMException::create(SecurityError, "Failed to register a ServiceWorker: The URL protocol of the current origin ('" + documentOrigin->toString() + "') is not supported."));
        return promise;
    }

    // Resolve the script URL against the entered context and require it to be
    // same-origin and on a supported scheme.
    KURL scriptURL = enteredExecutionContext(scriptState->isolate())->completeURL(url);
    scriptURL.removeFragmentIdentifier();
    if (!documentOrigin->canRequest(scriptURL)) {
        RefPtr<SecurityOrigin> scriptOrigin = SecurityOrigin::create(scriptURL);
        resolver->reject(DOMException::create(SecurityError, "Failed to register a ServiceWorker: The origin of the provided scriptURL ('" + scriptOrigin->toString() + "') does not match the current origin ('" + documentOrigin->toString() + "')."));
        return promise;
    }
    if (!SchemeRegistry::shouldTreatURLSchemeAsAllowingServiceWorkers(scriptURL.protocol())) {
        resolver->reject(DOMException::create(SecurityError, "Failed to register a ServiceWorker: The URL protocol of the script ('" + scriptURL.string() + "') is not supported."));
        return promise;
    }

    // The scope defaults to "./" relative to the script URL, and must pass
    // the same origin/scheme checks as the script.
    KURL patternURL;
    if (options.scope().isNull())
        patternURL = KURL(scriptURL, "./");
    else
        patternURL = enteredExecutionContext(scriptState->isolate())->completeURL(options.scope());
    patternURL.removeFragmentIdentifier();

    if (!documentOrigin->canRequest(patternURL)) {
        RefPtr<SecurityOrigin> patternOrigin = SecurityOrigin::create(patternURL);
        resolver->reject(DOMException::create(SecurityError, "Failed to register a ServiceWorker: The origin of the provided scope ('" + patternOrigin->toString() + "') does not match the current origin ('" + documentOrigin->toString() + "')."));
        return promise;
    }
    if (!SchemeRegistry::shouldTreatURLSchemeAsAllowingServiceWorkers(patternURL.protocol())) {
        resolver->reject(DOMException::create(SecurityError, "Failed to register a ServiceWorker: The URL protocol of the scope ('" + patternURL.string() + "') is not supported."));
        return promise;
    }

    // Give the embedder a final veto over the scope/script combination.
    WebString webErrorMessage;
    if (!m_provider->validateScopeAndScriptURL(patternURL, scriptURL, &webErrorMessage)) {
        resolver->reject(V8ThrowException::createTypeError(scriptState->isolate(), WebString::fromUTF8("Failed to register a ServiceWorker: " + webErrorMessage.utf8())));
        return promise;
    }

    m_provider->registerServiceWorker(patternURL, scriptURL, new RegistrationCallback(resolver));

    return promise;
}
// Implements navigator.serviceWorker.getRegistration(documentURL). Performs
// the same secure-context / scheme / same-origin validation as register()
// before asking the embedder for the matching registration.
ScriptPromise ServiceWorkerContainer::getRegistration(ScriptState* scriptState, const String& documentURL)
{
    ScriptPromiseResolver* resolver = ScriptPromiseResolver::create(scriptState);
    ScriptPromise promise = resolver->promise();

    if (!m_provider) {
        resolver->reject(DOMException::create(InvalidStateError, "Failed to get a ServiceWorkerRegistration: The document is in an invalid state."));
        return promise;
    }

    ExecutionContext* executionContext = scriptState->executionContext();
    // FIXME: May be null due to worker termination: http://crbug.com/413518.
    if (!executionContext)
        return ScriptPromise();

    RefPtr<SecurityOrigin> documentOrigin = executionContext->securityOrigin();
    String errorMessage;
    if (!executionContext->isSecureContext(errorMessage)) {
        resolver->reject(DOMException::create(SecurityError, errorMessage));
        return promise;
    }

    KURL pageURL = KURL(KURL(), documentOrigin->toString());
    if (!SchemeRegistry::shouldTreatURLSchemeAsAllowingServiceWorkers(pageURL.protocol())) {
        resolver->reject(DOMException::create(SecurityError, "Failed to get a ServiceWorkerRegistration: The URL protocol of the current origin ('" + documentOrigin->toString() + "') is not supported."));
        return promise;
    }

    // The queried document URL must be same-origin with the caller.
    KURL completedURL = enteredExecutionContext(scriptState->isolate())->completeURL(documentURL);
    completedURL.removeFragmentIdentifier();
    if (!documentOrigin->canRequest(completedURL)) {
        RefPtr<SecurityOrigin> documentURLOrigin = SecurityOrigin::create(completedURL);
        resolver->reject(DOMException::create(SecurityError, "Failed to get a ServiceWorkerRegistration: The origin of the provided documentURL ('" + documentURLOrigin->toString() + "') does not match the current origin ('" + documentOrigin->toString() + "')."));
        return promise;
    }

    m_provider->getRegistration(completedURL, new GetRegistrationCallback(resolver));

    return promise;
}
// Implements navigator.serviceWorker.getRegistrations(). Validates the
// calling context and then asks the embedder for all registrations for this
// origin; the GetRegistrationsCallback settles the returned promise.
ScriptPromise ServiceWorkerContainer::getRegistrations(ScriptState* scriptState)
{
    ScriptPromiseResolver* resolver = ScriptPromiseResolver::create(scriptState);
    ScriptPromise promise = resolver->promise();

    if (!m_provider) {
        resolver->reject(DOMException::create(InvalidStateError, "Failed to get ServiceWorkerRegistration objects: The document is in an invalid state."));
        return promise;
    }

    ExecutionContext* executionContext = scriptState->executionContext();
    // May be null due to worker termination (http://crbug.com/413518); the
    // sibling methods registerServiceWorker() and getRegistration() already
    // guard against this, and without the check securityOrigin() below would
    // dereference a null pointer.
    if (!executionContext)
        return ScriptPromise();

    RefPtr<SecurityOrigin> documentOrigin = executionContext->securityOrigin();
    String errorMessage;
    if (!executionContext->isSecureContext(errorMessage)) {
        resolver->reject(DOMException::create(SecurityError, errorMessage));
        return promise;
    }

    KURL pageURL = KURL(KURL(), documentOrigin->toString());
    if (!SchemeRegistry::shouldTreatURLSchemeAsAllowingServiceWorkers(pageURL.protocol())) {
        resolver->reject(DOMException::create(SecurityError, "Failed to get ServiceWorkerRegistration objects: The URL protocol of the current origin ('" + documentOrigin->toString() + "') is not supported."));
        return promise;
    }

    m_provider->getRegistrations(new GetRegistrationsCallback(resolver));

    return promise;
}
// Creates the lazily-initialized promise property backing .ready.
ServiceWorkerContainer::ReadyProperty* ServiceWorkerContainer::createReadyProperty()
{
    return new ReadyProperty(executionContext(), this, ReadyProperty::Ready);
}
// Implements navigator.serviceWorker.ready. Lazily creates the ReadyProperty
// on first access and starts the provider query that will resolve it; every
// call returns the same underlying promise for the caller's world.
ScriptPromise ServiceWorkerContainer::ready(ScriptState* callerState)
{
    if (!executionContext())
        return ScriptPromise();

    if (!callerState->world().isMainWorld()) {
        // FIXME: Support .ready from isolated worlds when
        // ScriptPromiseProperty can vend Promises in isolated worlds.
        return ScriptPromise::rejectWithDOMException(callerState, DOMException::create(NotSupportedError, "'ready' is only supported in pages."));
    }

    if (!m_ready) {
        m_ready = createReadyProperty();
        if (m_provider)
            m_provider->getRegistrationForReady(new GetRegistrationForReadyCallback(m_ready.get()));
    }

    return m_ready->promise(callerState->world());
}
// Updates the document's controlling service worker. |m_controller| may end
// up null after ServiceWorker::from() (checked below). Optionally fires a
// "controllerchange" event at the container.
void ServiceWorkerContainer::setController(WebPassOwnPtr<WebServiceWorker::Handle> handle, bool shouldNotifyControllerChange)
{
    if (!executionContext())
        return;
    m_controller = ServiceWorker::from(executionContext(), handle.release());
    if (m_controller)
        UseCounter::count(executionContext(), UseCounter::ServiceWorkerControlledPage);
    if (shouldNotifyControllerChange)
        dispatchEvent(Event::create(EventTypeNames::controllerchange));
}
// Delivers a message from a service worker to the page: deserializes the
// wire-format payload, wraps the transferred channels as MessagePorts, and
// dispatches a ServiceWorkerMessageEvent at the container.
void ServiceWorkerContainer::dispatchMessageEvent(WebPassOwnPtr<WebServiceWorker::Handle> handle, const WebString& message, const WebMessagePortChannelArray& webChannels)
{
    // Nothing to deliver if the context or its window is already gone.
    if (!executionContext() || !executionContext()->executingWindow())
        return;

    MessagePortArray* ports = MessagePort::toMessagePortArray(executionContext(), webChannels);
    RefPtr<SerializedScriptValue> value = SerializedScriptValueFactory::instance().createFromWire(message);
    ServiceWorker* source = ServiceWorker::from(executionContext(), handle.release());
    dispatchEvent(ServiceWorkerMessageEvent::create(ports, value, source, executionContext()->securityOrigin()->toString()));
}
// EventTarget implementation.
const AtomicString& ServiceWorkerContainer::interfaceName() const
{
    return EventTargetNames::ServiceWorkerContainer;
}
// Hooks the container up to the embedder's provider, if this context has a
// ServiceWorkerContainerClient. Tolerates a null |executionContext| (the
// container is then left without a provider; see create()).
ServiceWorkerContainer::ServiceWorkerContainer(ExecutionContext* executionContext)
    : ContextLifecycleObserver(executionContext)
    , m_provider(nullptr) // nullptr rather than 0, consistent with the rest of this file.
{
    if (!executionContext)
        return;

    if (ServiceWorkerContainerClient* client = ServiceWorkerContainerClient::from(executionContext)) {
        m_provider = client->provider();
        if (m_provider)
            m_provider->setClient(this);
    }
}
} // namespace blink
| bsd-3-clause |
Ms2ger/presto-testo | core/standards/scripts/jstest-core-2/es-regression/imported-netscape/ecma/Date/15.9.3.8-4.js | 10111 | /* The contents of this file are subject to the Netscape Public License
* Version 1.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://www.mozilla.org/NPL/
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
* License for the specific language governing rights and limitations
* under the License.
*
* The Original Code is Mozilla Communicator client code, released March
* 31, 1998.
*
* The Initial Developer of the Original Code is Netscape Communications
* Corporation. Portions created by Netscape are Copyright (C) 1998
* Netscape Communications Corporation. All Rights Reserved.
*
*/
/**
File Name: 15.9.3.8.js
ECMA Section: 15.9.3.8 The Date Constructor
new Date( value )
Description: The [[Prototype]] property of the newly constructed
object is set to the original Date prototype object,
the one that is the initial valiue of Date.prototype.
The [[Class]] property of the newly constructed object is
set to "Date".
The [[Value]] property of the newly constructed object is
set as follows:
1. Call ToPrimitive(value)
2. If Type( Result(1) ) is String, then go to step 5.
3. Let V be ToNumber( Result(1) ).
4. Set the [[Value]] property of the newly constructed
object to TimeClip(V) and return.
5. Parse Result(1) as a date, in exactly the same manner
as for the parse method. Let V be the time value for
this date.
6. Go to step 4.
Author: [email protected]
Date: 28 october 1997
Version: 9706
*/
var VERSION = "ECMA_1";
startTest();
var SECTION = "15.9.3.8";
var TYPEOF = "object";

// Indices into the expected-result arrays passed to addNewTestCase():
// [0] time value, [1..8] UTC components, [9..16] local-time components.
var TIME = 0;
var UTC_YEAR = 1;
var UTC_MONTH = 2;
var UTC_DATE = 3;
var UTC_DAY = 4;
var UTC_HOURS = 5;
var UTC_MINUTES = 6;
var UTC_SECONDS = 7;
var UTC_MS = 8;
var YEAR = 9;
var MONTH = 10;
var DATE = 11;
var DAY = 12;
var HOURS = 13;
var MINUTES = 14;
var SECONDS = 15;
var MS = 16;

// for TCMS, the testcases array must be global.
var tc= 0;
var TITLE = "Date constructor: new Date( value )";
// NOTE(review): SECTION, VERSION and the startTest() call are repeated from
// the top of this script; the duplication looks redundant but startTest()'s
// side effects are defined in the shared harness — confirm before removing.
var SECTION = "15.9.3.8";
var VERSION = "ECMA_1";
startTest();

writeHeaderToLog( SECTION +" " + TITLE );

testcases = new Array();
getTestCases();

// all tests must call a function that returns a boolean value
test();
// Builds the test cases: constructs Dates around Feb 29, 2000 from numeric
// time values and from their own string representations, and checks every
// accessor against the expected component arrays. The 1900-era and
// DST-boundary cases below are commented out (see the inline notes).
function getTestCases( ) {
    var TZ_ADJUST = -TZ_DIFF * msPerHour;

    // Dates around Feb 29, 2000
    var UTC_FEB_29_2000 = TIME_2000 + 31*msPerDay + 28*msPerDay;
    var PST_FEB_29_2000 = UTC_FEB_29_2000 + TZ_ADJUST;

    addNewTestCase( new Date(UTC_FEB_29_2000),
                    "new Date("+UTC_FEB_29_2000+")",
                    [UTC_FEB_29_2000,2000,1,29,2,0,0,0,0,2000,1,28,1,16,0,0,0] );
    addNewTestCase( new Date(PST_FEB_29_2000),
                    "new Date("+PST_FEB_29_2000+")",
                    [PST_FEB_29_2000,2000,1,29,2,8,0,0,0,2000,1,29,2,0,0,0,0] );
    // Round-trip through toString()/toGMTString(): parsing a Date's own
    // string form must produce the same time value.
    addNewTestCase( new Date( (new Date(UTC_FEB_29_2000)).toString() ),
                    "new Date(\""+(new Date(UTC_FEB_29_2000)).toString()+"\")",
                    [UTC_FEB_29_2000,2000,1,29,2,0,0,0,0,2000,1,28,1,16,0,0,0] );
    addNewTestCase( new Date( (new Date(PST_FEB_29_2000)).toString() ),
                    "new Date(\""+(new Date(PST_FEB_29_2000)).toString()+"\")",
                    [PST_FEB_29_2000,2000,1,29,2,8,0,0,0,2000,1,29,2,0,0,0,0] );
    addNewTestCase( new Date( (new Date(UTC_FEB_29_2000)).toGMTString() ),
                    "new Date(\""+(new Date(UTC_FEB_29_2000)).toGMTString()+"\")",
                    [UTC_FEB_29_2000,2000,1,29,2,0,0,0,0,2000,1,28,1,16,0,0,0] );
    addNewTestCase( new Date( (new Date(PST_FEB_29_2000)).toGMTString() ),
                    "new Date(\""+(new Date(PST_FEB_29_2000)).toGMTString()+"\")",
                    [PST_FEB_29_2000,2000,1,29,2,8,0,0,0,2000,1,29,2,0,0,0,0] );

    /*
    // Dates around 1900
    var PST_1900 = TIME_1900 + 8*msPerHour;
    addNewTestCase( new Date( TIME_1900 ),
                    "new Date("+TIME_1900+")",
                    [TIME_1900,1900,0,1,1,0,0,0,0,1899,11,31,0,16,0,0,0] );
    addNewTestCase( new Date(PST_1900),
                    "new Date("+PST_1900+")",
                    [ PST_1900,1900,0,1,1,8,0,0,0,1900,0,1,1,0,0,0,0] );
    addNewTestCase( new Date( (new Date(TIME_1900)).toString() ),
                    "new Date(\""+(new Date(TIME_1900)).toString()+"\")",
                    [TIME_1900,1900,0,1,1,0,0,0,0,1899,11,31,0,16,0,0,0] );
    addNewTestCase( new Date( (new Date(PST_1900)).toString() ),
                    "new Date(\""+(new Date(PST_1900 )).toString()+"\")",
                    [ PST_1900,1900,0,1,1,8,0,0,0,1900,0,1,1,0,0,0,0] );
    addNewTestCase( new Date( (new Date(TIME_1900)).toUTCString() ),
                    "new Date(\""+(new Date(TIME_1900)).toUTCString()+"\")",
                    [TIME_1900,1900,0,1,1,0,0,0,0,1899,11,31,0,16,0,0,0] );
    addNewTestCase( new Date( (new Date(PST_1900)).toUTCString() ),
                    "new Date(\""+(new Date(PST_1900 )).toUTCString()+"\")",
                    [ PST_1900,1900,0,1,1,8,0,0,0,1900,0,1,1,0,0,0,0] );
    // addNewTestCase( "new Date(\""+(new Date(TIME_1900)).toLocaleString()+"\")", [TIME_1900,1900,0,1,1,0,0,0,0,1899,11,31,0,16,0,0,0] );
    // addNewTestCase( "new Date(\""+(new Date(PST_1900 )).toLocaleString()+"\")", [ PST_1900,1900,0,1,1,8,0,0,0,1900,0,1,1,0,0,0,0] );
    */
    /*
    This test case is incorrect. Need to fix the DaylightSavings functions in
    shell.js for this to work properly.

    var DST_START_1998 = UTC( GetFirstSundayInApril(TimeFromYear(1998)) + 2*msPerHour )

    addNewTestCase( new Date(DST_START_1998-1),
                    "new Date("+(DST_START_1998-1)+")",
                    [DST_START_1998-1,1998,3,5,0,9,59,59,999,1998,3,5,0,1,59,59,999] );
    addNewTestCase( new Date(DST_START_1998),
                    "new Date("+DST_START_1998+")",
                    [DST_START_1998,1998,3,5,0,10,0,0,0,1998,3,5,0,3,0,0,0]);

    var DST_END_1998 = UTC( GetLastSundayInOctober(TimeFromYear(1998)) + 2*msPerHour );

    addNewTestCase ( new Date(DST_END_1998-1),
                     "new Date("+(DST_END_1998-1)+")",
                     [DST_END_1998-1,1998,9,25,0,8,59,59,999,1998,9,25,0,1,59,59,999] );
    addNewTestCase ( new Date(DST_END_1998),
                     "new Date("+DST_END_1998+")",
                     [DST_END_1998,1998,9,25,0,9,0,0,0,1998,9,25,0,1,0,0,0] );
    */
}
// Appends one TestCase per Date accessor to the global |testcases| array,
// comparing DateCase's accessors against the expected values in ResultArray
// (indexed by the TIME / UTC_* / local-component constants defined above).
// Fixes: removed the no-op self-assignment "DateCase = DateCase;" and
// declared |item| with var so it is no longer an accidental implicit global.
function addNewTestCase( DateCase, DateString, ResultArray ) {
    var item = testcases.length;

    testcases[item++] = new TestCase( SECTION, DateString+".getTime()", ResultArray[TIME], DateCase.getTime() );
    testcases[item++] = new TestCase( SECTION, DateString+".valueOf()", ResultArray[TIME], DateCase.valueOf() );
    testcases[item++] = new TestCase( SECTION, DateString+".getUTCFullYear()", ResultArray[UTC_YEAR], DateCase.getUTCFullYear() );
    testcases[item++] = new TestCase( SECTION, DateString+".getUTCMonth()", ResultArray[UTC_MONTH], DateCase.getUTCMonth() );
    testcases[item++] = new TestCase( SECTION, DateString+".getUTCDate()", ResultArray[UTC_DATE], DateCase.getUTCDate() );
    testcases[item++] = new TestCase( SECTION, DateString+".getUTCDay()", ResultArray[UTC_DAY], DateCase.getUTCDay() );
    testcases[item++] = new TestCase( SECTION, DateString+".getUTCHours()", ResultArray[UTC_HOURS], DateCase.getUTCHours() );
    testcases[item++] = new TestCase( SECTION, DateString+".getUTCMinutes()", ResultArray[UTC_MINUTES],DateCase.getUTCMinutes() );
    testcases[item++] = new TestCase( SECTION, DateString+".getUTCSeconds()", ResultArray[UTC_SECONDS],DateCase.getUTCSeconds() );
    testcases[item++] = new TestCase( SECTION, DateString+".getUTCMilliseconds()", ResultArray[UTC_MS], DateCase.getUTCMilliseconds() );
    testcases[item++] = new TestCase( SECTION, DateString+".getFullYear()", ResultArray[YEAR], DateCase.getFullYear() );
    testcases[item++] = new TestCase( SECTION, DateString+".getMonth()", ResultArray[MONTH], DateCase.getMonth() );
    testcases[item++] = new TestCase( SECTION, DateString+".getDate()", ResultArray[DATE], DateCase.getDate() );
    testcases[item++] = new TestCase( SECTION, DateString+".getDay()", ResultArray[DAY], DateCase.getDay() );
    testcases[item++] = new TestCase( SECTION, DateString+".getHours()", ResultArray[HOURS], DateCase.getHours() );
    testcases[item++] = new TestCase( SECTION, DateString+".getMinutes()", ResultArray[MINUTES], DateCase.getMinutes() );
    testcases[item++] = new TestCase( SECTION, DateString+".getSeconds()", ResultArray[SECONDS], DateCase.getSeconds() );
    testcases[item++] = new TestCase( SECTION, DateString+".getMilliseconds()", ResultArray[MS], DateCase.getMilliseconds() );
}
// Runs every queued test case through the harness, recording pass/fail on
// each case, then finalizes the run. |tc| is intentionally the shared
// global loop counter used by the TCMS harness.
function test() {
    for ( tc = 0; tc < testcases.length; tc++ ) {
        var current = testcases[tc];
        var label = current.description + " = " + current.actual;
        current.passed = writeTestCaseResult( current.expect,
                                              current.actual,
                                              label );
    }
    stopTest();

    // all tests must return a boolean value
    return ( testcases );
}
| bsd-3-clause |
mkoistinen/django-cms | cms/tests/frontend/integration/permissions-enabled.js | 1357 | 'use strict';
var helpers = require('djangocms-casper-helpers');
var globals = helpers.settings;
var cms = helpers();
// Fixture: log in and create a page named "Homepage" before each test.
casper.test.setUp(function(done) {
    casper.start().then(cms.login()).then(cms.addPage({ title: 'Homepage' })).run(done);
});

// Fixture: remove the page and log out after each test.
casper.test.tearDown(function(done) {
    casper.start().then(cms.removePage()).then(cms.logout()).run(done);
});

casper.test.begin('Permissions action is available', function(test) {
    casper
        .start()
        .thenOpen(globals.editUrl)
        .then(cms.openSideframe())
        // switch to sideframe
        .withFrame(0, function() {
            casper
                .waitForSelector('.cms-pagetree-jstree')
                // NOTE(review): fixed 3s wait before expanding the tree —
                // presumably to let the page tree finish rendering; confirm
                // whether a targeted waitFor* would be more reliable.
                .wait(3000)
                .then(cms.expandPageTree())
                .then(function() {
                    // Open the options dropdown for the "Homepage" node.
                    var pageId = cms.getPageId('Homepage');
                    this.click('.js-cms-pagetree-options[data-id="' + pageId + '"]');
                })
                .then(cms.waitUntilActionsDropdownLoaded())
                .then(function() {
                    test.assertExists(
                        '.cms-pagetree-dropdown-menu-open a[href*="permission-settings"]',
                        'Permission settings exist in the menu'
                    );
                });
        })
        .run(function() {
            test.done();
        });
});
| bsd-3-clause |
rs2/bokeh | examples/howto/us_marriages_divorces/us_marriages_divorces_interactive.py | 3169 | # coding: utf-8
# Plotting U.S. marriage and divorce statistics
#
# Example code by Randal S. Olson (http://www.randalolson.com)
from bokeh.plotting import figure, show, output_file, ColumnDataSource
from bokeh.models import HoverTool, NumeralTickFormatter
from bokeh.models import SingleIntervalTicker, LinearAxis
# Since the data set is loaded in the bokeh data repository, we can do this:
from bokeh.sampledata.us_marriages_divorces import data
# Work on a copy so the shared bokeh sample dataset is left untouched.
md_data = data.copy()

# Fill in missing data with a simple linear interpolation; ffill/bfill cover
# any gaps remaining at the ends of the series.
md_data = md_data.interpolate(method='linear', axis=0).ffill().bfill()

# Tell Bokeh where to save the interactive chart
output_file('us_marriages_divorces_per_capita.html',
            title='144 years of marriage and divorce in the U.S.A.')

# Set up the data sources for the lines we'll be plotting.
# We need separate data sources for each line because we're
# displaying different data in the hover tool.
source_marriages = ColumnDataSource(
    data=dict(
        # x-axis (Years) for the chart
        x=md_data.Year.values,
        # y-axis (Marriages per capita) for the chart
        y=md_data.Marriages_per_1000.values,
        # The string version of the y-value that is displayed in the hover box
        y_text=md_data.Marriages_per_1000.apply(
            lambda x: '{}'.format(round(x, 1))),
        # Extra descriptive text that is displayed in the hover box
        desc=['marriages per 1,000 people'] * len(md_data),
    )
)

source_divorces = ColumnDataSource(
    data=dict(
        # x-axis (Years) for the chart
        x=md_data.Year.values,
        # y-axis (Divorces per capita) for the chart
        y=md_data.Divorces_per_1000.values,
        # The string version of the y-value that is displayed in the hover box
        y_text=md_data.Divorces_per_1000.apply(
            lambda x: '{}'.format(round(x, 1))),
        # Extra descriptive text that is displayed in the hover box
        desc=['divorces and annulments per 1,000 people'] * len(md_data),
    )
)

# Use HTML to mark up the tooltip that displays over the chart
# Note that the variables in the data sources (above) are referenced with a @
hover = HoverTool(
    tooltips='<font face="Arial" size="3">@y_text @desc in @x</font>',
    mode='vline')

# Select the tools that will be available to the chart
TOOLS = ['pan,wheel_zoom,box_zoom,reset,save'] + [hover]

# x_axis_type=None suppresses the default axis; a custom one is added below.
bplot = figure(tools=TOOLS, plot_width=800, plot_height=500, x_axis_type=None)

# Create a custom x-axis with 10-year intervals
ticker = SingleIntervalTicker(interval=10, num_minor_ticks=0)
xaxis = LinearAxis(ticker=ticker)
bplot.add_layout(xaxis, 'below')

# Customize the y-axis
bplot.yaxis.formatter = NumeralTickFormatter(format='0.0a')
bplot.yaxis.axis_label = '# per 1,000 people'

# Provide a descriptive title for the chart
bplot.title.text = '144 years of marriage and divorce in the U.S.'

# Finally, plot the data!
# Note that the data source determines what is plotted and what shows in
# the tooltips
bplot.line('x', 'y', color='#1f77b4', line_width=3, source=source_marriages)
bplot.line('x', 'y', color='#ff7f0e', line_width=3, source=source_divorces)

show(bplot)
| bsd-3-clause |
adobe/chromium | ppapi/proxy/ppp_video_decoder_proxy.cc | 5699 | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ppapi/proxy/ppp_video_decoder_proxy.h"
#include "ppapi/proxy/host_dispatcher.h"
#include "ppapi/proxy/plugin_globals.h"
#include "ppapi/proxy/plugin_resource_tracker.h"
#include "ppapi/proxy/ppapi_messages.h"
#include "ppapi/proxy/ppb_video_decoder_proxy.h"
#include "ppapi/thunk/enter.h"
#include "ppapi/thunk/ppb_video_decoder_api.h"
#include "ppapi/thunk/thunk.h"
using ppapi::thunk::PPB_VideoDecoder_API;
namespace ppapi {
namespace proxy {
namespace {
// PPP_VideoDecoder_Dev entry point exposed to the host: forwards the
// ProvidePictureBuffers() callback over IPC to the plugin process.
void ProvidePictureBuffers(PP_Instance instance, PP_Resource decoder,
                           uint32_t req_num_of_bufs,
                           const PP_Size* dimensions) {
  HostResource resource;
  resource.SetHostResource(instance, decoder);
  HostDispatcher* dispatcher = HostDispatcher::GetForInstance(instance);
  dispatcher->Send(new PpapiMsg_PPPVideoDecoder_ProvidePictureBuffers(
      API_ID_PPP_VIDEO_DECODER_DEV, resource, req_num_of_bufs, *dimensions));
}
// PPP_VideoDecoder_Dev entry point exposed to the host: forwards the
// DismissPictureBuffer() callback over IPC to the plugin process.
void DismissPictureBuffer(PP_Instance instance, PP_Resource decoder,
                          int32_t picture_buffer_id) {
  HostResource resource;
  resource.SetHostResource(instance, decoder);
  HostDispatcher* dispatcher = HostDispatcher::GetForInstance(instance);
  dispatcher->Send(new PpapiMsg_PPPVideoDecoder_DismissPictureBuffer(
      API_ID_PPP_VIDEO_DECODER_DEV, resource, picture_buffer_id));
}
// PPP_VideoDecoder_Dev entry point exposed to the host: forwards the
// PictureReady() callback over IPC to the plugin process.
void PictureReady(PP_Instance instance, PP_Resource decoder,
                  const PP_Picture_Dev* picture) {
  HostResource resource;
  resource.SetHostResource(instance, decoder);
  HostDispatcher* dispatcher = HostDispatcher::GetForInstance(instance);
  dispatcher->Send(new PpapiMsg_PPPVideoDecoder_PictureReady(
      API_ID_PPP_VIDEO_DECODER_DEV, resource, *picture));
}
// PPP_VideoDecoder_Dev entry point exposed to the host: forwards the
// NotifyError() callback over IPC to the plugin process.
void NotifyError(PP_Instance instance, PP_Resource decoder,
                 PP_VideoDecodeError_Dev error) {
  HostResource resource;
  resource.SetHostResource(instance, decoder);
  HostDispatcher* dispatcher = HostDispatcher::GetForInstance(instance);
  dispatcher->Send(new PpapiMsg_PPPVideoDecoder_NotifyError(
      API_ID_PPP_VIDEO_DECODER_DEV, resource, error));
}
// PPP_VideoDecoder_Dev function table handed out to the host side; each entry
// forwards the callback over IPC (see the functions above).
static const PPP_VideoDecoder_Dev video_decoder_interface = {
  &ProvidePictureBuffers,
  &DismissPictureBuffer,
  &PictureReady,
  &NotifyError
};
// Factory function registered via GetInfo() below.
InterfaceProxy* CreateVideoDecoderPPPProxy(Dispatcher* dispatcher) {
  return new PPP_VideoDecoder_Proxy(dispatcher);
}
} // namespace
PPP_VideoDecoder_Proxy::PPP_VideoDecoder_Proxy(Dispatcher* dispatcher)
    : InterfaceProxy(dispatcher),
      ppp_video_decoder_impl_(NULL) {
  // Only the plugin side has a local PPP_VideoDecoder_Dev implementation to
  // dispatch incoming messages to; on the host side the pointer stays NULL.
  if (dispatcher->IsPlugin()) {
    ppp_video_decoder_impl_ = static_cast<const PPP_VideoDecoder_Dev*>(
        dispatcher->local_get_interface()(PPP_VIDEODECODER_DEV_INTERFACE));
  }
}
// Nothing to release: ppp_video_decoder_impl_ is a borrowed interface pointer.
PPP_VideoDecoder_Proxy::~PPP_VideoDecoder_Proxy() {
}
// static
const InterfaceProxy::Info* PPP_VideoDecoder_Proxy::GetInfo() {
  static const Info info = {
    &video_decoder_interface,        // Interface table exposed to the host.
    PPP_VIDEODECODER_DEV_INTERFACE,  // Interface name string.
    API_ID_PPP_VIDEO_DECODER_DEV,    // Routing id for this proxy's messages.
    false,                           // NOTE(review): presumably is_trusted -- confirm field name.
    &CreateVideoDecoderPPPProxy,     // Factory for this proxy.
  };
  return &info;
}
// Routes incoming PpapiMsg_PPPVideoDecoder_* IPC messages to the OnMsg*
// handlers below. Returns true if the message was recognized.
bool PPP_VideoDecoder_Proxy::OnMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(PPP_VideoDecoder_Proxy, msg)
    IPC_MESSAGE_HANDLER(PpapiMsg_PPPVideoDecoder_ProvidePictureBuffers,
                        OnMsgProvidePictureBuffers)
    IPC_MESSAGE_HANDLER(PpapiMsg_PPPVideoDecoder_DismissPictureBuffer,
                        OnMsgDismissPictureBuffer)
    IPC_MESSAGE_HANDLER(PpapiMsg_PPPVideoDecoder_PictureReady,
                        OnMsgPictureReady)
    IPC_MESSAGE_HANDLER(PpapiMsg_PPPVideoDecoder_NotifyError,
                        OnMsgNotifyError)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  // Only messages addressed to this proxy's API_ID should arrive here.
  DCHECK(handled);
  return handled;
}
// Maps the host resource back to the plugin-side resource id and forwards the
// call to the plugin's PPP_VideoDecoder_Dev implementation.
// NOTE(review): CallWhileUnlocked presumably releases the proxy lock for the
// duration of the plugin call -- confirm against ppapi/shared_impl.
void PPP_VideoDecoder_Proxy::OnMsgProvidePictureBuffers(
    const HostResource& decoder, uint32_t req_num_of_bufs,
    const PP_Size& dimensions) {
  PP_Resource plugin_decoder = PluginGlobals::Get()->plugin_resource_tracker()->
      PluginResourceForHostResource(decoder);
  CallWhileUnlocked(ppp_video_decoder_impl_->ProvidePictureBuffers,
                    decoder.instance(),
                    plugin_decoder,
                    req_num_of_bufs,
                    &dimensions);
}
// Forwards DismissPictureBuffer to the plugin's implementation (see
// OnMsgProvidePictureBuffers for the resource-mapping pattern).
void PPP_VideoDecoder_Proxy::OnMsgDismissPictureBuffer(
    const HostResource& decoder, int32_t picture_id) {
  PP_Resource plugin_decoder = PluginGlobals::Get()->plugin_resource_tracker()->
      PluginResourceForHostResource(decoder);
  CallWhileUnlocked(ppp_video_decoder_impl_->DismissPictureBuffer,
                    decoder.instance(),
                    plugin_decoder,
                    picture_id);
}
// Forwards PictureReady to the plugin's implementation.
void PPP_VideoDecoder_Proxy::OnMsgPictureReady(
    const HostResource& decoder, const PP_Picture_Dev& picture) {
  PP_Resource plugin_decoder = PluginGlobals::Get()->plugin_resource_tracker()->
      PluginResourceForHostResource(decoder);
  CallWhileUnlocked(ppp_video_decoder_impl_->PictureReady,
                    decoder.instance(),
                    plugin_decoder,
                    &picture);
}
// Forwards NotifyError to the plugin's implementation.
void PPP_VideoDecoder_Proxy::OnMsgNotifyError(
    const HostResource& decoder, PP_VideoDecodeError_Dev error) {
  PP_Resource plugin_decoder = PluginGlobals::Get()->plugin_resource_tracker()->
      PluginResourceForHostResource(decoder);
  CallWhileUnlocked(ppp_video_decoder_impl_->NotifyError,
                    decoder.instance(),
                    plugin_decoder,
                    error);
}
} // namespace proxy
} // namespace ppapi
| bsd-3-clause |
renyuneyun/core | src/Settings/DatabaseSettingsRepository.php | 1225 | <?php
/*
* This file is part of Flarum.
*
* (c) Toby Zerner <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Flarum\Settings;
use Illuminate\Database\ConnectionInterface;
/**
 * Settings repository backed by the `settings` database table.
 */
class DatabaseSettingsRepository implements SettingsRepositoryInterface
{
    /**
     * @var ConnectionInterface The database connection used for all queries.
     */
    protected $database;

    /**
     * @param ConnectionInterface $connection
     */
    public function __construct(ConnectionInterface $connection)
    {
        $this->database = $connection;
    }

    /**
     * Return all settings as a key => value array.
     */
    public function all()
    {
        return $this->database->table('settings')->lists('value', 'key');
    }

    /**
     * Look up a single setting, returning $default when the key is absent.
     */
    public function get($key, $default = null)
    {
        if (is_null($value = $this->database->table('settings')->where('key', $key)->value('value'))) {
            return $default;
        }
        return $value;
    }

    /**
     * Create or update a setting (insert when the key does not exist yet).
     */
    public function set($key, $value)
    {
        $query = $this->database->table('settings')->where('key', $key);
        $method = $query->exists() ? 'update' : 'insert';
        $query->$method(compact('key', 'value'));
    }

    /**
     * Delete every setting whose key matches the given SQL LIKE pattern.
     */
    public function delete($keyLike)
    {
        $this->database->table('settings')->where('key', 'like', $keyLike)->delete();
    }
}
| mit |
Jiayili1/corefx | src/System.Drawing.Common/tests/Performance/CustomAssemblyAttributes.cs | 344 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using Xunit;
[assembly: TestCaseOrderer("Microsoft.DotNet.XUnitExtensions.BenchmarkFilter", "System.Drawing.Common.Performance.Tests")]
| mit |
justyns/home-assistant | homeassistant/components/switch/mystrom.py | 3128 | """
Support for myStrom switches.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/switch.mystrom/
"""
import logging
import requests
from homeassistant.components.switch import SwitchDevice
DEFAULT_NAME = 'myStrom Switch'
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Find and return myStrom switch.

    Returns False when the required 'host' option is missing or the device
    is unreachable; otherwise registers one MyStromSwitch entity.
    """
    host = config.get('host')
    if host is None:
        _LOGGER.error('Missing required variable: host')
        return False
    resource = 'http://{}'.format(host)
    # Probe the device once so a wrong IP address fails fast during setup.
    try:
        requests.get(resource, timeout=10)
    except requests.exceptions.ConnectionError:
        _LOGGER.error("No route to device %s. "
                      "Please check the IP address in the configuration file",
                      host)
        return False
    add_devices([MyStromSwitch(
        config.get('name', DEFAULT_NAME),
        resource)])
class MyStromSwitch(SwitchDevice):
    """Representation of a myStrom switch."""

    def __init__(self, name, resource):
        """Initialize the myStrom switch.

        name: friendly name shown in the frontend.
        resource: base URL of the device, e.g. 'http://192.168.1.10'.
        """
        self._state = False
        self._name = name
        self._resource = resource
        # Last reported power consumption in mWh (updated by update()).
        self.consumption = 0

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def is_on(self):
        """Return true if switch is on."""
        return self._state

    @property
    def current_power_mwh(self):
        """Return the current power consumption in mWh."""
        return self.consumption

    def turn_on(self, **kwargs):
        """Turn the switch on."""
        self._set_relay(True)

    def turn_off(self, **kwargs):
        """Turn the switch off."""
        self._set_relay(False)

    def _set_relay(self, on):
        """Set the relay state via the device's REST API.

        Only updates the cached state when the device confirms with HTTP 200.
        """
        try:
            request = requests.get('{}/relay'.format(self._resource),
                                   params={'state': '1' if on else '0'},
                                   timeout=10)
            if request.status_code == 200:
                self._state = on
        except requests.exceptions.ConnectionError:
            # Fix: the original logged "Can't turn on" for both directions.
            _LOGGER.error("Can't turn %s %s. Is device offline?",
                          'on' if on else 'off', self._resource)

    def update(self):
        """Get the latest data from REST API and update the state."""
        try:
            request = requests.get('{}/report'.format(self._resource),
                                   timeout=10)
            data = request.json()
            self._state = bool(data['relay'])
            self.consumption = data['power']
        except requests.exceptions.ConnectionError:
            _LOGGER.error("No route to device '%s'. Is device offline?",
                          self._resource)
| mit |
darklilium/Factigis_2 | arcgis_js_api/library/3.17/3.17compact/dojox/widget/nls/de/Wizard.js | 100 | //>>built
define("dojox/widget/nls/de/Wizard",{next:"Weiter",previous:"Zur\u00fcck",done:"Fertig"}); | mit |
Gargol/Ghost | core/server/api/v2/notifications.js | 8260 | const moment = require('moment-timezone');
const semver = require('semver');
const Promise = require('bluebird');
const _ = require('lodash');
const settingsCache = require('../../services/settings/cache');
const ghostVersion = require('../../lib/ghost-version');
const common = require('../../lib/common');
const ObjectId = require('bson-objectid');
const api = require('./index');
const internalContext = {context: {internal: true}};
const _private = {};
// Returns all stored notifications with `addedAt` normalized back into a
// Date object. Falls back to an empty array when the settings cache has no
// 'notifications' entry yet (the original crashed on `forEach` in that case).
_private.fetchAllNotifications = () => {
    let allNotifications = settingsCache.get('notifications') || [];

    allNotifications.forEach((notification) => {
        notification.addedAt = moment(notification.addedAt).toDate();
    });

    return allNotifications;
};
// Whether the given user has already seen this notification. Legacy
// notifications carry a single boolean `seen`; newer ones record the ids of
// the users that dismissed them in `seenBy`.
_private.wasSeen = (notification, user) => {
    const seenBy = notification.seenBy;
    return seenBy === undefined ? notification.seen : seenBy.includes(user.id);
};
module.exports = {
    docName: 'notifications',

    // Browse: returns the notifications the requesting user has not seen yet,
    // newest first.
    browse: {
        permissions: true,
        query(frame) {
            let allNotifications = _private.fetchAllNotifications();
            allNotifications = _.orderBy(allNotifications, 'addedAt', 'desc');

            allNotifications = allNotifications.filter((notification) => {
                // NOTE: Filtering by version below is just a patch for bigger problem - notifications are not removed
                // after Ghost update. Logic below should be removed when Ghost upgrade detection
                // is done (https://github.com/TryGhost/Ghost/issues/10236) and notifications are
                // be removed permanently on upgrade event.
                const ghost20RegEx = /Ghost 2.0 is now available/gi;

                // CASE: do not return old release notification
                if (notification.message && (!notification.custom || notification.message.match(ghost20RegEx))) {
                    let notificationVersion = notification.message.match(/(\d+\.)(\d+\.)(\d+)/);

                    if (notification.message.match(ghost20RegEx)) {
                        notificationVersion = '2.0.0';
                    } else if (notificationVersion){
                        notificationVersion = notificationVersion[0];
                    }

                    const blogVersion = ghostVersion.full.match(/^(\d+\.)(\d+\.)(\d+)/);

                    // Only keep release notifications about versions newer than this blog.
                    if (notificationVersion && blogVersion && semver.gt(notificationVersion, blogVersion[0])) {
                        return true;
                    } else {
                        return false;
                    }
                }

                return !_private.wasSeen(notification, frame.user);
            });

            return allNotifications;
        }
    },

    // Add: stores new notifications, skipping ids that already exist.
    add: {
        // 201 when at least one notification was actually added, else 200.
        statusCode(result) {
            if (result.notifications.length) {
                return 201;
            } else {
                return 200;
            }
        },
        permissions: true,
        query(frame) {
            const defaults = {
                dismissible: true,
                location: 'bottom',
                status: 'alert',
                id: ObjectId.generate()
            };
            // Fields the caller may not override.
            const overrides = {
                seen: false,
                addedAt: moment().toDate()
            };

            let notificationsToCheck = frame.data.notifications;
            let notificationsToAdd = [];

            const allNotifications = _private.fetchAllNotifications();

            notificationsToCheck.forEach((notification) => {
                const isDuplicate = allNotifications.find((n) => {
                    return n.id === notification.id;
                });

                if (!isDuplicate) {
                    notificationsToAdd.push(Object.assign({}, defaults, notification, overrides));
                }
            });

            const hasReleaseNotification = notificationsToCheck.find((notification) => {
                return !notification.custom;
            });

            // CASE: remove any existing release notifications if a new release notification comes in
            if (hasReleaseNotification) {
                _.remove(allNotifications, (el) => {
                    return !el.custom;
                });
            }

            // CASE: nothing to add, skip
            if (!notificationsToAdd.length) {
                return Promise.resolve();
            }

            const releaseNotificationsToAdd = notificationsToAdd.filter((notification) => {
                return !notification.custom;
            });

            // CASE: reorder notifications before save -- keep only the newest
            // release notification, after all custom ones.
            if (releaseNotificationsToAdd.length > 1) {
                notificationsToAdd = notificationsToAdd.filter((notification) => {
                    return notification.custom;
                });
                notificationsToAdd.push(_.orderBy(releaseNotificationsToAdd, 'created_at', 'desc')[0]);
            }

            return api.settings.edit({
                settings: [{
                    key: 'notifications',
                    // @NOTE: We always need to store all notifications!
                    value: allNotifications.concat(notificationsToAdd)
                }]
            }, internalContext).then(() => {
                return notificationsToAdd;
            });
        }
    },

    // Destroy: marks a single notification as seen by the requesting user.
    destroy: {
        statusCode: 204,
        options: ['notification_id'],
        validation: {
            options: {
                notification_id: {
                    required: true
                }
            }
        },
        permissions: true,
        query(frame) {
            const allNotifications = _private.fetchAllNotifications();

            const notificationToMarkAsSeen = allNotifications.find((notification) => {
                return notification.id === frame.options.notification_id;
            }),
                notificationToMarkAsSeenIndex = allNotifications.findIndex((notification) => {
                    return notification.id === frame.options.notification_id;
                });

            if (notificationToMarkAsSeenIndex > -1 && !notificationToMarkAsSeen.dismissible) {
                return Promise.reject(new common.errors.NoPermissionError({
                    message: common.i18n.t('errors.api.notifications.noPermissionToDismissNotif')
                }));
            }

            if (notificationToMarkAsSeenIndex < 0) {
                return Promise.reject(new common.errors.NotFoundError({
                    message: common.i18n.t('errors.api.notifications.notificationDoesNotExist')
                }));
            }

            // Already seen by this user: nothing to persist.
            if (_private.wasSeen(notificationToMarkAsSeen, frame.user)) {
                return Promise.resolve();
            }

            // @NOTE: We don't remove the notifications, because otherwise we will receive them again from the service.
            allNotifications[notificationToMarkAsSeenIndex].seen = true;

            if (!allNotifications[notificationToMarkAsSeenIndex].seenBy) {
                allNotifications[notificationToMarkAsSeenIndex].seenBy = [];
            }

            allNotifications[notificationToMarkAsSeenIndex].seenBy.push(frame.user.id);

            return api.settings.edit({
                settings: [{
                    key: 'notifications',
                    value: allNotifications
                }]
            }, internalContext).return();
        }
    },

    /**
     * Clears all notifications. Method used in tests only
     *
     * @private Not exposed over HTTP
     */
    destroyAll: {
        statusCode: 204,
        permissions: {
            method: 'destroy'
        },
        query() {
            const allNotifications = _private.fetchAllNotifications();

            allNotifications.forEach((notification) => {
                // @NOTE: We don't remove the notifications, because otherwise we will receive them again from the service.
                notification.seen = true;
            });

            return api.settings.edit({
                settings: [{
                    key: 'notifications',
                    value: allNotifications
                }]
            }, internalContext).return();
        }
    }
};
| mit |
shopov91/Telerik-Academy-High-Quality-Code-Course | Design-Patterns/06. DI and IoC containers/demos/ConsoleWebServer - mid/ConsoleWebServer.Framework/Handlers/OptionsHandler.cs | 1254 | namespace ConsoleWebServer.Framework.Handlers
{
using System;
using System.Linq;
using System.Net;
using System.Reflection;
using ConsoleWebServer.Framework.ActionResults;
    /// <summary>
    /// Handles HTTP OPTIONS requests by returning a list of every route the
    /// server exposes, one per line.
    /// </summary>
    public class OptionsHandler : Handler
    {
        /// <summary>Only requests with the OPTIONS method are handled.</summary>
        protected override bool CanHandle(IHttpRequest request)
        {
            return request.Method.ToLower() == "options";
        }

        /// <summary>
        /// Builds the route list by reflecting over all *Controller types in
        /// the entry assembly and their IActionResult-returning methods,
        /// formatted as "/{controller}/{action}/{parameter}".
        /// </summary>
        protected override HttpResponse Handle(IHttpRequest request)
        {
            var routes =
                Assembly.GetEntryAssembly()
                    .GetTypes()
                    .Where(x => x.Name.EndsWith("Controller") && typeof(Controller).IsAssignableFrom(x))
                    .Select(
                        x => new { x.Name, Methods = x.GetMethods().Where(m => m.ReturnType == typeof(IActionResult)) })
                    .SelectMany(
                        x =>
                        x.Methods.Select(
                            m =>
                            string.Format("/{0}/{1}/{{parameter}}", x.Name.Replace("Controller", string.Empty), m.Name)))
                    .ToList();
            return new HttpResponse(request.ProtocolVersion, HttpStatusCode.OK, string.Join(Environment.NewLine, routes));
        }
    }
}
| mit |
mandino/hotelmilosantabarbara.com | wp-content/plugins/events-calendar-pro/src/Tribe/Asset/Ajax_Photoview.php | 1313 | <?php
/**
 * Enqueues the scripts required by the AJAX-driven photo view
 * (imagesloaded + isotope + the photo-view script) and exposes the
 * AJAX endpoint and current page to the frontend via TribePhoto.
 */
class Tribe__Events__Pro__Asset__Ajax_Photoview extends Tribe__Events__Pro__Asset__Abstract_Asset {
	public function handle() {
		// Current page index for paginated AJAX loading (0 = first page).
		$tribe_paged = ( ! empty( $_REQUEST['tribe_paged'] ) ) ? $_REQUEST['tribe_paged'] : 0;
		$ajax_data = array(
			'ajaxurl'     => admin_url( 'admin-ajax.php', ( is_ssl() ? 'https' : 'http' ) ),
			'tribe_paged' => $tribe_paged,
		);
		$version = apply_filters( 'tribe_events_pro_js_version', Tribe__Events__Pro__Main::VERSION );
		// Dependency chain: pro core -> imagesloaded -> isotope -> photo view.
		$imagesloaded_path = Tribe__Events__Template_Factory::getMinFile( $this->vendor_url . 'imagesloaded/imagesloaded.pkgd.js', true );
		wp_enqueue_script( 'tribe-events-pro-imagesloaded', $imagesloaded_path, array( 'tribe-events-pro' ), $version, true );
		$isotope_path = Tribe__Events__Template_Factory::getMinFile( $this->vendor_url . 'isotope/isotope.pkgd.js', true );
		wp_enqueue_script( 'tribe-events-pro-isotope', $isotope_path, array( 'tribe-events-pro-imagesloaded' ), $version, true );
		$photoview_path = Tribe__Events__Template_Factory::getMinFile( tribe_events_pro_resource_url( 'tribe-events-photo-view.js' ), true );
		wp_enqueue_script( 'tribe-events-pro-photo', $photoview_path, array( 'tribe-events-pro-isotope' ), $version, true );
		wp_localize_script( 'tribe-events-pro-photo', 'TribePhoto', $ajax_data );
	}
}
| mit |
ryanbradynd05/polyflix | server/node/tasks/config/cssmin.js | 555 | /**
* Compress CSS files.
*
* ---------------------------------------------------------------
*
* Minifies css files and places them into .tmp/public/min directory.
*
*/
module.exports = function(gulp, plugins, growl) {
gulp.task('cssmin:dist', function() {
return gulp.src('.tmp/public/concat/production.css')
.pipe(plugins.rename({ suffix: '.min' }))
.pipe(plugins.minifyCss())
.pipe(gulp.dest('./.tmp/public/min'))
.pipe(plugins.if(growl, plugins.notify({ message: 'Minify CSS task complete' })));
});
};
| mit |
MTASZTAKI/ApertusVR | plugins/languageAPI/jsAPI/3rdParty/nodejs/10.1.0/source/deps/v8/src/ia32/frame-constants-ia32.cc | 910 | // Copyright 2006-2008 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_IA32
#include "src/assembler.h"
#include "src/frame-constants.h"
#include "src/ia32/assembler-ia32-inl.h"
#include "src/ia32/assembler-ia32.h"
#include "src/ia32/frame-constants-ia32.h"
namespace v8 {
namespace internal {
// Frame pointer register for JavaScript frames on IA-32.
Register JavaScriptFrame::fp_register() { return ebp; }
// Context register for JavaScript frames on IA-32.
Register JavaScriptFrame::context_register() { return esi; }
// IA-32 has no embedded constant pool, so this must never be called.
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }

// One stack slot per interpreter register; no extra slots on this port.
int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
  return register_count;
}

// No stack-alignment padding slots are needed on IA-32.
int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
  USE(register_count);
  return 0;
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_IA32
| mit |
guncoin/guncoin | src/wallet/feebumper.cpp | 11910 | // Copyright (c) 2017-2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <consensus/validation.h>
#include <wallet/coincontrol.h>
#include <wallet/feebumper.h>
#include <wallet/fees.h>
#include <wallet/wallet.h>
#include <policy/fees.h>
#include <policy/policy.h>
#include <policy/rbf.h>
#include <validation.h> //for mempool access
#include <txmempool.h>
#include <utilmoneystr.h>
#include <util.h>
#include <net.h>
//! Check whether transaction has descendant in wallet or mempool, or has been
//! mined, or conflicts with a mined transaction. Return a feebumper::Result.
static feebumper::Result PreconditionChecks(const CWallet* wallet, const CWalletTx& wtx, std::vector<std::string>& errors) EXCLUSIVE_LOCKS_REQUIRED(wallet->cs_wallet)
{
    // A wallet descendant spending this tx's outputs would be invalidated by
    // replacing it.
    if (wallet->HasWalletSpend(wtx.GetHash())) {
        errors.push_back("Transaction has descendants in the wallet");
        return feebumper::Result::INVALID_PARAMETER;
    }

    {
        LOCK(mempool.cs);
        // Count > 1 means the entry has mempool descendants besides itself.
        auto it_mp = mempool.mapTx.find(wtx.GetHash());
        if (it_mp != mempool.mapTx.end() && it_mp->GetCountWithDescendants() > 1) {
            errors.push_back("Transaction has descendants in the mempool");
            return feebumper::Result::INVALID_PARAMETER;
        }
    }

    // Depth != 0: the tx is confirmed, or conflicts with a confirmed tx.
    if (wtx.GetDepthInMainChain() != 0) {
        errors.push_back("Transaction has been mined, or is conflicted with a mined transaction");
        return feebumper::Result::WALLET_ERROR;
    }

    // Only transactions that opted in to BIP 125 replacement can be bumped.
    if (!SignalsOptInRBF(*wtx.tx)) {
        errors.push_back("Transaction is not BIP 125 replaceable");
        return feebumper::Result::WALLET_ERROR;
    }

    if (wtx.mapValue.count("replaced_by_txid")) {
        errors.push_back(strprintf("Cannot bump transaction %s which was already bumped by transaction %s", wtx.GetHash().ToString(), wtx.mapValue.at("replaced_by_txid")));
        return feebumper::Result::WALLET_ERROR;
    }

    // check that original tx consists entirely of our inputs
    // if not, we can't bump the fee, because the wallet has no way of knowing the value of the other inputs (thus the fee)
    if (!wallet->IsAllFromMe(*wtx.tx, ISMINE_SPENDABLE)) {
        errors.push_back("Transaction contains inputs that don't belong to this wallet");
        return feebumper::Result::WALLET_ERROR;
    }

    return feebumper::Result::OK;
}
namespace feebumper {
bool TransactionCanBeBumped(const CWallet* wallet, const uint256& txid)
{
LOCK2(cs_main, wallet->cs_wallet);
const CWalletTx* wtx = wallet->GetWalletTx(txid);
if (wtx == nullptr) return false;
std::vector<std::string> errors_dummy;
feebumper::Result res = PreconditionChecks(wallet, *wtx, errors_dummy);
return res == feebumper::Result::OK;
}
// Builds the fee-bumped replacement |mtx| for wallet transaction |txid|.
// The bump is paid for entirely by shrinking (or dropping) the single change
// output. |total_fee| == 0 means "estimate"; otherwise it is the exact new
// fee requested by the caller. Outputs |old_fee| and |new_fee| for reporting.
Result CreateTransaction(const CWallet* wallet, const uint256& txid, const CCoinControl& coin_control, CAmount total_fee, std::vector<std::string>& errors,
                         CAmount& old_fee, CAmount& new_fee, CMutableTransaction& mtx)
{
    LOCK2(cs_main, wallet->cs_wallet);
    errors.clear();
    auto it = wallet->mapWallet.find(txid);
    if (it == wallet->mapWallet.end()) {
        errors.push_back("Invalid or non-wallet transaction id");
        return Result::INVALID_ADDRESS_OR_KEY;
    }
    const CWalletTx& wtx = it->second;

    Result result = PreconditionChecks(wallet, wtx, errors);
    if (result != Result::OK) {
        return result;
    }

    // figure out which output was change
    // if there was no change output or multiple change outputs, fail
    int nOutput = -1;
    for (size_t i = 0; i < wtx.tx->vout.size(); ++i) {
        if (wallet->IsChange(wtx.tx->vout[i])) {
            if (nOutput != -1) {
                errors.push_back("Transaction has multiple change outputs");
                return Result::WALLET_ERROR;
            }
            nOutput = i;
        }
    }
    if (nOutput == -1) {
        errors.push_back("Transaction does not have a change output");
        return Result::WALLET_ERROR;
    }

    // Calculate the expected size of the new transaction.
    int64_t txSize = GetVirtualTransactionSize(*(wtx.tx));
    // Negative size means at least one input cannot be signed by this wallet.
    const int64_t maxNewTxSize = CalculateMaximumSignedTxSize(*wtx.tx, wallet);
    if (maxNewTxSize < 0) {
        errors.push_back("Transaction contains inputs that cannot be signed");
        return Result::INVALID_ADDRESS_OR_KEY;
    }

    // calculate the old fee and fee-rate
    old_fee = wtx.GetDebit(ISMINE_SPENDABLE) - wtx.tx->GetValueOut();
    CFeeRate nOldFeeRate(old_fee, txSize);
    CFeeRate nNewFeeRate;
    // The wallet uses a conservative WALLET_INCREMENTAL_RELAY_FEE value to
    // future proof against changes to network wide policy for incremental relay
    // fee that our node may not be aware of.
    CFeeRate walletIncrementalRelayFee = CFeeRate(WALLET_INCREMENTAL_RELAY_FEE);
    if (::incrementalRelayFee > walletIncrementalRelayFee) {
        walletIncrementalRelayFee = ::incrementalRelayFee;
    }

    if (total_fee > 0) {
        // Caller requested an explicit total fee: validate it against BIP 125
        // rule 4 (old fee + incremental relay fee) and the wallet's floor.
        CAmount minTotalFee = nOldFeeRate.GetFee(maxNewTxSize) + ::incrementalRelayFee.GetFee(maxNewTxSize);
        if (total_fee < minTotalFee) {
            errors.push_back(strprintf("Insufficient totalFee, must be at least %s (oldFee %s + incrementalFee %s)",
                FormatMoney(minTotalFee), FormatMoney(nOldFeeRate.GetFee(maxNewTxSize)), FormatMoney(::incrementalRelayFee.GetFee(maxNewTxSize))));
            return Result::INVALID_PARAMETER;
        }
        CAmount requiredFee = GetRequiredFee(*wallet, maxNewTxSize);
        if (total_fee < requiredFee) {
            errors.push_back(strprintf("Insufficient totalFee (cannot be less than required fee %s)",
                FormatMoney(requiredFee)));
            return Result::INVALID_PARAMETER;
        }
        new_fee = total_fee;
        nNewFeeRate = CFeeRate(total_fee, maxNewTxSize);
    } else {
        // No explicit fee requested: estimate one.
        new_fee = GetMinimumFee(*wallet, maxNewTxSize, coin_control, mempool, ::feeEstimator, nullptr /* FeeCalculation */);
        nNewFeeRate = CFeeRate(new_fee, maxNewTxSize);

        // New fee rate must be at least old rate + minimum incremental relay rate
        // walletIncrementalRelayFee.GetFeePerK() should be exact, because it's initialized
        // in that unit (fee per kb).
        // However, nOldFeeRate is a calculated value from the tx fee/size, so
        // add 1 satoshi to the result, because it may have been rounded down.
        if (nNewFeeRate.GetFeePerK() < nOldFeeRate.GetFeePerK() + 1 + walletIncrementalRelayFee.GetFeePerK()) {
            nNewFeeRate = CFeeRate(nOldFeeRate.GetFeePerK() + 1 + walletIncrementalRelayFee.GetFeePerK());
            new_fee = nNewFeeRate.GetFee(maxNewTxSize);
        }
    }

    // Check that in all cases the new fee doesn't violate maxTxFee
    if (new_fee > maxTxFee) {
        errors.push_back(strprintf("Specified or calculated fee %s is too high (cannot be higher than maxTxFee %s)",
            FormatMoney(new_fee), FormatMoney(maxTxFee)));
        return Result::WALLET_ERROR;
    }

    // check that fee rate is higher than mempool's minimum fee
    // (no point in bumping fee if we know that the new tx won't be accepted to the mempool)
    // This may occur if the user set TotalFee or paytxfee too low, if fallbackfee is too low, or, perhaps,
    // in a rare situation where the mempool minimum fee increased significantly since the fee estimation just a
    // moment earlier. In this case, we report an error to the user, who may use total_fee to make an adjustment.
    CFeeRate minMempoolFeeRate = mempool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
    if (nNewFeeRate.GetFeePerK() < minMempoolFeeRate.GetFeePerK()) {
        errors.push_back(strprintf(
            "New fee rate (%s) is lower than the minimum fee rate (%s) to get into the mempool -- "
            "the totalFee value should be at least %s or the settxfee value should be at least %s to add transaction",
            FormatMoney(nNewFeeRate.GetFeePerK()),
            FormatMoney(minMempoolFeeRate.GetFeePerK()),
            FormatMoney(minMempoolFeeRate.GetFee(maxNewTxSize)),
            FormatMoney(minMempoolFeeRate.GetFeePerK())));
        return Result::WALLET_ERROR;
    }

    // Now modify the output to increase the fee.
    // If the output is not large enough to pay the fee, fail.
    CAmount nDelta = new_fee - old_fee;
    assert(nDelta > 0);
    mtx = CMutableTransaction{*wtx.tx};
    CTxOut* poutput = &(mtx.vout[nOutput]);
    if (poutput->nValue < nDelta) {
        errors.push_back("Change output is too small to bump the fee");
        return Result::WALLET_ERROR;
    }

    // If the output would become dust, discard it (converting the dust to fee)
    poutput->nValue -= nDelta;
    if (poutput->nValue <= GetDustThreshold(*poutput, GetDiscardRate(*wallet, ::feeEstimator))) {
        wallet->WalletLogPrintf("Bumping fee and discarding dust output\n");
        new_fee += poutput->nValue;
        mtx.vout.erase(mtx.vout.begin() + nOutput);
    }

    // Mark new tx not replaceable, if requested.
    if (!coin_control.m_signal_bip125_rbf.get_value_or(wallet->m_signal_rbf)) {
        for (auto& input : mtx.vin) {
            if (input.nSequence < 0xfffffffe) input.nSequence = 0xfffffffe;
        }
    }

    return Result::OK;
}
// Signs all inputs of |mtx| with the wallet's keys; returns false if any
// input could not be signed.
bool SignTransaction(CWallet* wallet, CMutableTransaction& mtx) {
    LOCK2(cs_main, wallet->cs_wallet);
    return wallet->SignTransaction(mtx);
}
// Commits and broadcasts the fee-bumped transaction |mtx|, records the new
// txid in |bumped_txid| and marks the original |txid| as replaced. A mempool
// rejection is reported through |errors| but still returns Result::OK.
Result CommitTransaction(CWallet* wallet, const uint256& txid, CMutableTransaction&& mtx, std::vector<std::string>& errors, uint256& bumped_txid)
{
    LOCK2(cs_main, wallet->cs_wallet);
    if (!errors.empty()) {
        return Result::MISC_ERROR;
    }
    auto it = txid.IsNull() ? wallet->mapWallet.end() : wallet->mapWallet.find(txid);
    if (it == wallet->mapWallet.end()) {
        errors.push_back("Invalid or non-wallet transaction id");
        return Result::MISC_ERROR;
    }
    CWalletTx& oldWtx = it->second;

    // make sure the transaction still has no descendants and hasn't been mined in the meantime
    Result result = PreconditionChecks(wallet, oldWtx, errors);
    if (result != Result::OK) {
        return result;
    }

    // commit/broadcast the tx
    CTransactionRef tx = MakeTransactionRef(std::move(mtx));
    mapValue_t mapValue = oldWtx.mapValue;
    // Link the replacement back to the original for wallet bookkeeping.
    mapValue["replaces_txid"] = oldWtx.GetHash().ToString();

    CReserveKey reservekey(wallet);
    CValidationState state;
    if (!wallet->CommitTransaction(tx, std::move(mapValue), oldWtx.vOrderForm, oldWtx.strFromAccount, reservekey, g_connman.get(), state)) {
        // NOTE: CommitTransaction never returns false, so this should never happen.
        errors.push_back(strprintf("The transaction was rejected: %s", FormatStateMessage(state)));
        return Result::WALLET_ERROR;
    }

    bumped_txid = tx->GetHash();
    if (state.IsInvalid()) {
        // This can happen if the mempool rejected the transaction. Report
        // what happened in the "errors" response.
        errors.push_back(strprintf("Error: The transaction was rejected: %s", FormatStateMessage(state)));
    }

    // mark the original tx as bumped
    if (!wallet->MarkReplaced(oldWtx.GetHash(), bumped_txid)) {
        // TODO: see if JSON-RPC has a standard way of returning a response
        // along with an exception. It would be good to return information about
        // wtxBumped to the caller even if marking the original transaction
        // replaced does not succeed for some reason.
        errors.push_back("Created new bumpfee transaction but could not mark the original transaction as replaced");
    }
    return Result::OK;
}
} // namespace feebumper
| mit |
jamesls/boto | tests/integration/cloudformation/test_cert_verification.py | 1530 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
import boto.cloudformation
class CertVerificationTest(unittest.TestCase):
    # Category flags -- presumably consumed by the test runner to
    # select/skip test groups; verify against the runner configuration.
    cloudformation = True
    ssl = True

    def test_certs(self):
        # Connecting and issuing a real request in every region exercises
        # SSL certificate verification against each regional endpoint.
        for region in boto.cloudformation.regions():
            c = region.connect()
            c.describe_stacks()
| mit |
fuzzysteve/reactions | db.inc.php | 74 | <?php $dbh = new PDO('mysql:host=localhost;dbname=eve', 'eve', 'eve'); ?>
| mit |
ahmadassaf/Chrome-devtools-app | build/Chrome DevTools/osx/Chrome DevTools.app/Contents/Resources/app.nw/devtools/front_end/promises/PromisePane.js | 12681 | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/**
 * Promises sidebar pane: a data grid of tracked promises with hover popovers.
 * @constructor
 * @extends {WebInspector.VBox}
 * @implements {WebInspector.TargetManager.Observer}
 */
WebInspector.PromisePane = function()
{
    WebInspector.VBox.call(this);
    this.registerRequiredCSS("promises/promisePane.css");
    this.element.classList.add("promises");
    // Promise tracking stays off until explicitly enabled for a target.
    this._enabled = false;
    this._target = null;

    this._dataGridContainer = new WebInspector.VBox();
    this._dataGridContainer.show(this.element);
    // FIXME: Make "status" column width fixed to ~16px.
    var columns = [
        { id: "status", weight: 1 },
        { id: "function", title: WebInspector.UIString("Function"), disclosure: true, weight: 10 },
        { id: "created", title: WebInspector.UIString("Created"), weight: 10 },
        { id: "settled", title: WebInspector.UIString("Settled"), weight: 10 },
        { id: "tts", title: WebInspector.UIString("Time to settle"), weight: 10 }
    ];
    this._dataGrid = new WebInspector.DataGrid(columns, undefined, undefined, undefined, this._onContextMenu.bind(this));
    this._dataGrid.show(this._dataGridContainer.element);

    this._linkifier = new WebInspector.Linkifier();
    // Throttler interval of 1000 -- presumably milliseconds; confirm against
    // WebInspector.Throttler.
    this._throttler = new WebInspector.Throttler(1000);

    this._popoverHelper = new WebInspector.PopoverHelper(this.element, this._getPopoverAnchor.bind(this), this._showPopover.bind(this), this._onHidePopover.bind(this));
    this._popoverHelper.setTimeout(250, 250);
    this.element.addEventListener("click", this._hidePopover.bind(this), true);

    WebInspector.targetManager.addModelListener(WebInspector.DebuggerModel, WebInspector.DebuggerModel.Events.DebuggerPaused, this._debuggerStateChanged, this);
    WebInspector.targetManager.addModelListener(WebInspector.DebuggerModel, WebInspector.DebuggerModel.Events.DebuggerResumed, this._debuggerStateChanged, this);
    WebInspector.context.addFlavorChangeListener(WebInspector.Target, this._targetChanged, this);
    WebInspector.targetManager.observeTargets(this);
}

// Symbol used to stash per-row details objects on data grid nodes.
WebInspector.PromisePane._detailsSymbol = Symbol("details");
WebInspector.PromisePane.prototype = {
/**
* @override
* @param {!WebInspector.Target} target
*/
targetAdded: function(target)
{
if (!this._enabled)
return;
this._enablePromiseTracker(target);
},
/**
* @override
* @param {!WebInspector.Target} target
*/
targetRemoved: function(target)
{
if (!this._enabled)
return;
if (this._target === target) {
this._clear();
this._target = null;
}
},
/**
* @param {!WebInspector.Event} event
*/
_targetChanged: function(event)
{
if (!this._enabled)
return;
var target = /** @type {!WebInspector.Target} */ (event.data);
if (this._target === target)
return;
this._clear();
this._target = target;
this._scheduleDataUpdate(true);
},
/** @override */
wasShown: function()
{
if (!this._enabled) {
this._enabled = true;
this._target = WebInspector.context.flavor(WebInspector.Target);
WebInspector.targetManager.targets().forEach(this._enablePromiseTracker, this);
}
this._scheduleDataUpdate(true);
},
/** @override */
willHide: function()
{
this._hidePopover();
},
_hidePopover: function()
{
this._popoverHelper.hidePopover();
},
_onHidePopover: function()
{
this._scheduleDataUpdate(true);
},
/**
* @param {!WebInspector.Target} target
*/
_enablePromiseTracker: function(target)
{
target.debuggerAgent().enablePromiseTracker(true);
},
_debuggerStateChanged: function()
{
this._hidePopover();
this._scheduleDataUpdate(true);
},
/**
* @param {!DebuggerAgent.PromiseDetails} p1
* @param {!DebuggerAgent.PromiseDetails} p2
* @return {number}
*/
_comparePromises: function(p1, p2)
{
var t1 = p1.creationTime || 0;
var t2 = p2.creationTime || 0;
return (t1 - t2) || (p1.id - p2.id);
},
/**
* @param {!WebInspector.Throttler.FinishCallback} finishCallback
*/
_updateData: function(finishCallback)
{
var target = this._target;
if (!target || this._popoverHelper.isPopoverVisible() || this._popoverHelper.isHoverTimerActive()) {
didUpdate.call(this);
return;
}
// FIXME: Run GC on getPromises from backend.
target.heapProfilerAgent().collectGarbage();
target.debuggerAgent().getPromises(didGetPromises.bind(this));
/**
* @param {?Protocol.Error} error
* @param {?Array.<!DebuggerAgent.PromiseDetails>} promiseDetails
* @this {WebInspector.PromisePane}
*/
function didGetPromises(error, promiseDetails)
{
if (target !== this._target || this._popoverHelper.isPopoverVisible() || this._popoverHelper.isHoverTimerActive()) {
didUpdate.call(this);
return;
}
var expandedState = this._dataGridExpandedState();
this._clear();
if (error || !promiseDetails) {
didUpdate.call(this);
return;
}
var nodesToInsert = { __proto__: null };
promiseDetails.sort(this._comparePromises);
for (var i = 0; i < promiseDetails.length; i++) {
var promise = promiseDetails[i];
var statusElement = createElementWithClass("div", "status " + promise.status);
switch (promise.status) {
case "pending":
statusElement.title = WebInspector.UIString("Pending");
break;
case "resolved":
statusElement.title = WebInspector.UIString("Fulfilled");
break;
case "rejected":
statusElement.title = WebInspector.UIString("Rejected");
break;
}
var data = {
status: statusElement,
promiseId: promise.id,
function: WebInspector.beautifyFunctionName(promise.callFrame ? promise.callFrame.functionName : "")
};
if (promise.callFrame)
data.created = this._linkifier.linkifyConsoleCallFrame(target, promise.callFrame);
if (promise.settlementStack && promise.settlementStack[0])
data.settled = this._linkifier.linkifyConsoleCallFrame(target, promise.settlementStack[0]);
if (promise.creationTime && promise.settlementTime && promise.settlementTime >= promise.creationTime)
data.tts = Number.millisToString(promise.settlementTime - promise.creationTime);
var node = new WebInspector.DataGridNode(data, false);
node.selectable = false;
node[WebInspector.PromisePane._detailsSymbol] = promise;
nodesToInsert[promise.id] = { node: node, parentId: promise.parentId };
}
var rootNode = this._dataGrid.rootNode();
for (var id in nodesToInsert) {
var node = nodesToInsert[id].node;
var parentId = nodesToInsert[id].parentId;
var parentNode = (parentId && nodesToInsert[parentId]) ? nodesToInsert[parentId].node : rootNode;
parentNode.appendChild(node);
}
for (var id in nodesToInsert) {
var node = nodesToInsert[id].node;
node.expanded = (id in expandedState ? expandedState[id] : true);
}
didUpdate.call(this);
}
/**
* @this {WebInspector.PromisePane}
*/
function didUpdate()
{
this._scheduleDataUpdate();
finishCallback();
}
},
_dataGridExpandedState: function()
{
var result = { __proto__: null };
examineNode(this._dataGrid.rootNode());
return result;
/**
* @param {!WebInspector.DataGridNode} node
*/
function examineNode(node)
{
var details = node[WebInspector.PromisePane._detailsSymbol];
if (details)
result[details.id] = node.hasChildren ? node.expanded : true;
for (var child of node.children)
examineNode(child);
}
},
/**
* @param {boolean=} asSoonAsPossible
*/
_scheduleDataUpdate: function(asSoonAsPossible)
{
if (!this.isShowing())
return;
this._throttler.schedule(this._updateData.bind(this), asSoonAsPossible);
},
_clear: function()
{
this._hidePopover();
this._dataGrid.rootNode().removeChildren();
this._linkifier.reset();
},
/**
* @param {!WebInspector.ContextMenu} contextMenu
* @param {!WebInspector.DataGridNode} node
*/
_onContextMenu: function(contextMenu, node)
{
var target = this._target;
if (!target)
return;
var promiseId = node.data.promiseId;
contextMenu.appendItem(WebInspector.UIString.capitalize("Show in ^console"), showPromiseInConsole);
contextMenu.show();
function showPromiseInConsole()
{
target.debuggerAgent().getPromiseById(promiseId, "console", didGetPromiseById);
}
/**
* @param {?Protocol.Error} error
* @param {?RuntimeAgent.RemoteObject} promise
*/
function didGetPromiseById(error, promise)
{
if (error || !promise)
return;
target.consoleAgent().setLastEvaluationResult(promise.objectId);
var message = new WebInspector.ConsoleMessage(target,
WebInspector.ConsoleMessage.MessageSource.Other,
WebInspector.ConsoleMessage.MessageLevel.Log,
"",
WebInspector.ConsoleMessage.MessageType.Log,
undefined,
undefined,
undefined,
undefined,
[promise]);
target.consoleModel.addMessage(message);
WebInspector.console.show();
}
},
/**
* @param {!Element} element
* @param {!Event} event
* @return {!Element|!AnchorBox|undefined}
*/
_getPopoverAnchor: function(element, event)
{
if (!this._target)
return undefined;
var node = this._dataGrid.dataGridNodeFromNode(element);
if (!node)
return undefined;
var details = node[WebInspector.PromisePane._detailsSymbol];
if (!details)
return undefined;
var anchor = element.enclosingNodeOrSelfWithClass("created-column");
if (anchor)
return details.creationStack ? anchor : undefined;
anchor = element.enclosingNodeOrSelfWithClass("settled-column");
return (anchor && details.settlementStack) ? anchor : undefined;
},
/**
* @param {!Element} anchor
* @param {!WebInspector.Popover} popover
*/
_showPopover: function(anchor, popover)
{
var node = this._dataGrid.dataGridNodeFromNode(anchor);
var details = node[WebInspector.PromisePane._detailsSymbol];
var stackTrace;
var asyncStackTrace;
if (anchor.classList.contains("created-column")) {
stackTrace = details.creationStack;
asyncStackTrace = details.asyncCreationStack;
} else {
stackTrace = details.settlementStack;
asyncStackTrace = details.asyncSettlementStack;
}
var content = WebInspector.DOMPresentationUtils.buildStackTracePreviewContents(this._target, this._linkifier, stackTrace, asyncStackTrace);
popover.setCanShrink(true);
popover.showForAnchor(content, anchor);
},
__proto__: WebInspector.VBox.prototype
}
| mit |
Franky666/programmiersprachen-raytracer | external/boost_1_59_0/libs/tr1/test/std_headers/test_ostream.cpp | 256 | // (C) Copyright John Maddock 2005.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <ostream>
| mit |
cdnjs/cdnjs | ajax/libs/simple-icons/6.4.0/c.js | 917 | console.warn("warn -",`Imports like "const c = require('simple-icons/icons/c');" have been deprecated in v6.0.0 and will no longer work from v7.0.0, use "const { siC } = require('simple-icons/icons');" instead`),module.exports={title:"C",slug:"c",get svg(){return'<svg role="img" viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg"><title>C</title><path d="'+this.path+'"/></svg>'},path:"M16.5921 9.1962s-.354-3.298-3.627-3.39c-3.2741-.09-4.9552 2.474-4.9552 6.14 0 3.6651 1.858 6.5972 5.0451 6.5972 3.184 0 3.5381-3.665 3.5381-3.665l6.1041.365s.36 3.31-2.196 5.836c-2.552 2.5241-5.6901 2.9371-7.8762 2.9201-2.19-.017-5.2261.034-8.1602-2.97-2.938-3.0101-3.436-5.9302-3.436-8.8002 0-2.8701.556-6.6702 4.047-9.5502C7.444.72 9.849 0 12.254 0c10.0422 0 10.7172 9.2602 10.7172 9.2602z",source:"https://commons.wikimedia.org/wiki/File:The_C_Programming_Language_logo.svg",hex:"A8B9CC",guidelines:void 0,license:void 0};
| mit |
pruebasetichat/google-plugin-for-eclipse | plugins/com.google.gdt.eclipse.managedapis/src/com/google/gdt/eclipse/managedapis/TimeProvider.java | 971 | /*******************************************************************************
* Copyright 2011 Google Inc. All Rights Reserved.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package com.google.gdt.eclipse.managedapis;
/**
 * Abstraction for obtaining the current time (cf.
 * {@link System#currentTimeMillis()}).
 */
public interface TimeProvider {

  /**
   * @return the current time; the reference to
   *         {@code System.currentTimeMillis()} suggests milliseconds since
   *         the Unix epoch, but implementations define the exact meaning
   */
  long getCurrentTime();
}
| epl-1.0 |
Roma48/mayak | templates/gk_events/admin/elements/asset.php | 573 | <?php
defined('JPATH_BASE') or die;
if(!defined('DS')){ define('DS',DIRECTORY_SEPARATOR); }
jimport('joomla.form.formfield');
/**
 * Pseudo form field that injects an asset (script or stylesheet) into the
 * document instead of rendering an input. The asset path and extension come
 * from the field's XML definition.
 */
class JFormFieldAsset extends JFormField {

	/**
	 * The form field type.
	 *
	 * @var string
	 */
	protected $type = 'Asset';

	/**
	 * Adds the configured asset to the document head and renders nothing.
	 *
	 * @return void
	 */
	protected function getInput() {
		$document = JFactory::getDocument();
		$assetUrl = JURI::root() . $this->element['path'];

		if ($this->element['extension'] == 'js') {
			$document->addScript($assetUrl);
		} else {
			$document->addStyleSheet($assetUrl);
		}

		return;
	}
}
/* EOF */
| gpl-2.0 |
alexsco74/drupal-trade | sites/all/modules/contrib/qtip/modules/qtip_views/js/qtip_views.js | 1452 | (function ($) {
Drupal.behaviors.qtipViewsLabelTooltip = {
attach: function(context) {
if (Drupal.settings.qtipViewsLabelTooltip) {
$.each(Drupal.settings.qtipViewsLabelTooltip, function(view, displays) {
$.each(displays, function(display, settings) {
$.each(settings.tooltips, function(field, tooltip) {
$('.view-id-' + view + '.view-display-id-' + display + ' .qtip-views-field-' + field + '-label')
.once('qtip-views-element-label')
.append(tooltip)
.children(':first-child')
.wrap('<span class="qtip-link qtip-views-link"></span>');
});
});
});
}
},
};
Drupal.behaviors.qtipViewsTooltip = {
attach: function(context) {
if (Drupal.settings.qtipViewsTooltip) {
$.each(Drupal.settings.qtipViewsTooltip, function(view, displays) {
$.each(displays, function(display, settings) {
$.each(settings.tooltips, function(row, tooltip) {
$.each(tooltip, function(field, content) {
$('.view-id-' + view + '.view-display-id-' + display + ' .qtip-views-field-' + field + ':eq(' + row + ')')
.once('qtip-views-element')
.wrapInner('<span class="qtip-link"></span>')
.append(content);
});
});
});
});
}
},
};
})(jQuery);
| gpl-2.0 |
FauxFaux/jdk9-jdk | src/jdk.jdi/share/classes/com/sun/jdi/connect/Connector.java | 9772 | /*
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package com.sun.jdi.connect;
import java.util.Map;
import java.util.List;
import java.io.Serializable;
/**
 * A method of connection between a debugger and a target VM.
 * A connector encapsulates exactly one {@link Transport} used
 * to establish the connection. Each connector has a set of arguments
 * which controls its operation. The arguments are stored as a
 * map, keyed by a string. Each implementation defines the string
 * argument keys it accepts.
 *
 * @see LaunchingConnector
 * @see AttachingConnector
 * @see ListeningConnector
 * @see Connector.Argument
 *
 * @author Gordon Hirsch
 * @since 1.3
 */
public interface Connector {
    /**
     * Returns a short identifier for the connector. Connector implementors
     * should follow similar naming conventions as are used with packages
     * to avoid name collisions. For example, the Sun connector
     * implementations have names prefixed with "com.sun.jdi.".
     * Not intended for exposure to end-user.
     *
     * @return the name of this connector.
     */
    String name();

    /**
     * Returns a human-readable description of this connector
     * and its purpose.
     *
     * @return the description of this connector
     */
    String description();

    /**
     * Returns the transport mechanism used by this connector to establish
     * connections with a target VM.
     *
     * @return the {@link Transport} used by this connector.
     */
    Transport transport();

    /**
     * Returns the arguments accepted by this Connector and their
     * default values. The keys of the returned map are string argument
     * names. The values are {@link Connector.Argument} containing
     * information about the argument and its default value.
     *
     * @return the map associating argument names with argument
     * information and default value.
     */
    Map<String,Connector.Argument> defaultArguments();

    /**
     * Specification for and value of a Connector argument.
     * Will always implement a subinterface of Argument:
     * {@link Connector.StringArgument}, {@link Connector.BooleanArgument},
     * {@link Connector.IntegerArgument},
     * or {@link Connector.SelectedArgument}.
     */
    public interface Argument extends Serializable {
        /**
         * Returns a short, unique identifier for the argument.
         * Not intended for exposure to end-user.
         *
         * @return the name of this argument.
         */
        String name();

        /**
         * Returns a short human-readable label for this argument.
         *
         * @return a label for this argument
         */
        String label();

        /**
         * Returns a human-readable description of this argument
         * and its purpose.
         *
         * @return the description of this argument
         */
        String description();

        /**
         * Returns the current value of the argument. Initially, the
         * default value is returned. If the value is currently unspecified,
         * null is returned.
         *
         * @return the current value of the argument.
         */
        String value();

        /**
         * Sets the value of the argument.
         * The value should be checked with {@link #isValid(String)}
         * before setting it; invalid values will throw an exception
         * when the connection is established - for example,
         * on {@link LaunchingConnector#launch}
         */
        void setValue(String value);

        /**
         * Performs basic sanity check of argument.
         * @return <code>true</code> if the value is valid to be
         * used in {@link #setValue(String)}
         */
        boolean isValid(String value);

        /**
         * Indicates whether the argument must be specified. If true,
         * {@link #setValue} must be used to set a non-null value before
         * using this argument in establishing a connection.
         *
         * @return <code>true</code> if the argument must be specified;
         * <code>false</code> otherwise.
         */
        boolean mustSpecify();
    }

    /**
     * Specification for and value of a Connector argument,
     * whose value is Boolean. Boolean values are represented
     * by the localized versions of the strings "true" and "false".
     */
    public interface BooleanArgument extends Argument {
        /**
         * Sets the value of the argument.
         */
        void setValue(boolean value);

        /**
         * Performs basic sanity check of argument.
         * @return <code>true</code> if value is a string
         * representation of a boolean value.
         * @see #stringValueOf(boolean)
         */
        boolean isValid(String value);

        /**
         * Return the string representation of the <code>value</code>
         * parameter.
         * Does not set or examine the current value of <code>this</code>
         * instance.
         * @return the localized String representation of the
         * boolean value.
         */
        String stringValueOf(boolean value);

        /**
         * Return the value of the argument as a boolean. Since
         * the argument may not have been set or may have an invalid
         * value {@link #isValid(String)} should be called on
         * {@link #value()} to check its validity. If it is invalid
         * the boolean returned by this method is undefined.
         * @return the value of the argument as a boolean.
         */
        boolean booleanValue();
    }

    /**
     * Specification for and value of a Connector argument,
     * whose value is an integer. Integer values are represented
     * by their corresponding strings.
     */
    public interface IntegerArgument extends Argument {
        /**
         * Sets the value of the argument.
         * The value should be checked with {@link #isValid(int)}
         * before setting it; invalid values will throw an exception
         * when the connection is established - for example,
         * on {@link LaunchingConnector#launch}
         */
        void setValue(int value);

        /**
         * Performs basic sanity check of argument.
         * @return <code>true</code> if value represents an int that is
         * <code>{@link #min()} <= value <= {@link #max()}</code>
         */
        boolean isValid(String value);

        /**
         * Performs basic sanity check of argument.
         * @return <code>true</code> if
         * <code>{@link #min()} <= value <= {@link #max()}</code>
         */
        boolean isValid(int value);

        /**
         * Return the string representation of the <code>value</code>
         * parameter.
         * Does not set or examine the current value of <code>this</code>
         * instance.
         * @return the String representation of the
         * int value.
         */
        String stringValueOf(int value);

        /**
         * Return the value of the argument as a int. Since
         * the argument may not have been set or may have an invalid
         * value {@link #isValid(String)} should be called on
         * {@link #value()} to check its validity. If it is invalid
         * the int returned by this method is undefined.
         * @return the value of the argument as a int.
         */
        int intValue();

        /**
         * The upper bound for the value.
         * @return the maximum allowed value for this argument.
         */
        int max();

        /**
         * The lower bound for the value.
         * @return the minimum allowed value for this argument.
         */
        int min();
    }

    /**
     * Specification for and value of a Connector argument,
     * whose value is a String.
     */
    public interface StringArgument extends Argument {
        /**
         * Performs basic sanity check of argument.
         * @return <code>true</code> always
         */
        boolean isValid(String value);
    }

    /**
     * Specification for and value of a Connector argument,
     * whose value is a String selected from a list of choices.
     */
    public interface SelectedArgument extends Argument {
        /**
         * Return the possible values for the argument
         * @return {@link List} of {@link String}
         */
        List<String> choices();

        /**
         * Performs basic sanity check of argument.
         * @return <code>true</code> if value is one of {@link #choices()}.
         */
        boolean isValid(String value);
    }
}
| gpl-2.0 |
cloos/rt | devel/third-party/ckeditor/plugins/dialog/samples/assets/my_dialog.js | 932 | /**
* @license Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
* For licensing, see LICENSE.html or http://ckeditor.com/license
*/
// Sample dialog definition: two tabs demonstrating the basic CKEditor
// dialog field types (text, select, button).
CKEDITOR.dialog.add( 'myDialog', function( editor ) {
	return {
		title: 'My Dialog',
		minWidth: 400,
		minHeight: 200,
		contents: [
			{
				id: 'tab1',
				label: 'First Tab',
				title: 'First Tab',
				elements: [
					{
						id: 'input1',
						type: 'text',
						label: 'Text Field'
					},
					{
						id: 'select1',
						type: 'select',
						label: 'Select Field',
						items: [
							// [ display text, submitted value ]
							[ 'option1', 'value1' ],
							[ 'option2', 'value2' ]
						]
					}
				]
			},
			{
				id: 'tab2',
				label: 'Second Tab',
				title: 'Second Tab',
				elements: [
					{
						id: 'button1',
						type: 'button',
						label: 'Button Field'
					}
				]
			}
		]
	};
});

// %LEAVE_UNMINIFIED% %REMOVE_LINE%
| gpl-2.0 |
LivioCavallo/joomla-cms | components/com_content/views/form/tmpl/edit.php | 6826 | <?php
/**
* @package Joomla.Site
* @subpackage com_content
*
* @copyright Copyright (C) 2005 - 2018 Open Source Matters, Inc. All rights reserved.
* @license GNU General Public License version 2 or later; see LICENSE.txt
*/
defined('_JEXEC') or die;

// Load the client-side behaviors the edit form depends on.
JHtml::_('behavior.tabstate');
JHtml::_('behavior.keepalive');
JHtml::_('behavior.formvalidator');
JHtml::_('formbehavior.chosen', '#jform_catid', null, array('disable_search_threshold' => 0));
JHtml::_('formbehavior.chosen', 'select');

$this->tab_name = 'com-content-form';
$this->ignore_fieldsets = array('image-intro', 'image-full', 'jmetadata', 'item_associations');

// Create shortcut to parameters.
$params = $this->state->get('params');

// This checks if the editor config options have ever been saved. If they haven't they will fall back to the original settings.
$editoroptions = isset($params->show_publishing_options);

if (!$editoroptions)
{
	$params->show_urls_images_frontend = '0';
}

// Persist the editor contents before submitting, except on cancel.
JFactory::getDocument()->addScriptDeclaration("
	Joomla.submitbutton = function(task)
	{
		if (task == 'article.cancel' || document.formvalidator.isValid(document.getElementById('adminForm')))
		{
			" . $this->form->getField('articletext')->save() . "
			Joomla.submitform(task);
		}
	}
");
?>
<div class="edit item-page<?php echo $this->pageclass_sfx; ?>">
	<?php if ($params->get('show_page_heading')) : ?>
	<div class="page-header">
		<h1>
			<?php echo $this->escape($params->get('page_heading')); ?>
		</h1>
	</div>
	<?php endif; ?>
	<form action="<?php echo JRoute::_('index.php?option=com_content&a_id=' . (int) $this->item->id); ?>" method="post" name="adminForm" id="adminForm" class="form-validate form-vertical">
		<fieldset>
			<?php echo JHtml::_('bootstrap.startTabSet', $this->tab_name, array('active' => 'editor')); ?>

			<?php echo JHtml::_('bootstrap.addTab', $this->tab_name, 'editor', JText::_('COM_CONTENT_ARTICLE_CONTENT')); ?>
				<?php echo $this->form->renderField('title'); ?>
				<?php if (is_null($this->item->id)) : // alias is only editable on new articles ?>
					<?php echo $this->form->renderField('alias'); ?>
				<?php endif; ?>
				<?php echo $this->form->getInput('articletext'); ?>
				<?php if ($this->captchaEnabled) : ?>
					<?php echo $this->form->renderField('captcha'); ?>
				<?php endif; ?>
			<?php echo JHtml::_('bootstrap.endTab'); ?>

			<?php if ($params->get('show_urls_images_frontend')) : ?>
				<?php echo JHtml::_('bootstrap.addTab', $this->tab_name, 'images', JText::_('COM_CONTENT_IMAGES_AND_URLS')); ?>
					<?php echo $this->form->renderField('image_intro', 'images'); ?>
					<?php echo $this->form->renderField('image_intro_alt', 'images'); ?>
					<?php echo $this->form->renderField('image_intro_caption', 'images'); ?>
					<?php echo $this->form->renderField('float_intro', 'images'); ?>
					<?php echo $this->form->renderField('image_fulltext', 'images'); ?>
					<?php echo $this->form->renderField('image_fulltext_alt', 'images'); ?>
					<?php echo $this->form->renderField('image_fulltext_caption', 'images'); ?>
					<?php echo $this->form->renderField('float_fulltext', 'images'); ?>
					<?php echo $this->form->renderField('urla', 'urls'); ?>
					<?php echo $this->form->renderField('urlatext', 'urls'); ?>
					<div class="control-group">
						<div class="controls">
							<?php echo $this->form->getInput('targeta', 'urls'); ?>
						</div>
					</div>
					<?php echo $this->form->renderField('urlb', 'urls'); ?>
					<?php echo $this->form->renderField('urlbtext', 'urls'); ?>
					<div class="control-group">
						<div class="controls">
							<?php echo $this->form->getInput('targetb', 'urls'); ?>
						</div>
					</div>
					<?php echo $this->form->renderField('urlc', 'urls'); ?>
					<?php echo $this->form->renderField('urlctext', 'urls'); ?>
					<div class="control-group">
						<div class="controls">
							<?php echo $this->form->getInput('targetc', 'urls'); ?>
						</div>
					</div>
				<?php echo JHtml::_('bootstrap.endTab'); ?>
			<?php endif; ?>

			<?php echo JLayoutHelper::render('joomla.edit.params', $this); ?>

			<?php echo JHtml::_('bootstrap.addTab', $this->tab_name, 'publishing', JText::_('COM_CONTENT_PUBLISHING')); ?>
				<?php echo $this->form->renderField('catid'); ?>
				<?php echo $this->form->renderField('tags'); ?>
				<?php echo $this->form->renderField('note'); ?>
				<?php if ($params->get('save_history', 0)) : ?>
					<?php echo $this->form->renderField('version_note'); ?>
				<?php endif; ?>
				<?php if ($params->get('show_publishing_options', 1) == 1) : ?>
					<?php echo $this->form->renderField('created_by_alias'); ?>
				<?php endif; ?>
				<?php if ($this->item->params->get('access-change')) : // state fields only for users allowed to change them ?>
					<?php echo $this->form->renderField('state'); ?>
					<?php echo $this->form->renderField('featured'); ?>
					<?php if ($params->get('show_publishing_options', 1) == 1) : ?>
						<?php echo $this->form->renderField('publish_up'); ?>
						<?php echo $this->form->renderField('publish_down'); ?>
					<?php endif; ?>
				<?php endif; ?>
				<?php echo $this->form->renderField('access'); ?>
				<?php if (is_null($this->item->id)) : ?>
					<div class="control-group">
						<div class="control-label">
						</div>
						<div class="controls">
							<?php echo JText::_('COM_CONTENT_ORDERING'); ?>
						</div>
					</div>
				<?php endif; ?>
			<?php echo JHtml::_('bootstrap.endTab'); ?>

			<?php echo JHtml::_('bootstrap.addTab', $this->tab_name, 'language', JText::_('JFIELD_LANGUAGE_LABEL')); ?>
				<?php echo $this->form->renderField('language'); ?>
			<?php echo JHtml::_('bootstrap.endTab'); ?>

			<?php if ($params->get('show_publishing_options', 1) == 1) : ?>
				<?php echo JHtml::_('bootstrap.addTab', $this->tab_name, 'metadata', JText::_('COM_CONTENT_METADATA')); ?>
					<?php echo $this->form->renderField('metadesc'); ?>
					<?php echo $this->form->renderField('metakey'); ?>
				<?php echo JHtml::_('bootstrap.endTab'); ?>
			<?php endif; ?>

			<?php echo JHtml::_('bootstrap.endTabSet'); ?>

			<input type="hidden" name="task" value="" />
			<input type="hidden" name="return" value="<?php echo $this->return_page; ?>" />
			<?php echo JHtml::_('form.token'); ?>
		</fieldset>
		<div class="btn-toolbar">
			<div class="btn-group">
				<button type="button" class="btn btn-primary" onclick="Joomla.submitbutton('article.save')">
					<span class="icon-ok"></span><?php echo JText::_('JSAVE') ?>
				</button>
			</div>
			<div class="btn-group">
				<button type="button" class="btn" onclick="Joomla.submitbutton('article.cancel')">
					<span class="icon-cancel"></span><?php echo JText::_('JCANCEL') ?>
				</button>
			</div>
			<?php if ($params->get('save_history', 0) && $this->item->id) : ?>
				<div class="btn-group">
					<?php echo $this->form->getInput('contenthistory'); ?>
				</div>
			<?php endif; ?>
		</div>
	</form>
</div>
| gpl-2.0 |
princeofdarkness76/gcc-4.2 | libjava/classpath/gnu/java/nio/charset/Cp437.java | 4185 | /* Cp437.java -- Charset implementation for the Cp437 character set.
Copyright (C) 2005 Free Software Foundation, Inc.
This file is part of GNU Classpath.
GNU Classpath is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU Classpath is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU Classpath; see the file COPYING. If not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA.
Linking this library statically or dynamically with other modules is
making a combined work based on this library. Thus, the terms and
conditions of the GNU General Public License cover the whole
combination.
As a special exception, the copyright holders of this library give you
permission to link this library with independent modules to produce an
executable, regardless of the license terms of these independent
modules, and to copy and distribute the resulting executable under
terms of your choice, provided that you also meet, for each linked
independent module, the terms and conditions of the license of that
module. An independent module is a module which is not derived from
or based on this library. If you modify this library, you may extend
this exception to your version of the library, but you are not
obligated to do so. If you do not wish to do so, delete this
exception statement from your version. */
package gnu.java.nio.charset;
public class Cp437 extends ByteCharset
{
/**
* This is the lookup table for this encoding
*/
private static final char[] lookup =
{
0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F,
0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F,
0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F,
0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,
0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004A, 0x004B, 0x004C, 0x004D, 0x004E, 0x004F,
0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F,
0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F,
0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x007F,
0x00C7, 0x00FC, 0x00E9, 0x00E2, 0x00E4, 0x00E0, 0x00E5, 0x00E7,
0x00EA, 0x00EB, 0x00E8, 0x00EF, 0x00EE, 0x00EC, 0x00C4, 0x00C5,
0x00C9, 0x00E6, 0x00C6, 0x00F4, 0x00F6, 0x00F2, 0x00FB, 0x00F9,
0x00FF, 0x00D6, 0x00DC, 0x00A2, 0x00A3, 0x00A5, 0x20A7, 0x0192,
0x00E1, 0x00ED, 0x00F3, 0x00FA, 0x00F1, 0x00D1, 0x00AA, 0x00BA,
0x00BF, 0x2310, 0x00AC, 0x00BD, 0x00BC, 0x00A1, 0x00AB, 0x00BB,
0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556,
0x2555, 0x2563, 0x2551, 0x2557, 0x255D, 0x255C, 0x255B, 0x2510,
0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x255E, 0x255F,
0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x2567,
0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256B,
0x256A, 0x2518, 0x250C, 0x2588, 0x2584, 0x258C, 0x2590, 0x2580,
0x03B1, 0x00DF, 0x0393, 0x03C0, 0x03A3, 0x03C3, 0x00B5, 0x03C4,
0x03A6, 0x0398, 0x03A9, 0x03B4, 0x221E, 0x03C6, 0x03B5, 0x2229,
0x2261, 0x00B1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00F7, 0x2248,
0x00B0, 0x2219, 0x00B7, 0x221A, 0x207F, 0x00B2, 0x25A0, 0x00A0
};
/**
 * Constructs the Cp437 charset decoder (IBM/OEM code page 437, judging by
 * the box-drawing and Greek entries in the lookup table above).
 *
 * Registers the canonical name "Cp437" with no aliases and installs the
 * static byte-to-Unicode lookup table.
 */
public Cp437()
{
    // No aliases are registered for this charset name.
    super("Cp437", new String[] {
    });
    // `lookupTable` and `lookup` are declared in the enclosing class
    // (outside this view); presumably `lookup` maps each byte value
    // 0x00-0xFF to its Unicode code point -- confirm against the field
    // declarations.
    lookupTable = lookup;
}
} // class Cp437
| gpl-2.0 |
krabina/Maps | includes/parserhooks/Maps_Geodistance.php | 3095 | <?php
/**
* Class for the 'geodistance' parser hooks, which can
* calculate the geographical distance between two points.
*
* @since 0.7
*
* @licence GNU GPL v2+
* @author Jeroen De Dauw < [email protected] >
*/
/**
 * Implements the 'geodistance' parser hook, which computes the
 * geographical distance between two locations.
 *
 * @since 0.7
 *
 * @licence GNU GPL v2+
 * @author Jeroen De Dauw < [email protected] >
 */
class MapsGeodistance extends ParserHook {

	/**
	 * Gets the name of the parser hook.
	 * @see ParserHook::getName
	 *
	 * @since 0.7
	 *
	 * @return string
	 */
	protected function getName() {
		return 'geodistance';
	}

	/**
	 * Returns an array containing the parameter info.
	 * @see ParserHook::getParameterInfo
	 *
	 * @since 0.7
	 *
	 * @return array
	 */
	protected function getParameterInfo( $type ) {
		global $egMapsDistanceUnit, $egMapsDistanceDecimals, $egMapsAvailableGeoServices, $egMapsDefaultGeoService;

		// Both location parameters resolve against the same service settings.
		$locationDependencies = array( 'mappingservice', 'geoservice' );

		$params = array(
			'mappingservice' => array(
				'default' => '',
				'values' => MapsMappingServices::getAllServiceValues(),
				'tolower' => true,
			),
			'geoservice' => array(
				'default' => $egMapsDefaultGeoService,
				'aliases' => 'service',
				'values' => $egMapsAvailableGeoServices,
				'tolower' => true,
			),
			'unit' => array(
				'default' => $egMapsDistanceUnit,
				'values' => MapsDistanceParser::getUnits(),
			),
			'decimals' => array(
				'type' => 'integer',
				'default' => $egMapsDistanceDecimals,
			),
			'location1' => array(
				'type' => 'mapslocation',
				'aliases' => 'from',
				'dependencies' => $locationDependencies,
			),
			'location2' => array(
				'type' => 'mapslocation',
				'aliases' => 'to',
				'dependencies' => $locationDependencies,
			),
		);

		// Give grep a chance to find the usages:
		// maps-geodistance-par-mappingservice, maps-geodistance-par-geoservice,
		// maps-geodistance-par-unit, maps-geodistance-par-decimals,
		// maps-geodistance-par-location1, maps-geodistance-par-location2
		foreach ( array_keys( $params ) as $name ) {
			$params[$name]['message'] = 'maps-geodistance-par-' . $name;
		}

		return $params;
	}

	/**
	 * Returns the list of default parameters.
	 * @see ParserHook::getDefaultParameters
	 *
	 * @since 0.7
	 *
	 * @param $type
	 *
	 * @return array
	 */
	protected function getDefaultParameters( $type ) {
		// Positional order when the parameters are given unnamed.
		return array( 'location1', 'location2', 'unit', 'decimals' );
	}

	/**
	 * Renders and returns the output.
	 * @see ParserHook::render
	 *
	 * @since 0.7
	 *
	 * @param array $parameters
	 *
	 * @return string
	 * @throws MWException
	 */
	public function render( array $parameters ) {
		/**
		 * @var \DataValues\LatLongValue $start
		 * @var \DataValues\LatLongValue $end
		 */
		$start = $parameters['location1']->getCoordinates();
		$end = $parameters['location2']->getCoordinates();

		$distance = MapsGeoFunctions::calculateDistance( $start, $end );

		return MapsDistanceParser::formatDistance( $distance, $parameters['unit'], $parameters['decimals'] );
	}

	/**
	 * @see ParserHook::getMessage
	 *
	 * @since 1.0
	 */
	public function getMessage() {
		return 'maps-geodistance-description';
	}
}
DailyShana/ygopro-scripts | c76775123.lua | 1189 | --パトロール・ロボ
-- Registers the card's single effect: once per turn, during the turn
-- player's Standby Phase (and only on this card's controller's turn, per
-- the condition), target one face-down card the opponent controls and
-- reveal it.
function c76775123.initial_effect(c)
    --confirm (reveal) a face-down card
    local e1=Effect.CreateEffect(c)
    e1:SetDescription(aux.Stringid(76775123,0))
    -- Optional trigger effect that lives on the field.
    e1:SetType(EFFECT_TYPE_FIELD+EFFECT_TYPE_TRIGGER_O)
    e1:SetProperty(EFFECT_FLAG_CARD_TARGET)
    -- Fires on the Standby Phase event.
    e1:SetCode(EVENT_PHASE+PHASE_STANDBY)
    e1:SetRange(LOCATION_MZONE)
    -- Once per turn.
    e1:SetCountLimit(1)
    e1:SetCondition(c76775123.condition)
    e1:SetTarget(c76775123.target)
    e1:SetOperation(c76775123.operation)
    c:RegisterEffect(e1)
end
-- Activation condition: only during the effect controller's own turn.
function c76775123.condition(e,tp,eg,ep,ev,re,r,rp)
    return Duel.GetTurnPlayer()==tp
end
-- Target selection: the player picks one face-down card anywhere on the
-- opponent's field.
function c76775123.target(e,tp,eg,ep,ev,re,r,rp,chk,chkc)
    -- chkc: validity test for an externally supplied target card.
    if chkc then return chkc:IsControler(1-tp) and chkc:IsOnField() and chkc:IsFacedown() end
    -- chk==0: activation legality - at least one face-down card must exist
    -- on the opponent's field.
    if chk==0 then return Duel.IsExistingTarget(Card.IsFacedown,tp,0,LOCATION_ONFIELD,1,nil) end
    Duel.Hint(HINT_SELECTMSG,tp,HINTMSG_FACEDOWN)
    Duel.SelectTarget(tp,Card.IsFacedown,tp,0,LOCATION_ONFIELD,1,1,nil)
end
-- Effect resolution: reveal the targeted face-down card, provided both
-- this monster and the target are still valid at resolution time.
function c76775123.operation(e,tp,eg,ep,ev,re,r,rp)
    local handler=e:GetHandler()
    -- The monster itself must still be face-up and related to the effect.
    if not handler:IsFacedown() and handler:IsRelateToEffect(e) then
        local target=Duel.GetFirstTarget()
        -- The target must still be a valid, face-down, related card.
        if target and target:IsRelateToEffect(e) and target:IsFacedown() then
            Duel.ConfirmCards(tp,target)
        end
    end
end
| gpl-2.0 |
ppkt/Clementine | src/songinfo/lastfmtrackinfoprovider.cpp | 4614 | /* This file is part of Clementine.
Copyright 2010, David Sansome <[email protected]>
Clementine is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Clementine is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Clementine. If not, see <http://www.gnu.org/licenses/>.
*/
#include "lastfmtrackinfoprovider.h"
#include "songinfotextview.h"
#include "songplaystats.h"
#include "tagwidget.h"
#include "internet/lastfm/lastfmcompat.h"
#include "ui/iconloader.h"
// Kicks off an asynchronous Last.fm "track.getInfo" web-service request for
// the given song.  The reply is handled in RequestFinished(); `id`
// identifies this lookup so results can be routed back to the requester.
void LastfmTrackInfoProvider::FetchInfo(int id, const Song& metadata) {
  QMap<QString, QString> params;
  params["method"] = "track.getInfo";
  params["track"] = metadata.title();
  params["artist"] = metadata.artist();

  // When logged in, include the username so the response also carries the
  // per-user fields (userplaycount/userloved) parsed in GetPlayCounts().
  if (!lastfm::ws::Username.isEmpty())
    params["username"] = lastfm::ws::Username;

  QNetworkReply* reply = lastfm::ws::get(params);
  connect(reply, SIGNAL(finished()), SLOT(RequestFinished()));
  // Remember which lookup this reply belongs to.
  requests_[reply] = id;
}
// Slot invoked when a track.getInfo reply completes.  Parses the XML body
// and emits the individual info panes; always ends by emitting Finished(id)
// so the requester knows the lookup is complete, even on error.
// NOTE(review): the QNetworkReply is never deleteLater()'d here -- confirm
// it is released elsewhere, otherwise this leaks one reply per request.
void LastfmTrackInfoProvider::RequestFinished() {
  QNetworkReply* reply = qobject_cast<QNetworkReply*>(sender());
  if (!reply || !requests_.contains(reply)) return;  // Not a reply we issued.

  const int id = requests_.take(reply);

  if (reply->error() != QNetworkReply::NoError) {
    // Network/service error: report completion with no data.
    emit Finished(id);
    return;
  }

  lastfm::XmlQuery query(lastfm::compat::EmptyXmlQuery());
  if (lastfm::compat::ParseQuery(reply->readAll(), &query)) {
    GetPlayCounts(id, query);
    GetWiki(id, query);
    GetTags(id, query);
  }

  emit Finished(id);
}
// Builds the "Last.fm play counts" pane from the track.getInfo response:
// global listener/play counts plus, when the request was authenticated,
// the user's own scrobble count and loved status.
void LastfmTrackInfoProvider::GetPlayCounts(int id, const lastfm::XmlQuery& q) {
  // Parse the response
  const int listeners = q["track"]["listeners"].text().toInt();
  const int playcount = q["track"]["playcount"].text().toInt();
  int myplaycount = -1;  // -1 means "no per-user data in the response".
  bool love = false;

  // Per-user fields are only present when a username was sent.
  if (!q["track"].children("userplaycount").isEmpty()) {
    myplaycount = q["track"]["userplaycount"].text().toInt();
    love = q["track"]["userloved"].text() == "1";
  }

  if (!listeners && !playcount && myplaycount == -1)
    return; // No useful data

  CollapsibleInfoPane::Data data;
  data.id_ = "lastfm/playcounts";
  data.title_ = tr("Last.fm play counts");
  data.type_ = CollapsibleInfoPane::Data::Type_PlayCounts;
  data.icon_ = QIcon(":/last.fm/as.png");

  SongPlayStats* widget = new SongPlayStats;
  data.contents_ = widget;

  if (myplaycount != -1) {
    if (love)
      widget->AddItem(QIcon(":/last.fm/love.png"), tr("You love this track"));
    widget->AddItem(QIcon(":/last.fm/icon_user.png"),
                    tr("Your scrobbles: %1").arg(myplaycount));
  }
  if (playcount)
    widget->AddItem(IconLoader::Load("media-playback-start"),
                    tr("%L1 total plays").arg(playcount));
  if (listeners)
    widget->AddItem(QIcon(":/last.fm/my_neighbours.png"),
                    tr("%L1 other listeners").arg(listeners));

  emit InfoReady(id, data);
}
// Extracts the wiki article for the track (if any) from the Last.fm
// response and publishes it as a collapsible "Last.fm wiki" pane.
void LastfmTrackInfoProvider::GetWiki(int id, const lastfm::XmlQuery& q) {
  if (q["track"].children("wiki").isEmpty()) {
    return;  // The response carries no wiki section at all.
  }

  const QString article = q["track"]["wiki"]["content"].text();
  if (article.isEmpty()) {
    return;  // Wiki section present but empty - nothing to show.
  }

  // The text view takes the article as HTML; the pane owns the widget.
  SongInfoTextView* view = new SongInfoTextView;
  view->SetHtml(article);

  CollapsibleInfoPane::Data pane;
  pane.id_ = "lastfm/songwiki";
  pane.title_ = tr("Last.fm wiki");
  pane.type_ = CollapsibleInfoPane::Data::Type_Biography;
  pane.icon_ = QIcon(":/last.fm/as.png");
  pane.contents_ = view;

  emit InfoReady(id, pane);
}
// Builds the "Last.fm tags" pane from the response's toptags list; does
// nothing when the track has no tags.
void LastfmTrackInfoProvider::GetTags(int id, const lastfm::XmlQuery& q) {
  // Parse the response
  if (q["track"].children("toptags").isEmpty() ||
      q["track"]["toptags"].children("tag").isEmpty())
    return; // No tag elements

  CollapsibleInfoPane::Data data;
  data.id_ = "lastfm/songtags";
  data.title_ = tr("Last.fm tags");
  data.type_ = CollapsibleInfoPane::Data::Type_Biography;
  data.icon_ = QIcon(":/last.fm/icon_tag.png");

  TagWidget* widget = new TagWidget(TagWidget::Type_Tags);
  data.contents_ = widget;
  widget->SetIcon(data.icon_);

  // One clickable tag chip per <tag> element.
  for (const lastfm::XmlQuery& e : q["track"]["toptags"].children("tag")) {
    widget->AddTag(e["name"].text());
  }

  emit InfoReady(id, data);
}
| gpl-3.0 |
santssoft/darkstar | scripts/zones/North_Gustaberg/npcs/relic.lua | 898 | -----------------------------------
-- Area: North Gustaberg
-- NPC: <this space intentionally left blank>
-- !pos -217 97 461 106
-----------------------------------
local ID = require("scripts/zones/North_Gustaberg/IDs")
require("scripts/globals/npc_util")
-----------------------------------
-- Relic upgrade turn-in (stage 4 -> next stage).  Requires the player to be
-- mid-upgrade on weapon 18305 and to trade exactly the listed items:
-- currency (1451), shard (1577), necropsyche (1589) and the stage-4 weapon
-- (18305), per the inline comment.  Event 254 then offers weapon 18306.
function onTrade(player, npc, trade)
    if player:getCharVar("RELIC_IN_PROGRESS") == 18305 and npcUtil.tradeHas(trade, {1451, 1577, 1589, 18305}) then -- currency, shard, necropsyche, stage 4
        player:startEvent(254, 18306)
    end
end
-- Clicking the ??? without a valid trade only shows flavour text.
function onTrigger(player, npc)
    local flavourText = ID.text.NOTHING_OUT_OF_ORDINARY
    player:messageSpecial(flavourText)
end
-- No mid-event updates are needed for this NPC's cutscene.
function onEventUpdate(player, csid, option)
end
-- Completes the upgrade when event 254 ends: give weapon 18306 (presumably
-- the next relic stage) plus 30x item 1450 -- verify IDs against the item
-- DB.  Only if the reward fits in the player's inventory are the traded
-- items consumed and the progress flag cleared.
function onEventFinish(player, csid, option)
    if csid == 254 and npcUtil.giveItem(player, {18306, {1450, 30}}) then
        player:confirmTrade()
        player:setCharVar("RELIC_IN_PROGRESS", 0)
    end
end
| gpl-3.0 |
mateor/pdroid | android-2.3.4_r1/tags/1.25/frameworks/base/core/java/android/ddm/DdmHandleAppName.java | 2788 | /*
* Copyright (C) 2007 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.ddm;
import org.apache.harmony.dalvik.ddmc.Chunk;
import org.apache.harmony.dalvik.ddmc.ChunkHandler;
import org.apache.harmony.dalvik.ddmc.DdmServer;
import android.util.Config;
import android.util.Log;
import java.nio.ByteBuffer;
/**
* Track our app name. We don't (currently) handle any inbound packets.
*/
/**
 * Track our app name. We don't (currently) handle any inbound packets.
 *
 * When the runtime assigns this process its name, setAppName() records it
 * and pushes an APNM (APplication NaMe) chunk to the DDM server.
 */
public class DdmHandleAppName extends ChunkHandler {

    /** Four-character chunk type tag for "APplication NaMe". */
    public static final int CHUNK_APNM = type("APNM");

    // Volatile: the name may be set and read from different threads.
    private volatile static String mAppName = "";

    private static DdmHandleAppName mInstance = new DdmHandleAppName();


    /* singleton, do not instantiate */
    private DdmHandleAppName() {}

    /**
     * Register for the messages we're interested in.
     * Intentionally empty: this handler never receives chunks.
     */
    public static void register() {}

    /**
     * Called when the DDM server connects.  The handler is allowed to
     * send messages to the server.
     */
    public void connected() {}

    /**
     * Called when the DDM server disconnects.  Can be used to disable
     * periodic transmissions or clean up saved state.
     */
    public void disconnected() {}

    /**
     * Handle a chunk of data.  Never expected to be called, since
     * register() subscribes to nothing.
     */
    public Chunk handleChunk(Chunk request) {
        return null;
    }



    /**
     * Set the application name.  Called when we get named, which may be
     * before or after DDMS connects.  For the latter we need to send up
     * an APNM message.  Null or empty names are ignored.
     */
    public static void setAppName(String name) {
        if (name == null || name.length() == 0)
            return;

        mAppName = name;

        // if DDMS is already connected, send the app name up
        // (NOTE(review): sendAPNM is called unconditionally here --
        // presumably DdmServer.sendChunk is a no-op/queues when not
        // connected; confirm in DdmServer.)
        sendAPNM(name);
    }

    public static String getAppName() {
        return mAppName;
    }

    /*
     * Send an APNM (APplication NaMe) chunk: a 4-byte char count followed
     * by the name as UTF-16 code units (2 bytes each).
     */
    private static void sendAPNM(String appName) {
        if (Config.LOGV)
            Log.v("ddm", "Sending app name");

        ByteBuffer out = ByteBuffer.allocate(4 + appName.length()*2);
        out.order(ChunkHandler.CHUNK_ORDER);
        out.putInt(appName.length());
        putString(out, appName);

        Chunk chunk = new Chunk(CHUNK_APNM, out);
        DdmServer.sendChunk(chunk);
    }
}
| gpl-3.0 |
Sweetgrassbuffalo/ReactionSweeGrass-v2 | node_modules/faker/lib/random.js | 5006 | var mersenne = require('../vendor/mersenne');
/**
*
* @namespace faker.random
*/
/**
 * Random-value generators backed by a Mersenne Twister PRNG.
 *
 * @namespace faker.random
 * @param {object} faker  the owning faker instance (used for cross-calls)
 * @param {number|number[]} [seed]  optional PRNG seed
 */
function Random (faker, seed) {
  // Use a user provided seed if it exists.
  // NOTE(review): a falsy seed (0, '') is silently ignored by this check.
  if (seed) {
    if (Array.isArray(seed) && seed.length) {
      mersenne.seed_array(seed);
    }
    else {
      mersenne.seed(seed);
    }
  }
  /**
   * returns a single random number based on a max number or range
   *
   * @method faker.random.number
   * @param {mixed} options  a number (treated as max) or
   *        {min, max, precision}; defaults min=0, max=99999, precision=1
   */
  this.number = function (options) {

    if (typeof options === "number") {
      options = {
        max: options
      };
    }

    options = options || {};

    if (typeof options.min === "undefined") {
      options.min = 0;
    }

    if (typeof options.max === "undefined") {
      options.max = 99999;
    }
    if (typeof options.precision === "undefined") {
      options.precision = 1;
    }

    // Make the range inclusive of the max value
    // (only for non-negative max; a negative max stays exclusive).
    var max = options.max;
    if (max >= 0) {
      max += options.precision;
    }

    // Draw in units of `precision`, then scale back up.
    var randomNumber = options.precision * Math.floor(
      mersenne.rand(max / options.precision, options.min / options.precision));

    return randomNumber;

  }

  /**
   * takes an array and returns a random element of the array
   *
   * @method faker.random.arrayElement
   * @param {array} array  defaults to ["a", "b", "c"]
   */
  this.arrayElement = function (array) {
      array = array || ["a", "b", "c"];
      var r = faker.random.number({ max: array.length - 1 });
      return array[r];
  }

  /**
   * takes an object and returns the randomly key or value
   *
   * @method faker.random.objectElement
   * @param {object} object  defaults to { "foo": "bar", "too": "car" }
   * @param {mixed} field  pass "key" to get a key; anything else yields a value
   */
  this.objectElement = function (object, field) {
      object = object || { "foo": "bar", "too": "car" };
      var array = Object.keys(object);
      var key = faker.random.arrayElement(array);

      return field === "key" ? key : object[key];
  }

  /**
   * uuid - RFC 4122 version 4 style, driven by the seeded PRNG
   *
   * @method faker.random.uuid
   */
  this.uuid = function () {
      var self = this;
      var RFC4122_TEMPLATE = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx';
      var replacePlaceholders = function (placeholder) {
          var random = self.number({ min: 0, max: 15 });
          // 'y' positions are constrained to 8..b per RFC 4122.
          var value = placeholder == 'x' ? random : (random &0x3 | 0x8);
          return value.toString(16);
      };
      return RFC4122_TEMPLATE.replace(/[xy]/g, replacePlaceholders);
  }

  /**
   * boolean - true or false with (roughly) equal probability
   *
   * @method faker.random.boolean
   */
  this.boolean = function () {
      return !!faker.random.number(1)
  }

  // TODO: have ability to return specific type of word? As in: noun, adjective, verb, etc
  /**
   * word - a single word drawn from a random faker generator
   *
   * @method faker.random.word
   * @param {string} type  currently unused
   */
  this.word = function randomWord (type) {

    var wordMethods = [
    'commerce.department',
    'commerce.productName',
    'commerce.productAdjective',
    'commerce.productMaterial',
    'commerce.product',
    'commerce.color',

    'company.catchPhraseAdjective',
    'company.catchPhraseDescriptor',
    'company.catchPhraseNoun',
    'company.bsAdjective',
    'company.bsBuzz',
    'company.bsNoun',
    'address.streetSuffix',
    'address.county',
    'address.country',
    'address.state',

    'finance.accountName',
    'finance.transactionType',
    'finance.currencyName',

    'hacker.noun',
    'hacker.verb',
    'hacker.adjective',
    'hacker.ingverb',
    'hacker.abbreviation',

    'name.jobDescriptor',
    'name.jobArea',
    'name.jobType'];

    // randomly pick from the many faker methods that can generate words
    var randomWordMethod = faker.random.arrayElement(wordMethods);
    return faker.fake('{{' + randomWordMethod + '}}');

  }

  /**
   * randomWords - several words joined with spaces
   *
   * @method faker.random.words
   * @param {number} count defaults to a random value between 1 and 3
   */
  this.words = function randomWords (count) {
    var words = [];
    if (typeof count === "undefined") {
      count = faker.random.number({min:1, max: 3});
    }
    for (var i = 0; i<count; i++) {
      words.push(faker.random.word());
    }
    return words.join(' ');
  }

  /**
   * locale
   *
   * @method faker.random.image
   */
  this.image = function randomImage () {
    return faker.image.image();
  }

  /**
   * locale - a random locale key known to this faker build
   *
   * @method faker.random.locale
   */
  this.locale = function randomLocale () {
    return faker.random.arrayElement(Object.keys(faker.locales));
  };

  /**
   * alphaNumeric - a string of random lowercase letters and digits
   *
   * @method faker.random.alphaNumeric
   * @param {number} count defaults to 1
   */
  this.alphaNumeric = function alphaNumeric(count) {
    if (typeof count === "undefined") {
      count = 1;
    }

    var wholeString = "";
    for(var i = 0; i < count; i++) {
      wholeString += faker.random.arrayElement(["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]);
    }

    return wholeString;
  };

  return this;

}

module['exports'] = Random;
| gpl-3.0 |
KamranMackey/Essentials | EssentialsAntiBuild/src/com/earth2me/essentials/antibuild/EssentialsAntiBuild.java | 1783 | package com.earth2me.essentials.antibuild;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
import org.bukkit.plugin.Plugin;
import org.bukkit.plugin.PluginManager;
import org.bukkit.plugin.java.JavaPlugin;
/**
 * Bukkit plugin entry point for EssentialsAntiBuild.
 *
 * Holds the per-config boolean and item-id-list settings and wires the
 * build-protection event listener.  The plugin is inert (onEnable returns
 * early) unless the core Essentials plugin is present and enabled.
 */
public class EssentialsAntiBuild extends JavaPlugin implements IAntiBuild
{
	// Boolean toggles keyed by config entry.
	private final transient Map<AntiBuildConfig, Boolean> settingsBoolean = new EnumMap<AntiBuildConfig, Boolean>(AntiBuildConfig.class);
	// Item-id lists keyed by config entry (e.g. blacklists).
	private final transient Map<AntiBuildConfig, List<Integer>> settingsList = new EnumMap<AntiBuildConfig, List<Integer>>(AntiBuildConfig.class);
	// Bridge to the core Essentials plugin; null until onEnable succeeds.
	private transient EssentialsConnect ess = null;

	@Override
	public void onEnable()
	{
		final PluginManager pm = this.getServer().getPluginManager();
		final Plugin essPlugin = pm.getPlugin("Essentials");
		// Hard dependency: without a running Essentials core, do nothing.
		if (essPlugin == null || !essPlugin.isEnabled())
		{
			return;
		}
		ess = new EssentialsConnect(essPlugin, this);

		final EssentialsAntiBuildListener blockListener = new EssentialsAntiBuildListener(this);
		pm.registerEvents(blockListener, this);
	}

	/**
	 * Returns true if the given item id appears in the configured list.
	 */
	@Override
	public boolean checkProtectionItems(final AntiBuildConfig list, final int id)
	{
		final List<Integer> itemList = settingsList.get(list);
		return itemList != null && !itemList.isEmpty() && itemList.contains(id);
	}

	@Override
	public EssentialsConnect getEssentialsConnect()
	{
		return ess;
	}

	@Override
	public Map<AntiBuildConfig, Boolean> getSettingsBoolean()
	{
		return settingsBoolean;
	}

	@Override
	public Map<AntiBuildConfig, List<Integer>> getSettingsList()
	{
		return settingsList;
	}

	/**
	 * Returns the configured boolean, falling back to the config entry's
	 * default when it has not been set.
	 */
	@Override
	public boolean getSettingBool(final AntiBuildConfig protectConfig)
	{
		final Boolean bool = settingsBoolean.get(protectConfig);
		return bool == null ? protectConfig.getDefaultValueBoolean() : bool;
	}
}
| gpl-3.0 |
microfire21/a3wasteland | A3Wasteland.Altis/client/systems/hud/dialog/hud.hpp | 2229 | /*
@file Version: 1.0
@file Name: hud.hpp
@file Author: [404] Deadbeat, [KoS] Bewilderbeest
@file Created: 11/09/2012 04:23
@file Args:
*/
// Control IDCs for the HUD dialog's dynamic text controls.
#define hud_status_idc 3600
#define hud_vehicle_idc 3601
#define hud_activity_icon_idc 3602
#define hud_activity_textbox_idc 3603

// Non-interactive HUD overlay (idd = -1).  All controls are structured-text
// fields positioned via the safe-zone variables; their contents are filled
// in at runtime through the IDCs above.
class WastelandHud {
	idd = -1;
	fadeout=0;
	fadein=0;
	duration = 20;
	name= "WastelandHud";
	// Expose the display to scripts via uiNamespace.
	onLoad = "uiNamespace setVariable ['WastelandHud', _this select 0]";

	class controlsBackground {
		// Vehicle info block, anchored to the lower-right of the screen.
		class WastelandHud_Vehicle:w_RscText
		{
			idc = hud_vehicle_idc;
			type = CT_STRUCTURED_TEXT;
			size = 0.040;
			x = safeZoneX + (safeZoneW * (1 - (0.42 / SafeZoneW)));
			y = safeZoneY + (safeZoneH * (1 - (0.33 / SafeZoneH)));
			w = 0.4; h = 0.65;
			colorText[] = {1,1,1,1};
			lineSpacing = 3;
			colorBackground[] = {0,0,0,0};
			text = "";
			shadow = 2;
			class Attributes {
				align = "right";
			};
		};
		// Player status block (bottom-right corner).
		class WastelandHud_Status:w_RscText
		{
			idc = hud_status_idc;
			type = CT_STRUCTURED_TEXT;
			size = 0.040;
			x = safeZoneX + (safeZoneW * (1 - (0.16 / SafeZoneW)));
			y = safeZoneY + (safeZoneH * (1 - (0.22 / SafeZoneH)));
			w = 0.14; h = 0.22;
			colorText[] = {1,1,1,1};
			lineSpacing = 3;
			colorBackground[] = {0,0,0,0};
			text = "";
			shadow = 2;
			class Attributes {
				align = "right";
			};
		};
		// Small activity icon in the top-left corner.
		class WastelandHud_ActivityIcon:w_RscText
		{
			idc = hud_activity_icon_idc;
			type = CT_STRUCTURED_TEXT;
			size = 0.03;
			x = safeZoneX + (safeZoneW * 0.007);
			y = safeZoneY + (safeZoneH * 0.011);
			w = (0.06 * 3/4) * safezoneW;
			h = 0.05 * safezoneH;
			colorText[] = {1,1,1,1};
			lineSpacing = 2;
			colorBackground[] = {0,0,0,0};
			text = "";
			shadow = 2;
			class Attributes {
				align = "center";
				valign = "middle";
			};
		};
		// Activity text next to the icon.
		class WastelandHud_ActivityTextBox:w_RscText
		{
			idc = hud_activity_textbox_idc;
			type = CT_STRUCTURED_TEXT;
			size = 0.03;
			x = safeZoneX + (safeZoneW * 0.055);
			y = safeZoneY + (safeZoneH * 0.011);
			w = 0.18 * safezoneW;
			h = 0.05 * safezoneH;
			colorText[] = {1,1,1,1};
			lineSpacing = 2;
			colorBackground[] = {0,0,0,0};
			text = "";
			shadow = 1;
			class Attributes {
				align = "left";
				valign = "middle";
			};
		};
	};
};
| gpl-3.0 |
scara/moodle | mod/forum/lib.php | 258590 | <?php
// This file is part of Moodle - http://moodle.org/
//
// Moodle is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Moodle is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Moodle. If not, see <http://www.gnu.org/licenses/>.
/**
* @package mod_forum
* @copyright 1999 onwards Martin Dougiamas {@link http://moodle.com}
* @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
*/
defined('MOODLE_INTERNAL') || die();
/** Include required files */
require_once(__DIR__ . '/deprecatedlib.php');
require_once($CFG->libdir.'/filelib.php');
/// CONSTANTS ///////////////////////////////////////////////////////////
// Discussion display modes.
define('FORUM_MODE_FLATOLDEST', 1);   // Flat list, oldest post first.
define('FORUM_MODE_FLATNEWEST', -1);  // Flat list, newest post first.
define('FORUM_MODE_THREADED', 2);     // Threaded view.
define('FORUM_MODE_NESTED', 3);       // Fully nested view.
define('FORUM_MODE_NESTED_V2', 4);    // Nested view, v2 (experimental layout).

// Forum subscription modes.
define('FORUM_CHOOSESUBSCRIBE', 0);   // Users choose whether to subscribe.
define('FORUM_FORCESUBSCRIBE', 1);    // Everyone is subscribed, cannot unsubscribe.
define('FORUM_INITIALSUBSCRIBE', 2);  // Everyone starts subscribed, may unsubscribe.
define('FORUM_DISALLOWSUBSCRIBE',3);  // Subscriptions are not allowed.

/**
 * FORUM_TRACKING_OFF - Tracking is not available for this forum.
 */
define('FORUM_TRACKING_OFF', 0);

/**
 * FORUM_TRACKING_OPTIONAL - Tracking is based on user preference.
 */
define('FORUM_TRACKING_OPTIONAL', 1);

/**
 * FORUM_TRACKING_FORCED - Tracking is on, regardless of user setting.
 * Treated as FORUM_TRACKING_OPTIONAL if $CFG->forum_allowforcedreadtracking is off.
 */
define('FORUM_TRACKING_FORCED', 2);

// States of a queued notification email (forum_posts.mailed).
define('FORUM_MAILED_PENDING', 0);
define('FORUM_MAILED_SUCCESS', 1);
define('FORUM_MAILED_ERROR', 2);

if (!defined('FORUM_CRON_USER_CACHE')) {
    /** Defines how many full user records are cached in forum cron. */
    define('FORUM_CRON_USER_CACHE', 5000);
}

/**
 * FORUM_POSTS_ALL_USER_GROUPS - All the posts in groups where the user is enrolled.
 */
define('FORUM_POSTS_ALL_USER_GROUPS', -2);

// Discussion pinned flag values (forum_discussions.pinned).
define('FORUM_DISCUSSION_PINNED', 1);
define('FORUM_DISCUSSION_UNPINNED', 0);
/// STANDARD FUNCTIONS ///////////////////////////////////////////////////////////
/**
 * Given an object containing all the necessary data,
 * (defined by the form in mod_form.php) this function
 * will create a new instance and return the id number
 * of the new instance.
 *
 * @param stdClass $forum add forum instance
 * @param mod_forum_mod_form $mform
 * @return int instance id
 */
function forum_add_instance($forum, $mform = null) {
    global $CFG, $DB;

    require_once($CFG->dirroot.'/mod/forum/locallib.php');

    $forum->timemodified = time();

    // Normalise rating settings: no rating means no rating time window.
    if (empty($forum->assessed)) {
        $forum->assessed = 0;
    }

    if (empty($forum->ratingtime) or empty($forum->assessed)) {
        $forum->assesstimestart  = 0;
        $forum->assesstimefinish = 0;
    }

    $forum->id = $DB->insert_record('forum', $forum);
    $modcontext = context_module::instance($forum->coursemodule);

    if ($forum->type == 'single') { // Create related discussion.
        // A "single simple discussion" forum always has exactly one
        // discussion, seeded from the forum intro.
        $discussion = new stdClass();
        $discussion->course        = $forum->course;
        $discussion->forum         = $forum->id;
        $discussion->name          = $forum->name;
        $discussion->assessed      = $forum->assessed;
        $discussion->message       = $forum->intro;
        $discussion->messageformat = $forum->introformat;
        $discussion->messagetrust  = trusttext_trusted(context_course::instance($forum->course));
        $discussion->mailnow       = false;
        $discussion->groupid       = -1;

        $message = '';

        $discussion->id = forum_add_discussion($discussion, null, $message);

        if ($mform and $draftid = file_get_submitted_draft_itemid('introeditor')) {
            // Ugly hack - we need to copy the files somehow.
            // Move any files embedded in the intro editor draft area into
            // the first post's own file area and rewrite its message.
            $discussion = $DB->get_record('forum_discussions', array('id'=>$discussion->id), '*', MUST_EXIST);
            $post = $DB->get_record('forum_posts', array('id'=>$discussion->firstpost), '*', MUST_EXIST);

            $options = array('subdirs'=>true); // Use the same options as intro field!
            $post->message = file_save_draft_area_files($draftid, $modcontext->id, 'mod_forum', 'post', $post->id, $options, $post->message);
            $DB->set_field('forum_posts', 'message', $post->message, array('id'=>$post->id));
        }
    }

    forum_update_calendar($forum, $forum->coursemodule);
    forum_grade_item_update($forum);

    // Register the (optional) expected-completion date with core.
    $completiontimeexpected = !empty($forum->completionexpected) ? $forum->completionexpected : null;
    \core_completion\api::update_completion_date_event($forum->coursemodule, 'forum', $forum->id, $completiontimeexpected);

    return $forum->id;
}
/**
 * Handle changes following the creation of a forum instance.
 * This function is typically called by the course_module_created observer.
 *
 * @param object $context the forum context
 * @param stdClass $forum The forum object
 * @return void
 */
function forum_instance_created($context, $forum) {
    // Only "initial subscription" forums need any post-creation work:
    // everyone currently enrolled gets subscribed up front.
    if ($forum->forcesubscribe != FORUM_INITIALSUBSCRIBE) {
        return;
    }

    $potentialsubscribers = \mod_forum\subscriptions::get_potential_subscribers($context, 0, 'u.id, u.email');
    foreach ($potentialsubscribers as $subscriber) {
        \mod_forum\subscriptions::subscribe_user($subscriber->id, $forum, $context);
    }
}
/**
 * Given an object containing all the necessary data,
 * (defined by the form in mod_form.php) this function
 * will update an existing instance with new data.
 *
 * @global object
 * @param object $forum forum instance (with magic quotes)
 * @return bool success
 */
function forum_update_instance($forum, $mform) {
    global $CFG, $DB, $OUTPUT, $USER;

    require_once($CFG->dirroot.'/mod/forum/locallib.php');

    $forum->timemodified = time();
    $forum->id           = $forum->instance;

    // Normalise rating settings: no rating means no rating time window.
    if (empty($forum->assessed)) {
        $forum->assessed = 0;
    }

    if (empty($forum->ratingtime) or empty($forum->assessed)) {
        $forum->assesstimestart  = 0;
        $forum->assesstimefinish = 0;
    }

    $oldforum = $DB->get_record('forum', array('id'=>$forum->id));

    // MDL-3942 - if the aggregation type or scale (i.e. max grade) changes then recalculate the grades for the entire forum
    // if scale changes - do we need to recheck the ratings, if ratings higher than scale how do we want to respond?
    // for count and sum aggregation types the grade we check to make sure they do not exceed the scale (i.e. max score) when calculating the grade
    $updategrades = false;

    if ($oldforum->assessed <> $forum->assessed) {
        // Whether this forum is rated.
        $updategrades = true;
    }

    if ($oldforum->scale <> $forum->scale) {
        // The scale currently in use.
        $updategrades = true;
    }

    if (empty($oldforum->grade_forum) || $oldforum->grade_forum <> $forum->grade_forum) {
        // The whole forum grading.
        $updategrades = true;
    }

    if ($updategrades) {
        forum_update_grades($forum); // Recalculate grades for the forum.
    }

    if ($forum->type == 'single') { // Update related discussion and post.
        $discussions = $DB->get_records('forum_discussions', array('forum'=>$forum->id), 'timemodified ASC');
        if (!empty($discussions)) {
            // A single-discussion forum should only ever have one
            // discussion; warn and use the most recent if corrupted.
            if (count($discussions) > 1) {
                echo $OUTPUT->notification(get_string('warnformorepost', 'forum'));
            }
            $discussion = array_pop($discussions);
        } else {
            // try to recover by creating initial discussion - MDL-16262
            $discussion = new stdClass();
            $discussion->course          = $forum->course;
            $discussion->forum           = $forum->id;
            $discussion->name            = $forum->name;
            $discussion->assessed        = $forum->assessed;
            $discussion->message         = $forum->intro;
            $discussion->messageformat   = $forum->introformat;
            $discussion->messagetrust    = true;
            $discussion->mailnow         = false;
            $discussion->groupid         = -1;

            $message = '';

            forum_add_discussion($discussion, null, $message);

            if (! $discussion = $DB->get_record('forum_discussions', array('forum'=>$forum->id))) {
                print_error('cannotadd', 'forum');
            }
        }
        if (! $post = $DB->get_record('forum_posts', array('id'=>$discussion->firstpost))) {
            print_error('cannotfindfirstpost', 'forum');
        }

        $cm         = get_coursemodule_from_instance('forum', $forum->id);
        $modcontext = context_module::instance($cm->id, MUST_EXIST);

        // Keep the first post in sync with the (possibly edited) intro.
        $post = $DB->get_record('forum_posts', array('id'=>$discussion->firstpost), '*', MUST_EXIST);
        $post->subject       = $forum->name;
        $post->message       = $forum->intro;
        $post->messageformat = $forum->introformat;
        $post->messagetrust  = trusttext_trusted($modcontext);
        $post->modified      = $forum->timemodified;
        $post->userid        = $USER->id;    // MDL-18599, so that current teacher can take ownership of activities.

        if ($mform and $draftid = file_get_submitted_draft_itemid('introeditor')) {
            // Ugly hack - we need to copy the files somehow.
            $options = array('subdirs'=>true); // Use the same options as intro field!
            $post->message = file_save_draft_area_files($draftid, $modcontext->id, 'mod_forum', 'post', $post->id, $options, $post->message);
        }

        \mod_forum\local\entities\post::add_message_counts($post);
        $DB->update_record('forum_posts', $post);
        $discussion->name = $forum->name;
        $DB->update_record('forum_discussions', $discussion);
    }

    $DB->update_record('forum', $forum);

    $modcontext = context_module::instance($forum->coursemodule);
    // If the forum was just switched to "initial subscription", subscribe
    // everyone currently enrolled.
    if (($forum->forcesubscribe == FORUM_INITIALSUBSCRIBE) && ($oldforum->forcesubscribe <> $forum->forcesubscribe)) {
        $users = \mod_forum\subscriptions::get_potential_subscribers($modcontext, 0, 'u.id, u.email', '');
        foreach ($users as $user) {
            \mod_forum\subscriptions::subscribe_user($user->id, $forum, $modcontext);
        }
    }

    forum_update_calendar($forum, $forum->coursemodule);
    forum_grade_item_update($forum);

    $completiontimeexpected = !empty($forum->completionexpected) ? $forum->completionexpected : null;
    \core_completion\api::update_completion_date_event($forum->coursemodule, 'forum', $forum->id, $completiontimeexpected);

    return true;
}
/**
 * Given an ID of an instance of this module,
 * this function will permanently delete the instance
 * and any data that depends on it.
 *
 * Deletion order matters: files, subscriptions, discussions, read
 * records and the grade item are removed before the forum record itself.
 *
 * @global object
 * @param int $id forum instance id
 * @return bool success
 */
function forum_delete_instance($id) {
    global $DB;

    if (!$forum = $DB->get_record('forum', array('id'=>$id))) {
        return false;
    }
    if (!$cm = get_coursemodule_from_instance('forum', $forum->id)) {
        return false;
    }
    if (!$course = $DB->get_record('course', array('id'=>$cm->course))) {
        return false;
    }

    $context = context_module::instance($cm->id);

    // now get rid of all files
    $fs = get_file_storage();
    $fs->delete_area_files($context->id);

    // Deletion is best-effort: keep going on failure but report it.
    $result = true;

    \core_completion\api::update_completion_date_event($cm->id, 'forum', $forum->id, null);

    // Delete digest and subscription preferences.
    $DB->delete_records('forum_digests', array('forum' => $forum->id));
    $DB->delete_records('forum_subscriptions', array('forum'=>$forum->id));
    $DB->delete_records('forum_discussion_subs', array('forum' => $forum->id));

    if ($discussions = $DB->get_records('forum_discussions', array('forum'=>$forum->id))) {
        foreach ($discussions as $discussion) {
            if (!forum_delete_discussion($discussion, true, $course, $cm, $forum)) {
                $result = false;
            }
        }
    }

    // -1 wildcards mean "all users / all posts / all discussions".
    forum_tp_delete_read_records(-1, -1, -1, $forum->id);

    forum_grade_item_delete($forum);

    // We must delete the module record after we delete the grade item.
    if (!$DB->delete_records('forum', array('id'=>$forum->id))) {
        $result = false;
    }

    return $result;
}
/**
 * Indicates API features that the forum supports.
 *
 * @uses FEATURE_GROUPS
 * @uses FEATURE_GROUPINGS
 * @uses FEATURE_MOD_INTRO
 * @uses FEATURE_COMPLETION_TRACKS_VIEWS
 * @uses FEATURE_COMPLETION_HAS_RULES
 * @uses FEATURE_GRADE_HAS_GRADE
 * @uses FEATURE_GRADE_OUTCOMES
 * @param string $feature
 * @return mixed True if yes (some features may use other values)
 */
function forum_supports($feature) {
    // Every feature the forum module advertises support for.
    $supported = array(
        FEATURE_GROUPS,
        FEATURE_GROUPINGS,
        FEATURE_MOD_INTRO,
        FEATURE_COMPLETION_TRACKS_VIEWS,
        FEATURE_COMPLETION_HAS_RULES,
        FEATURE_GRADE_HAS_GRADE,
        FEATURE_GRADE_OUTCOMES,
        FEATURE_RATE,
        FEATURE_BACKUP_MOODLE2,
        FEATURE_SHOW_DESCRIPTION,
        FEATURE_PLAGIARISM,
        FEATURE_ADVANCED_GRADING,
    );

    // Unknown features yield null, matching the module API convention.
    return in_array($feature, $supported, true) ? true : null;
}
/**
 * Create a message-id string to use in the custom headers of forum notification emails
 *
 * message-id is used by email clients to identify emails and to nest conversations
 *
 * @param int $postid The ID of the forum post we are notifying the user about
 * @param int $usertoid The ID of the user being notified
 * @return string A unique message-id
 */
function forum_get_email_message_id($postid, $usertoid) {
    // Derive a stable, unique key from the (post, recipient) pair.
    $uniquekey = hash('sha256', $postid . 'to' . $usertoid);
    return generate_email_messageid($uniquekey);
}
/**
 * Return a one-line outline of a user's activity in this forum, used by the
 * course participation/outline reports.
 *
 * @param object $course
 * @param object $user
 * @param object $mod TODO this is not used in this function, refactor
 * @param object $forum
 * @return object|null A standard object with 2 variables: info (number of posts
 *         for this user, plus any grade info) and time (last modified), or
 *         null when the user has neither posts nor grades in this forum
 */
function forum_user_outline($course, $user, $mod, $forum) {
    global $CFG;
    require_once("$CFG->libdir/gradelib.php");
    $gradeinfo = '';
    $gradetime = 0;
    $grades = grade_get_grades($course->id, 'mod', 'forum', $forum->id, $user->id);
    if (!empty($grades->items[0]->grades)) {
        // Item 0 is the rating.
        $grade = reset($grades->items[0]->grades);
        $gradetime = max($gradetime, grade_get_date_for_user_grade($grade, $user));
        // Hidden grades are only revealed to users with moodle/grade:viewhidden.
        if (!$grade->hidden || has_capability('moodle/grade:viewhidden', context_course::instance($course->id))) {
            $gradeinfo .= get_string('gradeforrating', 'forum', $grade) . html_writer::empty_tag('br');
        } else {
            $gradeinfo .= get_string('gradeforratinghidden', 'forum') . html_writer::empty_tag('br');
        }
    }
    // Item 1 is the whole-forum grade.
    if (!empty($grades->items[1]->grades)) {
        $grade = reset($grades->items[1]->grades);
        $gradetime = max($gradetime, grade_get_date_for_user_grade($grade, $user));
        if (!$grade->hidden || has_capability('moodle/grade:viewhidden', context_course::instance($course->id))) {
            $gradeinfo .= get_string('gradeforwholeforum', 'forum', $grade) . html_writer::empty_tag('br');
        } else {
            $gradeinfo .= get_string('gradeforwholeforumhidden', 'forum') . html_writer::empty_tag('br');
        }
    }
    $count = forum_count_user_posts($forum->id, $user->id);
    if ($count && $count->postcount > 0) {
        $info = get_string("numposts", "forum", $count->postcount);
        $time = $count->lastpost;
        if ($gradeinfo) {
            $info .= ', ' . $gradeinfo;
            // Report the most recent of "last post" and "last graded".
            $time = max($time, $gradetime);
        }
        return (object) [
            'info' => $info,
            'time' => $time,
        ];
    } else if ($gradeinfo) {
        // No posts, but there is grade information worth reporting.
        return (object) [
            'info' => $gradeinfo,
            'time' => $gradetime,
        ];
    }
    return null;
}
/**
 * Print a detailed report of a user's activity in this forum: their grades
 * (rating and whole-forum) followed by their posts.
 *
 * @global object
 * @global object
 * @param object $course
 * @param object $user
 * @param object $mod
 * @param object $forum
 */
function forum_user_complete($course, $user, $mod, $forum) {
    global $CFG, $USER;
    require_once("$CFG->libdir/gradelib.php");
    // Renders one grade item (rating or whole-forum), honouring grade visibility.
    $getgradeinfo = function($grades, string $type) use ($course): string {
        global $OUTPUT;
        if (empty($grades)) {
            return '';
        }
        $result = '';
        $grade = reset($grades);
        if (!$grade->hidden || has_capability('moodle/grade:viewhidden', context_course::instance($course->id))) {
            $result .= $OUTPUT->container(get_string("gradefor{$type}", "forum", $grade));
            if ($grade->str_feedback) {
                $result .= $OUTPUT->container(get_string('feedback').': '.$grade->str_feedback);
            }
        } else {
            // Grade is hidden from this viewer; say so instead of showing it.
            $result .= $OUTPUT->container(get_string("gradefor{$type}hidden", "forum"));
        }
        return $result;
    };
    $grades = grade_get_grades($course->id, 'mod', 'forum', $forum->id, $user->id);
    // Item 0 is the rating.
    if (!empty($grades->items[0]->grades)) {
        echo $getgradeinfo($grades->items[0]->grades, 'rating');
    }
    // Item 1 is the whole-forum grade.
    if (!empty($grades->items[1]->grades)) {
        echo $getgradeinfo($grades->items[1]->grades, 'wholeforum');
    }
    if ($posts = forum_get_user_posts($forum->id, $user->id)) {
        if (!$cm = get_coursemodule_from_instance('forum', $forum->id, $course->id)) {
            print_error('invalidcoursemodule');
        }
        $context = context_module::instance($cm->id);
        $discussions = forum_get_user_involved_discussions($forum->id, $user->id);
        // Keep only posts belonging to discussions the user was involved in.
        $posts = array_filter($posts, function($post) use ($discussions) {
            return isset($discussions[$post->discussion]);
        });
        $entityfactory = mod_forum\local\container::get_entity_factory();
        $rendererfactory = mod_forum\local\container::get_renderer_factory();
        $postrenderer = $rendererfactory->get_posts_renderer();
        echo $postrenderer->render(
            $USER,
            [$forum->id => $entityfactory->get_forum_from_stdclass($forum, $context, $cm, $course)],
            array_map(function($discussion) use ($entityfactory) {
                return $entityfactory->get_discussion_from_stdclass($discussion);
            }, $discussions),
            array_map(function($post) use ($entityfactory) {
                return $entityfactory->get_post_from_stdclass($post);
            }, $posts)
        );
    } else {
        echo "<p>".get_string("noposts", "forum")."</p>";
    }
}
/**
 * @deprecated since Moodle 3.3, when the block_course_overview block was removed.
 */
function forum_filter_user_groups_discussions() {
    // This API was removed together with the course overview block; always fail.
    $message = 'forum_filter_user_groups_discussions() can not be used any more and is obsolete.';
    throw new coding_exception($message);
}
/**
 * Returns whether the discussion group is visible by the current user or not.
 *
 * @since Moodle 2.8, 2.7.1, 2.6.4
 * @param cm_info $cm The discussion course module
 * @param int $discussiongroupid The discussion groupid
 * @return bool
 */
function forum_is_user_group_discussion(cm_info $cm, $discussiongroupid) {
    // "All participants" discussions (-1) are always visible, as is anything
    // in a forum that is not in separate-groups mode.
    if ($discussiongroupid == -1) {
        return true;
    }
    if ($cm->effectivegroupmode != SEPARATEGROUPS) {
        return true;
    }
    // Guests never belong to groups.
    if (isguestuser()) {
        return false;
    }
    // Users who can access all groups see every group's discussions.
    if (has_capability('moodle/site:accessallgroups', context_module::instance($cm->id))) {
        return true;
    }
    // Otherwise the user must be a member of the discussion's group.
    return in_array($discussiongroupid, $cm->get_modinfo()->get_groups($cm->groupingid));
}
/**
 * @deprecated since Moodle 3.3, when the block_course_overview block was removed.
 */
function forum_print_overview() {
    // This API was removed together with the course overview block; always fail.
    $message = 'forum_print_overview() can not be used any more and is obsolete.';
    throw new coding_exception($message);
}
/**
 * Given a course and a date, prints a summary of all the new
 * messages posted in the course since that date
 *
 * A single query fetches posts together with their forum and discussion data;
 * entity objects and capability managers are built once per discussion/forum
 * and cached to avoid extra DB work while filtering posts the user may view.
 *
 * @global object
 * @global object
 * @global object
 * @uses CONTEXT_MODULE
 * @uses VISIBLEGROUPS
 * @param object $course
 * @param bool $viewfullnames capability
 * @param int $timestart only posts created after this timestamp are shown
 * @return bool success - false when there is nothing visible to print
 */
function forum_print_recent_activity($course, $viewfullnames, $timestart) {
    global $USER, $DB, $OUTPUT;
    // do not use log table if possible, it may be huge and is expensive to join with other tables
    $userfieldsapi = \core_user\fields::for_userpic();
    $allnamefields = $userfieldsapi->get_sql('u', false, '', 'duserid', false)->selects;
    if (!$posts = $DB->get_records_sql("SELECT p.*,
                                              f.course, f.type AS forumtype, f.name AS forumname, f.intro, f.introformat, f.duedate,
                                              f.cutoffdate, f.assessed AS forumassessed, f.assesstimestart, f.assesstimefinish,
                                              f.scale, f.grade_forum, f.maxbytes, f.maxattachments, f.forcesubscribe,
                                              f.trackingtype, f.rsstype, f.rssarticles, f.timemodified, f.warnafter, f.blockafter,
                                              f.blockperiod, f.completiondiscussions, f.completionreplies, f.completionposts,
                                              f.displaywordcount, f.lockdiscussionafter, f.grade_forum_notify,
                                              d.name AS discussionname, d.firstpost, d.userid AS discussionstarter,
                                              d.assessed AS discussionassessed, d.timemodified, d.usermodified, d.forum, d.groupid,
                                              d.timestart, d.timeend, d.pinned, d.timelocked,
                                              $allnamefields
                                         FROM {forum_posts} p
                                              JOIN {forum_discussions} d ON d.id = p.discussion
                                              JOIN {forum} f             ON f.id = d.forum
                                              JOIN {user} u              ON u.id = p.userid
                                        WHERE p.created > ? AND f.course = ? AND p.deleted <> 1
                                     ORDER BY p.id ASC", array($timestart, $course->id))) { // order by initial posting date
         return false;
    }
    $modinfo = get_fast_modinfo($course);
    $strftimerecent = get_string('strftimerecent');
    $managerfactory = mod_forum\local\container::get_manager_factory();
    $entityfactory = mod_forum\local\container::get_entity_factory();
    // Per-request caches keyed by discussion id / forum id.
    $discussions = [];
    $capmanagers = [];
    $printposts = [];
    foreach ($posts as $post) {
        if (!isset($modinfo->instances['forum'][$post->forum])) {
            // not visible
            continue;
        }
        $cm = $modinfo->instances['forum'][$post->forum];
        if (!$cm->uservisible) {
            continue;
        }
        // Get the discussion. Cache if not yet available.
        if (!isset($discussions[$post->discussion])) {
            // Build the discussion record object from the post data.
            $discussionrecord = (object)[
                'id' => $post->discussion,
                'course' => $post->course,
                'forum' => $post->forum,
                'name' => $post->discussionname,
                'firstpost' => $post->firstpost,
                'userid' => $post->discussionstarter,
                'groupid' => $post->groupid,
                'assessed' => $post->discussionassessed,
                'timemodified' => $post->timemodified,
                'usermodified' => $post->usermodified,
                'timestart' => $post->timestart,
                'timeend' => $post->timeend,
                'pinned' => $post->pinned,
                'timelocked' => $post->timelocked
            ];
            // Build the discussion entity from the factory and cache it.
            $discussions[$post->discussion] = $entityfactory->get_discussion_from_stdclass($discussionrecord);
        }
        $discussionentity = $discussions[$post->discussion];
        // Get the capability manager. Cache if not yet available.
        if (!isset($capmanagers[$post->forum])) {
            $context = context_module::instance($cm->id);
            $coursemodule = $cm->get_course_module_record();
            // Build the forum record object from the post data.
            $forumrecord = (object)[
                'id' => $post->forum,
                'course' => $post->course,
                'type' => $post->forumtype,
                'name' => $post->forumname,
                'intro' => $post->intro,
                'introformat' => $post->introformat,
                'duedate' => $post->duedate,
                'cutoffdate' => $post->cutoffdate,
                'assessed' => $post->forumassessed,
                'assesstimestart' => $post->assesstimestart,
                'assesstimefinish' => $post->assesstimefinish,
                'scale' => $post->scale,
                'grade_forum' => $post->grade_forum,
                'maxbytes' => $post->maxbytes,
                'maxattachments' => $post->maxattachments,
                'forcesubscribe' => $post->forcesubscribe,
                'trackingtype' => $post->trackingtype,
                'rsstype' => $post->rsstype,
                'rssarticles' => $post->rssarticles,
                'timemodified' => $post->timemodified,
                'warnafter' => $post->warnafter,
                'blockafter' => $post->blockafter,
                'blockperiod' => $post->blockperiod,
                'completiondiscussions' => $post->completiondiscussions,
                'completionreplies' => $post->completionreplies,
                'completionposts' => $post->completionposts,
                'displaywordcount' => $post->displaywordcount,
                'lockdiscussionafter' => $post->lockdiscussionafter,
                'grade_forum_notify' => $post->grade_forum_notify
            ];
            // Build the forum entity from the factory.
            $forumentity = $entityfactory->get_forum_from_stdclass($forumrecord, $context, $coursemodule, $course);
            // Get the capability manager of this forum and cache it.
            $capmanagers[$post->forum] = $managerfactory->get_capability_manager($forumentity);
        }
        $capabilitymanager = $capmanagers[$post->forum];
        // Get the post entity.
        $postentity = $entityfactory->get_post_from_stdclass($post);
        // Check if the user can view the post.
        if ($capabilitymanager->can_view_post($USER, $discussionentity, $postentity)) {
            $printposts[] = $post;
        }
    }
    unset($posts);
    if (!$printposts) {
        return false;
    }
    echo $OUTPUT->heading(get_string('newforumposts', 'forum') . ':', 6);
    $list = html_writer::start_tag('ul', ['class' => 'unlist']);
    foreach ($printposts as $post) {
        // First posts (no parent) are shown in bold.
        $subjectclass = empty($post->parent) ? ' bold' : '';
        $authorhidden = forum_is_author_hidden($post, (object) ['type' => $post->forumtype]);
        $list .= html_writer::start_tag('li');
        $list .= html_writer::start_div('head');
        $list .= html_writer::div(userdate_htmltime($post->modified, $strftimerecent), 'date');
        if (!$authorhidden) {
            $list .= html_writer::div(fullname($post, $viewfullnames), 'name');
        }
        $list .= html_writer::end_div(); // Head.
        $list .= html_writer::start_div('info' . $subjectclass);
        $discussionurl = new moodle_url('/mod/forum/discuss.php', ['d' => $post->discussion]);
        if (!empty($post->parent)) {
            // Replies link directly to the post within the discussion page.
            $discussionurl->param('parent', $post->parent);
            $discussionurl->set_anchor('p'. $post->id);
        }
        $post->subject = break_up_long_words(format_string($post->subject, true));
        $list .= html_writer::link($discussionurl, $post->subject, ['rel' => 'bookmark']);
        $list .= html_writer::end_div(); // Info.
        $list .= html_writer::end_tag('li');
    }
    $list .= html_writer::end_tag('ul');
    echo $list;
    return true;
}
/**
 * Update activity grades in the gradebook.
 *
 * Pushes both the aggregated rating grade (gradebook itemnumber 0) and the
 * whole-forum grade (itemnumber 1) for the requested user(s).
 *
 * @param object $forum forum record, including id, assessed, scale and grade_forum
 * @param int $userid specific user only, 0 means all users
 */
function forum_update_grades($forum, $userid = 0): void {
    global $CFG, $DB;
    require_once($CFG->libdir.'/gradelib.php');

    // Aggregated rating grades, only if ratings are enabled for this forum.
    $ratings = null;
    if ($forum->assessed) {
        require_once($CFG->dirroot.'/rating/lib.php');

        $cm = get_coursemodule_from_instance('forum', $forum->id);

        $rm = new rating_manager();
        $ratings = $rm->get_user_grades((object) [
            'component' => 'mod_forum',
            'ratingarea' => 'post',
            'contextid' => \context_module::instance($cm->id)->id,

            'modulename' => 'forum',
            // Fixed: the key previously contained a trailing space ('moduleid ')
            // so the module id was never passed to the rating manager.
            'moduleid' => $forum->id,
            'userid' => $userid,

            'aggregationmethod' => $forum->assessed,
            'scaleid' => $forum->scale,
            'itemtable' => 'forum_posts',
            'itemtableusercolumn' => 'userid',
        ]);
    }

    // Whole-forum grades, only if whole-forum grading is enabled.
    $forumgrades = null;
    if ($forum->grade_forum) {
        $sql = <<<EOF
        SELECT
            g.userid,
            0 as datesubmitted,
            g.grade as rawgrade,
            g.timemodified as dategraded
          FROM {forum} f
          JOIN {forum_grades} g ON g.forum = f.id
         WHERE f.id = :forumid
        EOF;

        $params = [
            'forumid' => $forum->id,
        ];

        if ($userid) {
            $sql .= " AND g.userid = :userid";
            $params['userid'] = $userid;
        }

        $forumgrades = [];
        if ($grades = $DB->get_recordset_sql($sql, $params)) {
            // Use a distinct loop variable so the $userid parameter is not clobbered.
            foreach ($grades as $gradeduserid => $grade) {
                // A raw grade of -1 means "not graded yet"; skip those rows.
                if ($grade->rawgrade != -1) {
                    $forumgrades[$gradeduserid] = $grade;
                }
            }
            $grades->close();
        }
    }

    forum_grade_item_update($forum, $ratings, $forumgrades);
}
/**
 * Create/update grade items for given forum.
 *
 * Maintains two gradebook items: itemnumber 0 for the rating grade and
 * itemnumber 1 for the whole-forum grade.
 *
 * @param stdClass $forum Forum object with extra cmidnumber
 * @param mixed $ratings Optional array/object of rating grade(s); 'reset' means reset grades in gradebook
 * @param mixed $forumgrades Optional array/object of whole-forum grade(s); 'reset' means reset grades in gradebook
 */
function forum_grade_item_update($forum, $ratings = null, $forumgrades = null): void {
    global $CFG;
    require_once("{$CFG->libdir}/gradelib.php");
    // Update the rating.
    $item = [
        'itemname' => get_string('gradeitemnameforrating', 'forum', $forum),
        'idnumber' => $forum->cmidnumber,
    ];
    // Positive scale = maximum point value; negative scale = id of a scale record.
    if (!$forum->assessed || $forum->scale == 0) {
        $item['gradetype'] = GRADE_TYPE_NONE;
    } else if ($forum->scale > 0) {
        $item['gradetype'] = GRADE_TYPE_VALUE;
        $item['grademax'] = $forum->scale;
        $item['grademin'] = 0;
    } else if ($forum->scale < 0) {
        $item['gradetype'] = GRADE_TYPE_SCALE;
        $item['scaleid'] = -$forum->scale;
    }
    if ($ratings === 'reset') {
        $item['reset'] = true;
        $ratings = null;
    }
    // Itemnumber 0 is the rating.
    grade_update('mod/forum', $forum->course, 'mod', 'forum', $forum->id, 0, $ratings, $item);
    // Whole forum grade.
    $item = [
        'itemname' => get_string('gradeitemnameforwholeforum', 'forum', $forum),
        // Note: We do not need to store the idnumber here.
    ];
    // Same positive/negative convention as the rating scale above.
    if (!$forum->grade_forum) {
        $item['gradetype'] = GRADE_TYPE_NONE;
    } else if ($forum->grade_forum > 0) {
        $item['gradetype'] = GRADE_TYPE_VALUE;
        $item['grademax'] = $forum->grade_forum;
        $item['grademin'] = 0;
    } else if ($forum->grade_forum < 0) {
        $item['gradetype'] = GRADE_TYPE_SCALE;
        $item['scaleid'] = $forum->grade_forum * -1;
    }
    if ($forumgrades === 'reset') {
        $item['reset'] = true;
        $forumgrades = null;
    }
    // Itemnumber 1 is the whole forum grade.
    grade_update('mod/forum', $forum->course, 'mod', 'forum', $forum->id, 1, $forumgrades, $item);
}
/**
 * Delete grade item for given forum.
 *
 * @param stdClass $forum Forum object
 */
function forum_grade_item_delete($forum) {
    global $CFG;
    require_once($CFG->libdir.'/gradelib.php');
    // Remove both gradebook items: 0 (rating) and 1 (whole-forum grade).
    foreach ([0, 1] as $itemnumber) {
        grade_update('mod/forum', $forum->course, 'mod', 'forum', $forum->id, $itemnumber, null, ['deleted' => 1]);
    }
}
/**
 * Checks if scale is being used by any instance of forum.
 *
 * This is used to find out if scale used anywhere.
 *
 * @param int $scaleid id of the scale record
 * @return bool True if the scale is used by any forum
 */
function forum_scale_used_anywhere(int $scaleid): bool {
    global $DB;
    if (empty($scaleid)) {
        return false;
    }
    // Scales are stored as negative values in the forum.scale column, and only
    // count when rating/assessment is enabled.
    $select = "scale = ? and assessed > 0";
    return $DB->record_exists_select('forum', $select, [-$scaleid]);
}
// SQL FUNCTIONS ///////////////////////////////////////////////////////////
/**
 * Gets a post with all info ready for forum_print_post
 * Most of these joins are just to get the forum id
 *
 * The user join is a LEFT JOIN so posts by deleted users are still returned
 * (with null author fields).
 *
 * @global object
 * @global object
 * @param int $postid
 * @return mixed post record (including d.forum and author name/email/picture
 *         fields) or false if no such post exists
 */
function forum_get_post_full($postid) {
    global $CFG, $DB;
    $userfieldsapi = \core_user\fields::for_name();
    $allnames = $userfieldsapi->get_sql('u', false, '', '', false)->selects;
    return $DB->get_record_sql("SELECT p.*, d.forum, $allnames, u.email, u.picture, u.imagealt
                             FROM {forum_posts} p
                                  JOIN {forum_discussions} d ON p.discussion = d.id
                                  LEFT JOIN {user} u ON p.userid = u.id
                            WHERE p.id = ?", array($postid));
}
/**
 * Gets all posts in discussion including top parent.
 *
 * Besides returning the flat list, this links posts into a tree: each post
 * gains a 'children' array of references to its replies, and the final post
 * in the thread (depth-first, last child of last child...) is flagged with
 * lastpost = true.
 *
 * @param int $discussionid The Discussion to fetch.
 * @param string $sort The sorting to apply.
 * @param bool $tracking Whether the user tracks this forum.
 * @return array The posts in the discussion.
 */
function forum_get_all_discussion_posts($discussionid, $sort, $tracking = false) {
    global $CFG, $DB, $USER;
    $tr_sel  = "";
    $tr_join = "";
    $params = array();
    if ($tracking) {
        // Join against forum_read so each post carries a 'postread' marker.
        $tr_sel  = ", fr.id AS postread";
        $tr_join = "LEFT JOIN {forum_read} fr ON (fr.postid = p.id AND fr.userid = ?)";
        $params[] = $USER->id;
    }
    $userfieldsapi = \core_user\fields::for_name();
    $allnames = $userfieldsapi->get_sql('u', false, '', '', false)->selects;
    $params[] = $discussionid;
    if (!$posts = $DB->get_records_sql("SELECT p.*, $allnames, u.email, u.picture, u.imagealt $tr_sel
                                     FROM {forum_posts} p
                                          LEFT JOIN {user} u ON p.userid = u.id
                                          $tr_join
                                    WHERE p.discussion = ?
                                 ORDER BY $sort", $params)) {
        return array();
    }
    foreach ($posts as $pid=>$p) {
        if ($tracking) {
            // Posts older than the tracking window always count as read.
            if (forum_tp_is_post_old($p)) {
                  $posts[$pid]->postread = true;
            }
        }
        if (!$p->parent) {
            continue;
        }
        if (!isset($posts[$p->parent])) {
            continue; // parent does not exist??
        }
        if (!isset($posts[$p->parent]->children)) {
            $posts[$p->parent]->children = array();
        }
        // Store a reference, so later mutations (e.g. lastpost) are shared.
        $posts[$p->parent]->children[$pid] =& $posts[$pid];
    }
    // Start with the last child of the first post.
    $post = &$posts[reset($posts)->id];
    $lastpost = false;
    // Walk down the "last child" chain until a leaf is found; that leaf is
    // the final post of the discussion in display order.
    while (!$lastpost) {
        if (!isset($post->children)) {
            $post->lastpost = true;
            $lastpost = true;
        } else {
             // Go to the last child of this post.
            $post = &$posts[end($post->children)->id];
        }
    }
    return $posts;
}
/**
 * An array of forum objects that the user is allowed to read/search through.
 *
 * Each returned forum is annotated with its context and cm, plus access hints
 * used by the search code: onlygroups (separate-groups restriction),
 * viewhiddentimedposts, and onlydiscussions (Q&A forums where the user has
 * not posted).
 *
 * @global object
 * @global object
 * @global object
 * @param int $userid
 * @param int $courseid if 0, we look for forums throughout the whole site.
 * @return array of forum objects, or false if no matches
 *         Forum objects have the following attributes:
 *         id, type, course, cmid, cmvisible, cmgroupmode, accessallgroups,
 *         viewhiddentimedposts
 */
function forum_get_readable_forums($userid, $courseid=0) {
    global $CFG, $DB, $USER;
    require_once($CFG->dirroot.'/course/lib.php');
    if (!$forummod = $DB->get_record('modules', array('name' => 'forum'))) {
        print_error('notinstalled', 'forum');
    }
    if ($courseid) {
        $courses = $DB->get_records('course', array('id' => $courseid));
    } else {
        // If no course is specified, then the user can see SITE + his courses.
        $courses1 = $DB->get_records('course', array('id' => SITEID));
        $courses2 = enrol_get_users_courses($userid, true, array('modinfo'));
        $courses = array_merge($courses1, $courses2);
    }
    if (!$courses) {
        return array();
    }
    $readableforums = array();
    foreach ($courses as $course) {
        $modinfo = get_fast_modinfo($course);
        if (empty($modinfo->instances['forum'])) {
            // hmm, no forums?
            continue;
        }
        $courseforums = $DB->get_records('forum', array('course' => $course->id));
        foreach ($modinfo->instances['forum'] as $forumid => $cm) {
            if (!$cm->uservisible or !isset($courseforums[$forumid])) {
                continue;
            }
            $context = context_module::instance($cm->id);
            $forum = $courseforums[$forumid];
            $forum->context = $context;
            $forum->cm = $cm;
            if (!has_capability('mod/forum:viewdiscussion', $context)) {
                continue;
            }
         /// group access
            if (groups_get_activity_groupmode($cm, $course) == SEPARATEGROUPS and !has_capability('moodle/site:accessallgroups', $context)) {
                // Restrict to the user's groups; -1 covers "all participants" discussions.
                $forum->onlygroups = $modinfo->get_groups($cm->groupingid);
                $forum->onlygroups[] = -1;
            }
        /// hidden timed discussions
            $forum->viewhiddentimedposts = true;
            if (!empty($CFG->forum_enabletimedposts)) {
                if (!has_capability('mod/forum:viewhiddentimedposts', $context)) {
                    $forum->viewhiddentimedposts = false;
                }
            }
        /// qanda access
            if ($forum->type == 'qanda'
                    && !has_capability('mod/forum:viewqandawithoutposting', $context)) {
                // We need to check whether the user has posted in the qanda forum.
                $forum->onlydiscussions = array();  // Holds discussion ids for the discussions
                                                    // the user is allowed to see in this forum.
                if ($discussionspostedin = forum_discussions_user_has_posted_in($forum->id, $USER->id)) {
                    foreach ($discussionspostedin as $d) {
                        $forum->onlydiscussions[] = $d->id;
                    }
                }
            }
            $readableforums[$forum->id] = $forum;
        }
        unset($modinfo);
    } // End foreach $courses
    return $readableforums;
}
/**
 * Returns a list of posts found using an array of search terms.
 *
 * Builds a per-forum access WHERE clause (hidden timed posts, Q&A forum
 * restrictions and separate-groups membership) from forum_get_readable_forums(),
 * parses the search terms with the search parser, and runs a count query plus
 * a paged results query.
 *
 * @global object
 * @global object
 * @global object
 * @param array $searchterms array of search terms, e.g. word +word -word
 * @param int $courseid if 0, we search through the whole site
 * @param int $limitfrom
 * @param int $limitnum
 * @param int &$totalcount set to the total number of matching posts
 * @param string $extrasql additional SQL appended to the WHERE clause
 * @return array|bool Array of posts found or false
 */
function forum_search_posts($searchterms, $courseid, $limitfrom, $limitnum,
                            &$totalcount, $extrasql='') {
    global $CFG, $DB, $USER;
    require_once($CFG->libdir.'/searchlib.php');

    $forums = forum_get_readable_forums($USER->id, $courseid);

    if (count($forums) == 0) {
        $totalcount = 0;
        return false;
    }

    $now = floor(time() / 60) * 60; // DB Cache Friendly.

    $fullaccess = array();
    $where = array();
    $params = array();

    foreach ($forums as $forumid => $forum) {
        $select = array();

        // Hide timed discussions the user may not see, unless they started them.
        if (!$forum->viewhiddentimedposts) {
            $select[] = "(d.userid = :userid{$forumid} OR (d.timestart < :timestart{$forumid} AND (d.timeend = 0 OR d.timeend > :timeend{$forumid})))";
            $params = array_merge($params, array('userid'.$forumid=>$USER->id, 'timestart'.$forumid=>$now, 'timeend'.$forumid=>$now));
        }

        $cm = $forum->cm;
        $context = $forum->context;

        // In Q&A forums, users who have not posted may only see first posts.
        if ($forum->type == 'qanda'
            && !has_capability('mod/forum:viewqandawithoutposting', $context)) {
            if (!empty($forum->onlydiscussions)) {
                list($discussionid_sql, $discussionid_params) = $DB->get_in_or_equal($forum->onlydiscussions, SQL_PARAMS_NAMED, 'qanda'.$forumid.'_');
                $params = array_merge($params, $discussionid_params);
                $select[] = "(d.id $discussionid_sql OR p.parent = 0)";
            } else {
                $select[] = "p.parent = 0";
            }
        }

        // Separate groups: restrict to the user's groups ("-1" = all participants).
        if (!empty($forum->onlygroups)) {
            list($groupid_sql, $groupid_params) = $DB->get_in_or_equal($forum->onlygroups, SQL_PARAMS_NAMED, 'grps'.$forumid.'_');
            $params = array_merge($params, $groupid_params);
            $select[] = "d.groupid $groupid_sql";
        }

        if ($select) {
            $selects = implode(" AND ", $select);
            $where[] = "(d.forum = :forum{$forumid} AND $selects)";
            $params['forum'.$forumid] = $forumid;
        } else {
            // No restrictions for this forum; fold it into one IN() clause below.
            $fullaccess[] = $forumid;
        }
    }

    if ($fullaccess) {
        list($fullid_sql, $fullid_params) = $DB->get_in_or_equal($fullaccess, SQL_PARAMS_NAMED, 'fula');
        $params = array_merge($params, $fullid_params);
        $where[] = "(d.forum $fullid_sql)";
    }

    $favjoin = "";
    if (in_array('starredonly:on', $searchterms)) {
        // Restrict to discussions the user has starred (favourited).
        $usercontext = context_user::instance($USER->id);
        $ufservice = \core_favourites\service_factory::get_service_for_user_context($usercontext);
        list($favjoin, $favparams) = $ufservice->get_join_sql_by_type('mod_forum', 'discussions',
            "favourited", "d.id");

        $searchterms = array_values(array_diff($searchterms, array('starredonly:on')));
        $params = array_merge($params, $favparams);
        $extrasql .= " AND favourited.itemid IS NOT NULL AND favourited.itemid != 0";
    }

    $selectdiscussion = "(".implode(" OR ", $where).")";

    $messagesearch = '';
    $searchstring = '';

    // Need to concat these back together for parser to work.
    foreach($searchterms as $searchterm){
        if ($searchstring != '') {
            $searchstring .= ' ';
        }
        $searchstring .= $searchterm;
    }

    // We need to allow quoted strings for the search. The quotes *should* be stripped
    // by the parser, but this should be examined carefully for security implications.
    $searchstring = str_replace("\\\"","\"",$searchstring);

    // Initialise before the parse branch: $tagjoins is interpolated into the
    // FROM clause below even when the search string fails to parse, so leaving
    // it inside the if() produced an undefined variable on parse failure.
    $tagjoins = '';
    $tagfields = [];
    $tagfieldcount = 0;

    $parser = new search_parser();
    $lexer = new search_lexer($parser);

    if ($lexer->parse($searchstring)) {
        $parsearray = $parser->get_parsed_array();

        if ($parsearray) {
            foreach ($parsearray as $token) {
                if ($token->getType() == TOKEN_TAGS) {
                    for ($i = 0; $i <= substr_count($token->getValue(), ','); $i++) {
                        // Queries can only have a limited number of joins so set a limit sensible users won't exceed.
                        if ($tagfieldcount > 10) {
                            continue;
                        }
                        $tagjoins .= " LEFT JOIN {tag_instance} ti_$tagfieldcount
                                        ON p.id = ti_$tagfieldcount.itemid
                                            AND ti_$tagfieldcount.component = 'mod_forum'
                                            AND ti_$tagfieldcount.itemtype = 'forum_posts'";
                        $tagjoins .= " LEFT JOIN {tag} t_$tagfieldcount ON t_$tagfieldcount.id = ti_$tagfieldcount.tagid";
                        $tagfields[] = "t_$tagfieldcount.rawname";
                        $tagfieldcount++;
                    }
                }
            }
            list($messagesearch, $msparams) = search_generate_SQL($parsearray, 'p.message', 'p.subject',
                                                                  'p.userid', 'u.id', 'u.firstname',
                                                                  'u.lastname', 'p.modified', 'd.forum',
                                                                  $tagfields);
            $params = ($msparams ? array_merge($params, $msparams) : $params);
        }
    }

    $fromsql = "{forum_posts} p
                  INNER JOIN {forum_discussions} d ON d.id = p.discussion
                  INNER JOIN {user} u ON u.id = p.userid $tagjoins $favjoin";

    $selectsql = ($messagesearch ? $messagesearch . " AND " : "").
                 " p.discussion = d.id
                   AND p.userid = u.id
                   AND $selectdiscussion
                       $extrasql";

    $countsql = "SELECT COUNT(*)
                   FROM $fromsql
                  WHERE $selectsql";

    $userfieldsapi = \core_user\fields::for_name();
    $allnames = $userfieldsapi->get_sql('u', false, '', '', false)->selects;
    $searchsql = "SELECT p.*,
                         d.forum,
                         $allnames,
                         u.email,
                         u.picture,
                         u.imagealt
                    FROM $fromsql
                   WHERE $selectsql
                ORDER BY p.modified DESC";

    $totalcount = $DB->count_records_sql($countsql, $params);
    return $DB->get_records_sql($searchsql, $params, $limitfrom, $limitnum);
}
/**
 * Get all the posts for a user in a forum suitable for forum_print_post
 *
 * Posts in hidden timed discussions are excluded unless timed posts are
 * disabled site-wide or the current user may view hidden timed posts.
 *
 * @global object
 * @global object
 * @uses CONTEXT_MODULE
 * @param int $forumid
 * @param int $userid
 * @return array post records (with d.forum and author fields) ordered by
 *         modification time, oldest first
 */
function forum_get_user_posts($forumid, $userid) {
    global $CFG, $DB;
    $timedsql = "";
    $params = array($forumid, $userid);
    if (!empty($CFG->forum_enabletimedposts)) {
        $cm = get_coursemodule_from_instance('forum', $forumid);
        if (!has_capability('mod/forum:viewhiddentimedposts' , context_module::instance($cm->id))) {
            // Only discussions currently within their visible time window.
            $now = time();
            $timedsql = "AND (d.timestart < ? AND (d.timeend = 0 OR d.timeend > ?))";
            $params[] = $now;
            $params[] = $now;
        }
    }
    $userfieldsapi = \core_user\fields::for_name();
    $allnames = $userfieldsapi->get_sql('u', false, '', '', false)->selects;
    return $DB->get_records_sql("SELECT p.*, d.forum, $allnames, u.email, u.picture, u.imagealt
                              FROM {forum} f
                                   JOIN {forum_discussions} d ON d.forum = f.id
                                   JOIN {forum_posts} p       ON p.discussion = d.id
                                   JOIN {user} u              ON u.id = p.userid
                             WHERE f.id = ?
                                   AND p.userid = ?
                                   $timedsql
                          ORDER BY p.modified ASC", $params);
}
/**
 * Get all the discussussions user participated in
 *
 * A discussion counts as "involved" when the user authored at least one post
 * in it. Hidden timed discussions are excluded unless timed posts are
 * disabled site-wide or the current user may view hidden timed posts.
 *
 * @global object
 * @global object
 * @uses CONTEXT_MODULE
 * @param int $forumid
 * @param int $userid
 * @return array Array of discussion records keyed by id, or false
 */
function forum_get_user_involved_discussions($forumid, $userid) {
    global $CFG, $DB;
    $timedsql = "";
    $params = array($forumid, $userid);
    if (!empty($CFG->forum_enabletimedposts)) {
        $cm = get_coursemodule_from_instance('forum', $forumid);
        if (!has_capability('mod/forum:viewhiddentimedposts' , context_module::instance($cm->id))) {
            // Only discussions currently within their visible time window.
            $now = time();
            $timedsql = "AND (d.timestart < ? AND (d.timeend = 0 OR d.timeend > ?))";
            $params[] = $now;
            $params[] = $now;
        }
    }
    return $DB->get_records_sql("SELECT DISTINCT d.*
                              FROM {forum} f
                                   JOIN {forum_discussions} d ON d.forum = f.id
                                   JOIN {forum_posts} p       ON p.discussion = d.id
                             WHERE f.id = ?
                                   AND p.userid = ?
                                   $timedsql", $params);
}
/**
 * Count the posts a user made in a forum, and find the time of their latest one.
 *
 * Posts in hidden timed discussions are excluded unless timed posts are
 * disabled site-wide or the current user may view hidden timed posts.
 *
 * @global object
 * @global object
 * @param int $forumid
 * @param int $userid
 * @return object record with postcount and lastpost fields, or false
 */
function forum_count_user_posts($forumid, $userid) {
    global $CFG, $DB;
    $timedsql = "";
    $params = array($forumid, $userid);
    if (!empty($CFG->forum_enabletimedposts)) {
        $cm = get_coursemodule_from_instance('forum', $forumid);
        if (!has_capability('mod/forum:viewhiddentimedposts' , context_module::instance($cm->id))) {
            // Only discussions currently within their visible time window.
            $now = time();
            $timedsql = "AND (d.timestart < ? AND (d.timeend = 0 OR d.timeend > ?))";
            $params[] = $now;
            $params[] = $now;
        }
    }
    return $DB->get_record_sql("SELECT COUNT(p.id) AS postcount, MAX(p.modified) AS lastpost
                             FROM {forum} f
                                  JOIN {forum_discussions} d ON d.forum = f.id
                                  JOIN {forum_posts} p       ON p.discussion = d.id
                                  JOIN {user} u              ON u.id = p.userid
                            WHERE f.id = ?
                                  AND p.userid = ?
                                  $timedsql", $params);
}
/**
 * Given a log entry, return the forum post details for it.
 *
 * Supports "add post" (log info holds a post id) and "add discussion"
 * (log info holds a discussion id, resolved to its first post). Posts by
 * deleted users are excluded.
 *
 * @global object
 * @global object
 * @param object $log legacy log record with action and info fields
 * @return object|null post record (with forumtype, d.forum, d.groupid and
 *         author fields) or null for unrecognised actions
 */
function forum_get_post_from_log($log) {
    global $CFG, $DB;
    $userfieldsapi = \core_user\fields::for_name();
    $allnames = $userfieldsapi->get_sql('u', false, '', '', false)->selects;
    if ($log->action == "add post") {
        return $DB->get_record_sql("SELECT p.*, f.type AS forumtype, d.forum, d.groupid, $allnames, u.email, u.picture
                                 FROM {forum_discussions} d,
                                      {forum_posts} p,
                                      {forum} f,
                                      {user} u
                                WHERE p.id = ?
                                  AND d.id = p.discussion
                                  AND p.userid = u.id
                                  AND u.deleted <> '1'
                                  AND f.id = d.forum", array($log->info));
    } else if ($log->action == "add discussion") {
        return $DB->get_record_sql("SELECT p.*, f.type AS forumtype, d.forum, d.groupid, $allnames, u.email, u.picture
                                 FROM {forum_discussions} d,
                                      {forum_posts} p,
                                      {forum} f,
                                      {user} u
                                WHERE d.id = ?
                                  AND d.firstpost = p.id
                                  AND p.userid = u.id
                                  AND u.deleted <> '1'
                                  AND f.id = d.forum", array($log->info));
    }
    return NULL;
}
/**
 * Given a discussion id, return the first post from the discussion
 *
 * @global object
 * @global object
 * @param int $discussionid
 * @return object post record of the discussion's first post, or false
 */
function forum_get_firstpost_from_discussion($discussionid) {
    global $CFG, $DB;
    return $DB->get_record_sql("SELECT p.*
                             FROM {forum_discussions} d,
                                  {forum_posts} p
                            WHERE d.id = ?
                              AND d.firstpost = p.id ", array($discussionid));
}
/**
 * Returns an array of counts of replies to each discussion
 *
 * Two query shapes are used: the unsorted/unpaged form counts only replies
 * (p.parent > 0), while the sorted/paged form counts all posts minus one
 * (the first post) so it can GROUP BY/ORDER BY the requested sort column.
 * Private replies are excluded unless addressed to or authored by the
 * current user, or $canseeprivatereplies is set.
 *
 * @param int $forumid
 * @param string $forumsort
 * @param int $limit
 * @param int $page
 * @param int $perpage
 * @param boolean $canseeprivatereplies Whether the current user can see private replies.
 * @return array records keyed by discussion id, with replies and lastpostid
 */
function forum_count_discussion_replies($forumid, $forumsort = "", $limit = -1, $page = -1, $perpage = 0,
                                        $canseeprivatereplies = false) {
    global $CFG, $DB, $USER;
    // $limit takes precedence over paging; both default to "no limit".
    if ($limit > 0) {
        $limitfrom = 0;
        $limitnum  = $limit;
    } else if ($page != -1) {
        $limitfrom = $page*$perpage;
        $limitnum  = $perpage;
    } else {
        $limitfrom = 0;
        $limitnum  = 0;
    }
    if ($forumsort == "") {
        $orderby = "";
        $groupby = "";
    } else {
        // The sort column must also appear in GROUP BY, minus any asc/desc keyword.
        $orderby = "ORDER BY $forumsort";
        $groupby = ", ".strtolower($forumsort);
        $groupby = str_replace('desc', '', $groupby);
        $groupby = str_replace('asc', '', $groupby);
    }
    $params = ['forumid' => $forumid];
    if (!$canseeprivatereplies) {
        // Private replies are visible only to their recipient and author.
        $privatewhere = ' AND (p.privatereplyto = :currentuser1 OR p.userid = :currentuser2 OR p.privatereplyto = 0)';
        $params['currentuser1'] = $USER->id;
        $params['currentuser2'] = $USER->id;
    } else {
        $privatewhere = '';
    }
    if (($limitfrom == 0 and $limitnum == 0) or $forumsort == "") {
        $sql = "SELECT p.discussion, COUNT(p.id) AS replies, MAX(p.id) AS lastpostid
                  FROM {forum_posts} p
                       JOIN {forum_discussions} d ON p.discussion = d.id
                 WHERE p.parent > 0 AND d.forum = :forumid
                       $privatewhere
              GROUP BY p.discussion";
        return $DB->get_records_sql($sql, $params);
    } else {
        $sql = "SELECT p.discussion, (COUNT(p.id) - 1) AS replies, MAX(p.id) AS lastpostid
                  FROM {forum_posts} p
                       JOIN {forum_discussions} d ON p.discussion = d.id
                 WHERE d.forum = :forumid
                       $privatewhere
              GROUP BY p.discussion $groupby $orderby";
        return $DB->get_records_sql($sql, $params, $limitfrom, $limitnum);
    }
}
/**
 * Count the discussions in a forum that the current user may see.
 *
 * Per-course counts are cached statically for the life of the request. When
 * the forum uses separate groups (and the user cannot access all groups), a
 * second query restricted to the user's groups is run instead.
 *
 * @global object
 * @global object
 * @global object
 * @staticvar array $cache per-course discussion counts, keyed by forum id
 * @param object $forum forum record
 * @param object $cm course module for the forum
 * @param object $course course record
 * @return mixed number of visible discussions
 */
function forum_count_discussions($forum, $cm, $course) {
    global $CFG, $DB, $USER;

    static $cache = array();

    $now = floor(time() / 60) * 60; // DB Cache Friendly.

    $params = array($course->id);

    if (!isset($cache[$course->id])) {
        if (!empty($CFG->forum_enabletimedposts)) {
            // Only count discussions currently within their visible time window.
            $timedsql = "AND d.timestart < ? AND (d.timeend = 0 OR d.timeend > ?)";
            $params[] = $now;
            $params[] = $now;
        } else {
            $timedsql = "";
        }

        $sql = "SELECT f.id, COUNT(d.id) as dcount
                  FROM {forum} f
                       JOIN {forum_discussions} d ON d.forum = f.id
                 WHERE f.course = ?
                       $timedsql
              GROUP BY f.id";

        if ($counts = $DB->get_records_sql($sql, $params)) {
            foreach ($counts as $count) {
                $counts[$count->id] = $count->dcount;
            }
            $cache[$course->id] = $counts;
        } else {
            $cache[$course->id] = array();
        }
    }

    if (empty($cache[$course->id][$forum->id])) {
        return 0;
    }

    $groupmode = groups_get_activity_groupmode($cm, $course);

    if ($groupmode != SEPARATEGROUPS) {
        return $cache[$course->id][$forum->id];
    }

    if (has_capability('moodle/site:accessallgroups', context_module::instance($cm->id))) {
        return $cache[$course->id][$forum->id];
    }

    require_once($CFG->dirroot.'/course/lib.php');

    $modinfo = get_fast_modinfo($course);

    $mygroups = $modinfo->get_groups($cm->groupingid);

    // add all groups posts
    $mygroups[-1] = -1;

    list($mygroups_sql, $params) = $DB->get_in_or_equal($mygroups);
    $params[] = $forum->id;

    if (!empty($CFG->forum_enabletimedposts)) {
        // Fixed: use ? placeholders here (previously $now was interpolated into
        // the SQL while two $now values were still pushed into $params, causing
        // a placeholder/parameter count mismatch).
        $timedsql = "AND d.timestart < ? AND (d.timeend = 0 OR d.timeend > ?)";
        $params[] = $now;
        $params[] = $now;
    } else {
        $timedsql = "";
    }

    $sql = "SELECT COUNT(d.id)
              FROM {forum_discussions} d
             WHERE d.groupid $mygroups_sql AND d.forum = ?
                   $timedsql";

    return $DB->get_field_sql($sql, $params);
}
/**
 * Get all discussions in a forum, i.e. the first post of each discussion the
 * current user may see, with the author's name fields attached.
 *
 * @global object
 * @global object
 * @global object
 * @uses CONTEXT_MODULE
 * @uses VISIBLEGROUPS
 * @param object $cm course-module record for the forum
 * @param string $forumsort SQL ORDER BY fragment; empty for the default sort order
 * @param bool $fullpost true to select every post field, false for a minimal set
 * @param int $unused kept only for backwards compatibility of the signature
 * @param int $limit hard cap on the number of discussions returned (-1 for none)
 * @param bool $userlastmodified also select the user who last modified each discussion
 * @param int $page page number (-1 for no paging)
 * @param int $perpage discussions per page when paging
 * @param int $groupid if groups enabled, get discussions for this group overriding the current group.
 * Use FORUM_POSTS_ALL_USER_GROUPS for all the user groups
 * @param int $updatedsince retrieve only discussions updated since the given time
 * @return array of record objects combining post, discussion and author fields
 */
function forum_get_discussions($cm, $forumsort="", $fullpost=true, $unused=-1, $limit=-1,
$userlastmodified=false, $page=-1, $perpage=0, $groupid = -1,
$updatedsince = 0) {
global $CFG, $DB, $USER;
$timelimit = '';
$now = floor(time() / 60) * 60; // Rounded to the minute so the query is DB-cache friendly.
$params = array($cm->instance);
$modcontext = context_module::instance($cm->id);
if (!has_capability('mod/forum:viewdiscussion', $modcontext)) { /// User must have perms to view discussions
return array();
}
if (!empty($CFG->forum_enabletimedposts)) { /// Users must fulfill timed posts
// Users who cannot see hidden timed posts only get discussions inside
// their release window, plus (when logged in) their own.
if (!has_capability('mod/forum:viewhiddentimedposts', $modcontext)) {
$timelimit = " AND ((d.timestart <= ? AND (d.timeend = 0 OR d.timeend > ?))";
$params[] = $now;
$params[] = $now;
if (isloggedin()) {
$timelimit .= " OR d.userid = ?";
$params[] = $USER->id;
}
$timelimit .= ")";
}
}
// Work out the LIMIT window: an explicit cap wins over paging.
if ($limit > 0) {
$limitfrom = 0;
$limitnum = $limit;
} else if ($page != -1) {
$limitfrom = $page*$perpage;
$limitnum = $perpage;
} else {
$limitfrom = 0;
$limitnum = 0;
}
$groupmode = groups_get_activity_groupmode($cm);
if ($groupmode) {
if (empty($modcontext)) {
$modcontext = context_module::instance($cm->id);
}
// Special case, we received a groupid to override currentgroup.
if ($groupid > 0) {
$course = get_course($cm->course);
if (!groups_group_visible($groupid, $course, $cm)) {
// User doesn't belong to this group, return nothing.
return array();
}
$currentgroup = $groupid;
} else if ($groupid === -1) {
$currentgroup = groups_get_activity_group($cm);
} else {
// Get discussions for all groups current user can see.
$currentgroup = null;
}
if ($groupmode == VISIBLEGROUPS or has_capability('moodle/site:accessallgroups', $modcontext)) {
if ($currentgroup) {
// Current group plus "all participants" (-1) discussions.
$groupselect = "AND (d.groupid = ? OR d.groupid = -1)";
$params[] = $currentgroup;
} else {
$groupselect = "";
}
} else {
// Separate groups.
// Get discussions for all groups current user can see.
if ($currentgroup === null) {
$mygroups = array_keys(groups_get_all_groups($cm->course, $USER->id, $cm->groupingid, 'g.id'));
if (empty($mygroups)) {
$groupselect = "AND d.groupid = -1";
} else {
list($insqlgroups, $inparamsgroups) = $DB->get_in_or_equal($mygroups);
$groupselect = "AND (d.groupid = -1 OR d.groupid $insqlgroups)";
$params = array_merge($params, $inparamsgroups);
}
} else if ($currentgroup) {
$groupselect = "AND (d.groupid = ? OR d.groupid = -1)";
$params[] = $currentgroup;
} else {
$groupselect = "AND d.groupid = -1";
}
}
} else {
$groupselect = "";
}
if (empty($forumsort)) {
$forumsort = forum_get_default_sort_order();
}
if (empty($fullpost)) {
$postdata = "p.id, p.subject, p.modified, p.discussion, p.userid, p.created";
} else {
$postdata = "p.*";
}
$userfieldsapi = \core_user\fields::for_name();
if (empty($userlastmodified)) { // We don't need to know this
$umfields = "";
$umtable = "";
} else {
// Also select the user who last modified each discussion ('um' alias).
$umfields = $userfieldsapi->get_sql('um', false, 'um')->selects . ', um.email AS umemail, um.picture AS umpicture,
um.imagealt AS umimagealt';
$umtable = " LEFT JOIN {user} um ON (d.usermodified = um.id)";
}
$updatedsincesql = '';
if (!empty($updatedsince)) {
$updatedsincesql = 'AND d.timemodified > ?';
$params[] = $updatedsince;
}
$discussionfields = "d.id as discussionid, d.course, d.forum, d.name, d.firstpost, d.groupid, d.assessed," .
" d.timemodified, d.usermodified, d.timestart, d.timeend, d.pinned, d.timelocked";
$allnames = $userfieldsapi->get_sql('u', false, '', '', false)->selects;
// p.parent = 0 restricts the join to each discussion's first post.
$sql = "SELECT $postdata, $discussionfields,
$allnames, u.email, u.picture, u.imagealt $umfields
FROM {forum_discussions} d
JOIN {forum_posts} p ON p.discussion = d.id
JOIN {user} u ON p.userid = u.id
$umtable
WHERE d.forum = ? AND p.parent = 0
$timelimit $groupselect $updatedsincesql
ORDER BY $forumsort, d.id DESC";
return $DB->get_records_sql($sql, $params, $limitfrom, $limitnum);
}
/**
 * Gets the neighbours (previous and next) of a discussion.
 *
 * The calculation is based on the timemodified when time modified or time created is identical
 * It will revert to using the ID to sort consistently. This is better than skipping a discussion.
 *
 * For blog-style forums, the calculation is based on the original creation time of the
 * blog post.
 *
 * Please note that this does not check whether or not the discussion passed is accessible
 * by the user, it simply uses it as a reference to find the neighbours. On the other hand,
 * the returned neighbours are checked and are accessible to the current user.
 *
 * @param object $cm The CM record.
 * @param object $discussion The discussion record.
 * @param object $forum The forum instance record.
 * @return array That always contains the keys 'prev' and 'next'. When there is a result
 * they contain the record with minimal information such as 'id' and 'name'.
 * When the neighbour is not found the value is false.
 */
function forum_get_discussion_neighbours($cm, $discussion, $forum) {
global $CFG, $DB, $USER;
// Sanity check: all three records must refer to the same forum.
if ($cm->instance != $discussion->forum or $discussion->forum != $forum->id or $forum->id != $cm->instance) {
throw new coding_exception('Discussion is not part of the same forum.');
}
$neighbours = array('prev' => false, 'next' => false);
$now = floor(time() / 60) * 60;
$params = array();
$modcontext = context_module::instance($cm->id);
$groupmode = groups_get_activity_groupmode($cm);
$currentgroup = groups_get_activity_group($cm);
// Users must fulfill timed posts.
$timelimit = '';
if (!empty($CFG->forum_enabletimedposts)) {
if (!has_capability('mod/forum:viewhiddentimedposts', $modcontext)) {
$timelimit = ' AND ((d.timestart <= :tltimestart AND (d.timeend = 0 OR d.timeend > :tltimeend))';
$params['tltimestart'] = $now;
$params['tltimeend'] = $now;
if (isloggedin()) {
// Authors can always see their own timed discussions.
$timelimit .= ' OR d.userid = :tluserid';
$params['tluserid'] = $USER->id;
}
$timelimit .= ')';
}
}
// Limiting to posts accessible according to groups.
$groupselect = '';
if ($groupmode) {
if ($groupmode == VISIBLEGROUPS || has_capability('moodle/site:accessallgroups', $modcontext)) {
if ($currentgroup) {
$groupselect = 'AND (d.groupid = :groupid OR d.groupid = -1)';
$params['groupid'] = $currentgroup;
}
} else {
if ($currentgroup) {
$groupselect = 'AND (d.groupid = :groupid OR d.groupid = -1)';
$params['groupid'] = $currentgroup;
} else {
$groupselect = 'AND d.groupid = -1';
}
}
}
$params['forumid'] = $cm->instance;
$params['discid1'] = $discussion->id;
$params['discid2'] = $discussion->id;
$params['discid3'] = $discussion->id;
$params['discid4'] = $discussion->id;
$params['disctimecompare1'] = $discussion->timemodified;
$params['disctimecompare2'] = $discussion->timemodified;
$params['pinnedstate1'] = (int) $discussion->pinned;
$params['pinnedstate2'] = (int) $discussion->pinned;
$params['pinnedstate3'] = (int) $discussion->pinned;
$params['pinnedstate4'] = (int) $discussion->pinned;
// Base query shared by both neighbour lookups; each one appends its own
// comparison conditions and ORDER BY below.
$sql = "SELECT d.id, d.name, d.timemodified, d.groupid, d.timestart, d.timeend
FROM {forum_discussions} d
JOIN {forum_posts} p ON d.firstpost = p.id
WHERE d.forum = :forumid
AND d.id <> :discid1
$timelimit
$groupselect";
$comparefield = "d.timemodified";
$comparevalue = ":disctimecompare1";
$comparevalue2 = ":disctimecompare2";
if (!empty($CFG->forum_enabletimedposts)) {
// Here we need to take into account the release time (timestart)
// if one is set, of the neighbouring posts and compare it to the
// timestart or timemodified of *this* post depending on if the
// release date of this post is in the future or not.
// This stops discussions that appear later because of the
// timestart value from being buried under discussions that were
// made afterwards.
$comparefield = "CASE WHEN d.timemodified < d.timestart
THEN d.timestart ELSE d.timemodified END";
if ($discussion->timemodified < $discussion->timestart) {
// Normally we would just use the timemodified for sorting
// discussion posts. However, when timed discussions are enabled,
// then posts need to be sorted base on the later of timemodified
// or the release date of the post (timestart).
$params['disctimecompare1'] = $discussion->timestart;
$params['disctimecompare2'] = $discussion->timestart;
}
}
$orderbydesc = forum_get_default_sort_order(true, $comparefield, 'd', false);
$orderbyasc = forum_get_default_sort_order(false, $comparefield, 'd', false);
if ($forum->type === 'blog') {
// Blog-style forums compare on the creation time of each discussion's
// first post instead of the modification time.
$subselect = "SELECT pp.created
FROM {forum_discussions} dd
JOIN {forum_posts} pp ON dd.firstpost = pp.id ";
$subselectwhere1 = " WHERE dd.id = :discid3";
$subselectwhere2 = " WHERE dd.id = :discid4";
$comparefield = "p.created";
$sub1 = $subselect.$subselectwhere1;
$comparevalue = "($sub1)";
$sub2 = $subselect.$subselectwhere2;
$comparevalue2 = "($sub2)";
$orderbydesc = "d.pinned, p.created DESC";
$orderbyasc = "d.pinned, p.created ASC";
}
// Pinned discussions sort ahead of unpinned ones; ties on the compare
// field fall back to the discussion id so the order stays stable.
$prevsql = $sql . " AND ( (($comparefield < $comparevalue) AND :pinnedstate1 = d.pinned)
OR ($comparefield = $comparevalue2 AND (d.pinned = 0 OR d.pinned = :pinnedstate4) AND d.id < :discid2)
OR (d.pinned = 0 AND d.pinned <> :pinnedstate2))
ORDER BY CASE WHEN d.pinned = :pinnedstate3 THEN 1 ELSE 0 END DESC, $orderbydesc, d.id DESC";
$nextsql = $sql . " AND ( (($comparefield > $comparevalue) AND :pinnedstate1 = d.pinned)
OR ($comparefield = $comparevalue2 AND (d.pinned = 1 OR d.pinned = :pinnedstate4) AND d.id > :discid2)
OR (d.pinned = 1 AND d.pinned <> :pinnedstate2))
ORDER BY CASE WHEN d.pinned = :pinnedstate3 THEN 1 ELSE 0 END DESC, $orderbyasc, d.id ASC";
$neighbours['prev'] = $DB->get_record_sql($prevsql, $params, IGNORE_MULTIPLE);
$neighbours['next'] = $DB->get_record_sql($nextsql, $params, IGNORE_MULTIPLE);
return $neighbours;
}
/**
 * Get the sql to use in the ORDER BY clause for forum discussions.
 *
 * This has the ordering take timed discussion windows into account.
 *
 * @param bool $desc True for DESC, False for ASC.
 * @param string $compare The field in the SQL to compare to normally sort by.
 * @param string $prefix The prefix being used for the discussion table.
 * @param bool $pinned sort pinned posts to the top
 * @return string ORDER BY fragment
 */
function forum_get_default_sort_order($desc = true, $compare = 'd.timemodified', $prefix = 'd', $pinned = true) {
    global $CFG;

    if (!empty($prefix)) {
        $prefix .= '.';
    }

    $dir = $desc ? 'DESC' : 'ASC';

    // Pinned discussions float to the top when requested.
    $pinned = ($pinned == true) ? "{$prefix}pinned DESC," : '';

    $sort = "{$prefix}timemodified";
    if (!empty($CFG->forum_enabletimedposts)) {
        // With timed posts enabled, sort by the later of the release time
        // (timestart) and the normal comparison field.
        $sort = "CASE WHEN {$compare} < {$prefix}timestart
THEN {$prefix}timestart
ELSE {$compare}
END";
    }
    return "$pinned $sort $dir";
}
/**
 * Returns, per discussion, the number of posts the current user has not yet
 * read in the given forum, honouring group visibility and timed posts.
 *
 * @global object
 * @global object
 * @global object
 * @uses CONTEXT_MODULE
 * @uses VISIBLEGROUPS
 * @param object $cm course-module record for the forum
 * @return array discussion id => unread count (fully-read discussions absent)
 */
function forum_get_discussions_unread($cm) {
    global $CFG, $DB, $USER;

    $now = floor(time() / 60) * 60;
    // Posts older than forum_oldpostdays are considered read regardless.
    $cutoffdate = $now - ($CFG->forum_oldpostdays*24*60*60);

    $params = array();
    $groupmode = groups_get_activity_groupmode($cm);
    $currentgroup = groups_get_activity_group($cm);

    if ($groupmode) {
        $modcontext = context_module::instance($cm->id);

        if ($groupmode == VISIBLEGROUPS or has_capability('moodle/site:accessallgroups', $modcontext)) {
            if ($currentgroup) {
                $groupselect = "AND (d.groupid = :currentgroup OR d.groupid = -1)";
                $params['currentgroup'] = $currentgroup;
            } else {
                $groupselect = "";
            }
        } else {
            // Separate groups without access-all capability.
            if ($currentgroup) {
                $groupselect = "AND (d.groupid = :currentgroup OR d.groupid = -1)";
                $params['currentgroup'] = $currentgroup;
            } else {
                $groupselect = "AND d.groupid = -1";
            }
        }
    } else {
        $groupselect = "";
    }

    if (!empty($CFG->forum_enabletimedposts)) {
        $timedsql = "AND d.timestart < :now1 AND (d.timeend = 0 OR d.timeend > :now2)";
        $params['now1'] = $now;
        $params['now2'] = $now;
    } else {
        $timedsql = "";
    }

    // A post is unread when it has no matching {forum_read} row for this user.
    // Bind the user and forum ids as named placeholders instead of
    // interpolating them into the SQL, consistent with the rest of the query.
    $sql = "SELECT d.id, COUNT(p.id) AS unread
              FROM {forum_discussions} d
                   JOIN {forum_posts} p ON p.discussion = d.id
                   LEFT JOIN {forum_read} r ON (r.postid = p.id AND r.userid = :userid)
             WHERE d.forum = :forumid
                   AND p.modified >= :cutoffdate AND r.id is NULL
                   $groupselect
                   $timedsql
          GROUP BY d.id";
    $params['userid'] = $USER->id;
    $params['forumid'] = $cm->instance;
    $params['cutoffdate'] = $cutoffdate;

    if ($unreads = $DB->get_records_sql($sql, $params)) {
        // Flatten the records into a discussionid => count map.
        foreach ($unreads as $unread) {
            $unreads[$unread->id] = $unread->unread;
        }
        return $unreads;
    } else {
        return array();
    }
}
/**
 * Count the discussions in a forum that are visible to the current user.
 *
 * @global object
 * @global object
 * @global object
 * @uses CONTEXT_MODULE
 * @uses VISIBLEGROUPS
 * @param object $cm course-module record for the forum
 * @return mixed the number of visible discussions
 */
function forum_get_discussions_count($cm) {
    global $CFG, $DB, $USER;

    $now = floor(time() / 60) * 60;
    $params = array($cm->instance);
    $groupmode = groups_get_activity_groupmode($cm);
    $currentgroup = groups_get_activity_group($cm);

    // Restrict by group where group mode demands it.
    $groupselect = "";
    if ($groupmode) {
        $modcontext = context_module::instance($cm->id);
        $seesallgroups = ($groupmode == VISIBLEGROUPS)
            || has_capability('moodle/site:accessallgroups', $modcontext);
        if ($currentgroup) {
            // Current group plus "all participants" (-1) discussions.
            $groupselect = "AND (d.groupid = ? OR d.groupid = -1)";
            $params[] = $currentgroup;
        } else if (!$seesallgroups) {
            // Separate groups without access-all: only "all participants".
            $groupselect = "AND d.groupid = -1";
        }
    }

    // Users who cannot see hidden timed posts only count discussions within
    // their release window, plus (when logged in) their own.
    $timelimit = "";
    if (!empty($CFG->forum_enabletimedposts)) {
        $modcontext = context_module::instance($cm->id);
        if (!has_capability('mod/forum:viewhiddentimedposts', $modcontext)) {
            $timelimit = " AND ((d.timestart <= ? AND (d.timeend = 0 OR d.timeend > ?))";
            $params[] = $now;
            $params[] = $now;
            if (isloggedin()) {
                $timelimit .= " OR d.userid = ?";
                $params[] = $USER->id;
            }
            $timelimit .= ")";
        }
    }

    // p.parent = 0 restricts the join to each discussion's first post.
    $sql = "SELECT COUNT(d.id)
              FROM {forum_discussions} d
                   JOIN {forum_posts} p ON p.discussion = d.id
             WHERE d.forum = ? AND p.parent = 0
                   $groupselect $timelimit";

    return $DB->get_field_sql($sql, $params);
}
// OTHER FUNCTIONS ///////////////////////////////////////////////////////////
/**
 * Return the single special forum of the given type in a course, creating the
 * forum record and its course module if none exists yet.
 *
 * @global object
 * @global object
 * @param int $courseid
 * @param string $type one of 'news', 'social' or 'blog'
 * @return mixed forum record, or false when the type is unknown or setup fails
 */
function forum_get_course_forum($courseid, $type) {
// How to set up special 1-per-course forums
global $CFG, $DB, $OUTPUT, $USER;
if ($forums = $DB->get_records_select("forum", "course = ? AND type = ?", array($courseid, $type), "id ASC")) {
// There should always only be ONE, but with the right combination of
// errors there might be more. In this case, just return the oldest one (lowest ID).
foreach ($forums as $forum) {
return $forum; // ie the first one
}
}
// Doesn't exist, so create one now.
$forum = new stdClass();
$forum->course = $courseid;
$forum->type = "$type";
if (!empty($USER->htmleditor)) {
$forum->introformat = $USER->htmleditor;
}
// Seed name/intro/subscription defaults from the forum type.
switch ($forum->type) {
case "news":
$forum->name = get_string("namenews", "forum");
$forum->intro = get_string("intronews", "forum");
$forum->introformat = FORMAT_HTML;
$forum->forcesubscribe = FORUM_FORCESUBSCRIBE;
$forum->assessed = 0;
if ($courseid == SITEID) {
// The site-level news forum gets its own name and no forced subscription.
$forum->name = get_string("sitenews");
$forum->forcesubscribe = 0;
}
break;
case "social":
$forum->name = get_string("namesocial", "forum");
$forum->intro = get_string("introsocial", "forum");
$forum->introformat = FORMAT_HTML;
$forum->assessed = 0;
$forum->forcesubscribe = 0;
break;
case "blog":
$forum->name = get_string('blogforum', 'forum');
$forum->intro = get_string('introblog', 'forum');
$forum->introformat = FORMAT_HTML;
$forum->assessed = 0;
$forum->forcesubscribe = 0;
break;
default:
echo $OUTPUT->notification("That forum type doesn't exist!");
return false;
break;
}
$forum->timemodified = time();
$forum->id = $DB->insert_record("forum", $forum);
// Create the matching course module so the forum shows up in the course.
if (! $module = $DB->get_record("modules", array("name" => "forum"))) {
echo $OUTPUT->notification("Could not find forum module!!");
return false;
}
$mod = new stdClass();
$mod->course = $courseid;
$mod->module = $module->id;
$mod->instance = $forum->id;
$mod->section = 0;
include_once("$CFG->dirroot/course/lib.php");
if (! $mod->coursemodule = add_course_module($mod) ) {
echo $OUTPUT->notification("Could not add a new course module to the course '" . $courseid . "'");
return false;
}
$sectionid = course_add_cm_to_section($courseid, $mod->coursemodule, 0);
// Re-read the record so the caller gets the stored row, not our draft object.
return $DB->get_record("forum", array("id" => "$forum->id"));
}
/**
 * Return rating related permissions
 *
 * @param int $contextid the id of the context the ratings belong to
 * @param string $component the component the ratings belong to
 * @param string $ratingarea the rating area within the component
 * @return array|null an associative array of the user's rating permissions,
 * or null for an unknown component/area (yielding default restrictive permissions)
 */
function forum_rating_permissions($contextid, $component, $ratingarea) {
    $context = context::instance_by_id($contextid, MUST_EXIST);
    if ($component != 'mod_forum' || $ratingarea != 'post') {
        // We don't know about this component/ratingarea so just return null to get the
        // default restrictive permissions.
        return null;
    }

    // Map each rating permission onto the forum capability that grants it.
    $capabilities = array(
        'view'    => 'mod/forum:viewrating',
        'viewany' => 'mod/forum:viewanyrating',
        'viewall' => 'mod/forum:viewallratings',
        'rate'    => 'mod/forum:rate',
    );

    $permissions = array();
    foreach ($capabilities as $permission => $capability) {
        $permissions[$permission] = has_capability($capability, $context);
    }
    return $permissions;
}
/**
 * Validates a submitted rating
 * @param array $params submitted data
 * context => object the context in which the rated items exists [required]
 * component => The component for this module - should always be mod_forum [required]
 * ratingarea => string the rating area - should always be 'post' [required]
 *
 * itemid => int the ID of the object being rated [required]
 * scaleid => int the scale from which the user can select a rating. Used for bounds checking. [required]
 * rating => int the submitted rating [required]
 * rateduserid => int the id of the user whose items have been rated. NOT the user who submitted the ratings. 0 to update all. [required]
 * aggregation => int the aggregation method to apply when calculating grades ie RATING_AGGREGATE_AVERAGE [required]
 * @return boolean true if the rating is valid. Will throw rating_exception if not
 */
function forum_rating_validate($params) {
global $DB, $USER;
// Check the component is mod_forum
if ($params['component'] != 'mod_forum') {
throw new rating_exception('invalidcomponent');
}
// Check the ratingarea is post (the only rating area in forum)
if ($params['ratingarea'] != 'post') {
throw new rating_exception('invalidratingarea');
}
// Check the rateduserid is not the current user .. you can't rate your own posts
if ($params['rateduserid'] == $USER->id) {
throw new rating_exception('nopermissiontorate');
}
// Fetch all the related records ... we need to do this anyway to call forum_user_can_see_post
$post = $DB->get_record('forum_posts', array('id' => $params['itemid'], 'userid' => $params['rateduserid']), '*', MUST_EXIST);
$discussion = $DB->get_record('forum_discussions', array('id' => $post->discussion), '*', MUST_EXIST);
$forum = $DB->get_record('forum', array('id' => $discussion->forum), '*', MUST_EXIST);
$course = $DB->get_record('course', array('id' => $forum->course), '*', MUST_EXIST);
$cm = get_coursemodule_from_instance('forum', $forum->id, $course->id , false, MUST_EXIST);
$context = context_module::instance($cm->id);
// Make sure the context provided is the context of the forum
if ($context->id != $params['context']->id) {
throw new rating_exception('invalidcontext');
}
if ($forum->scale != $params['scaleid']) {
//the scale being submitted doesnt match the one in the database
throw new rating_exception('invalidscaleid');
}
// check the item we're rating was created in the assessable time window
if (!empty($forum->assesstimestart) && !empty($forum->assesstimefinish)) {
if ($post->created < $forum->assesstimestart || $post->created > $forum->assesstimefinish) {
throw new rating_exception('notavailable');
}
}
//check that the submitted rating is valid for the scale
// lower limit
if ($params['rating'] < 0 && $params['rating'] != RATING_UNSET_RATING) {
throw new rating_exception('invalidnum');
}
// upper limit
if ($forum->scale < 0) {
//its a custom scale
// A negative scale id refers to a record in the scale table; the rating
// must not exceed the number of items on that scale.
$scalerecord = $DB->get_record('scale', array('id' => -$forum->scale));
if ($scalerecord) {
$scalearray = explode(',', $scalerecord->scale);
if ($params['rating'] > count($scalearray)) {
throw new rating_exception('invalidnum');
}
} else {
throw new rating_exception('invalidscaleid');
}
} else if ($params['rating'] > $forum->scale) {
//if its numeric and submitted rating is above maximum
throw new rating_exception('invalidnum');
}
// Make sure groups allow this user to see the item they're rating
if ($discussion->groupid > 0 and $groupmode = groups_get_activity_groupmode($cm, $course)) { // Groups are being used
if (!groups_group_exists($discussion->groupid)) { // Can't find group
throw new rating_exception('cannotfindgroup');//something is wrong
}
if (!groups_is_member($discussion->groupid) and !has_capability('moodle/site:accessallgroups', $context)) {
// do not allow rating of posts from other groups when in SEPARATEGROUPS or VISIBLEGROUPS
throw new rating_exception('notmemberofgroup');
}
}
// perform some final capability checks
if (!forum_user_can_see_post($forum, $discussion, $post, $USER, $cm)) {
throw new rating_exception('nopermissiontorate');
}
return true;
}
/**
 * Can the current user see ratings for a given itemid?
 *
 * @param array $params submitted data
 * contextid => int contextid [required]
 * component => The component for this module - should always be mod_forum [required]
 * ratingarea => string the rating area - should always be 'post' [required]
 * itemid => int the ID of the object being rated [required]
 * scaleid => int scale id [optional]
 * @return bool
 * @throws coding_exception
 * @throws rating_exception
 */
function mod_forum_rating_can_see_item_ratings($params) {
    global $DB, $USER;

    // Validate the request parameters before touching the database.
    if (!isset($params['component']) || $params['component'] != 'mod_forum') {
        throw new rating_exception('invalidcomponent');
    }
    if (!isset($params['ratingarea']) || $params['ratingarea'] != 'post') {
        throw new rating_exception('invalidratingarea');
    }
    if (!isset($params['itemid'])) {
        throw new rating_exception('invaliditemid');
    }

    // Walk from the rated post up to its course module.
    $post = $DB->get_record('forum_posts', array('id' => $params['itemid']), '*', MUST_EXIST);
    $discussion = $DB->get_record('forum_discussions', array('id' => $post->discussion), '*', MUST_EXIST);
    $forum = $DB->get_record('forum', array('id' => $discussion->forum), '*', MUST_EXIST);
    $course = $DB->get_record('course', array('id' => $forum->course), '*', MUST_EXIST);
    $cm = get_coursemodule_from_instance('forum', $forum->id, $course->id, false, MUST_EXIST);

    // Visibility of the ratings follows visibility of the post itself.
    return (bool) forum_user_can_see_post($forum, $discussion, $post, $USER, $cm);
}
/**
 * This function prints the overview of a discussion in the forum listing.
 * It needs some discussion information and some post information, these
 * happen to be combined for efficiency in the $post parameter by the function
 * that calls this one: forum_print_latest_discussions()
 *
 * @global object
 * @global object
 * @param object $post The post object (passed by reference for speed).
 * @param object $forum The forum object.
 * @param int $group Current group.
 * @param string $datestring Format to use for the dates.
 * @param boolean $cantrack Is tracking enabled for this forum.
 * @param boolean $forumtracked Is the user tracking this forum.
 * @param boolean $canviewparticipants True if user has the viewparticipants permission for this course
 * @param object $modcontext The forum's module context; looked up from $forum when null.
 * @param boolean $canviewhiddentimedposts True if user has the viewhiddentimedposts permission for this forum
 */
function forum_print_discussion_header(&$post, $forum, $group = -1, $datestring = "",
$cantrack = true, $forumtracked = true, $canviewparticipants = true, $modcontext = null,
$canviewhiddentimedposts = false) {
global $COURSE, $USER, $CFG, $OUTPUT, $PAGE;
// Shared between calls so alternating row classes and the "mark all read"
// string are computed once per listing.
static $rowcount;
static $strmarkalldread;
if (empty($modcontext)) {
if (!$cm = get_coursemodule_from_instance('forum', $forum->id, $forum->course)) {
print_error('invalidcoursemodule');
}
$modcontext = context_module::instance($cm->id);
}
if (!isset($rowcount)) {
$rowcount = 0;
$strmarkalldread = get_string('markalldread', 'forum');
} else {
$rowcount = ($rowcount + 1) % 2;
}
$post->subject = format_string($post->subject,true);
$canviewfullnames = has_capability('moodle/site:viewfullnames', $modcontext);
$timeddiscussion = !empty($CFG->forum_enabletimedposts) && ($post->timestart || $post->timeend);
$timedoutsidewindow = '';
// Dim the row when the discussion is outside its timed release window.
if ($timeddiscussion && ($post->timestart > time() || ($post->timeend != 0 && $post->timeend < time()))) {
$timedoutsidewindow = ' dimmed_text';
}
echo "\n\n";
echo '<tr class="discussion r'.$rowcount.$timedoutsidewindow.'">';
// Topic cell: pinned icon (if any), timed tooltip and the subject link.
$topicclass = 'topic starter';
if (FORUM_DISCUSSION_PINNED == $post->pinned) {
$topicclass .= ' pinned';
}
echo '<td class="'.$topicclass.'">';
if (FORUM_DISCUSSION_PINNED == $post->pinned) {
echo $OUTPUT->pix_icon('i/pinned', get_string('discussionpinned', 'forum'), 'mod_forum');
}
$canalwaysseetimedpost = $USER->id == $post->userid || $canviewhiddentimedposts;
if ($timeddiscussion && $canalwaysseetimedpost) {
echo $PAGE->get_renderer('mod_forum')->timed_discussion_tooltip($post, empty($timedoutsidewindow));
}
echo '<a href="'.$CFG->wwwroot.'/mod/forum/discuss.php?d='.$post->discussion.'">'.$post->subject.'</a>';
echo "</td>\n";
// Picture
$postuser = new stdClass();
$postuserfields = explode(',', implode(',', \core_user\fields::get_picture_fields()));
$postuser = username_load_fields_from_object($postuser, $post, null, $postuserfields);
$postuser->id = $post->userid;
echo '<td class="author">';
echo '<div class="media">';
echo '<span class="float-left">';
echo $OUTPUT->user_picture($postuser, array('courseid'=>$forum->course));
echo '</span>';
// User name
echo '<div class="media-body">';
$fullname = fullname($postuser, $canviewfullnames);
echo '<a href="'.$CFG->wwwroot.'/user/view.php?id='.$post->userid.'&course='.$forum->course.'">'.$fullname.'</a>';
echo '</div>';
echo '</div>';
echo "</td>\n";
// Group picture
if ($group !== -1) { // Groups are active - group is a group data object or NULL
echo '<td class="picture group">';
if (!empty($group->picture)) {
if ($canviewparticipants && $COURSE->groupmode) {
$picturelink = true;
} else {
$picturelink = false;
}
print_group_picture($group, $forum->course, false, false, $picturelink);
} else if (isset($group->id)) {
if ($canviewparticipants && $COURSE->groupmode) {
echo '<a href="'.$CFG->wwwroot.'/user/index.php?id='.$forum->course.'&group='.$group->id.'">'.$group->name.'</a>';
} else {
echo $group->name;
}
}
echo "</td>\n";
}
if (has_capability('mod/forum:viewdiscussion', $modcontext)) { // Show the column with replies
echo '<td class="replies">';
echo '<a href="'.$CFG->wwwroot.'/mod/forum/discuss.php?d='.$post->discussion.'">';
echo $post->replies.'</a>';
echo "</td>\n";
// Unread-post column, shown only when read tracking is available.
if ($cantrack) {
echo '<td class="replies">';
if ($forumtracked) {
if ($post->unread > 0) {
// Link to the first unread post plus a "mark all read" shortcut.
echo '<span class="unread">';
echo '<a href="'.$CFG->wwwroot.'/mod/forum/discuss.php?d='.$post->discussion.'#unread">';
echo $post->unread;
echo '</a>';
echo '<a title="'.$strmarkalldread.'" href="'.$CFG->wwwroot.'/mod/forum/markposts.php?f='.
$forum->id.'&d='.$post->discussion.'&mark=read&return=/mod/forum/view.php&sesskey=' .
sesskey() . '">' . $OUTPUT->pix_icon('t/markasread', $strmarkalldread) . '</a>';
echo '</span>';
} else {
echo '<span class="read">';
echo $post->unread;
echo '</span>';
}
} else {
echo '<span class="read">';
echo '-';
echo '</span>';
}
echo "</td>\n";
}
}
// Last-post cell: who last modified the discussion and when.
echo '<td class="lastpost">';
$usedate = (empty($post->timemodified)) ? $post->created : $post->timemodified;
$parenturl = '';
$usermodified = new stdClass();
$usermodified->id = $post->usermodified;
$usermodified = username_load_fields_from_object($usermodified, $post, 'um');
// In QA forums we check that the user can view participants.
if ($forum->type !== 'qanda' || $canviewparticipants) {
echo '<a href="'.$CFG->wwwroot.'/user/view.php?id='.$post->usermodified.'&course='.$forum->course.'">'.
fullname($usermodified, $canviewfullnames).'</a><br />';
$parenturl = (empty($post->lastpostid)) ? '' : '&parent='.$post->lastpostid;
}
echo '<a href="'.$CFG->wwwroot.'/mod/forum/discuss.php?d='.$post->discussion.$parenturl.'">'.
userdate_htmltime($usedate, $datestring).'</a>';
echo "</td>\n";
// is_guest should be used here as this also checks whether the user is a guest in the current course.
// Guests and visitors cannot subscribe - only enrolled users.
if ((!is_guest($modcontext, $USER) && isloggedin()) && has_capability('mod/forum:viewdiscussion', $modcontext)) {
// Discussion subscription.
if (\mod_forum\subscriptions::is_subscribable($forum)) {
echo '<td class="discussionsubscription">';
echo forum_get_discussion_subscription_icon($forum, $post->discussion);
echo '</td>';
}
}
echo "</tr>\n\n";
}
/**
 * Return the markup for the discussion subscription toggling icon.
 *
 * @param stdClass $forum The forum object.
 * @param int $discussionid The discussion to create an icon for.
 * @param string|null $returnurl URL to return to after toggling; defaults to the current page.
 * @param bool $includetext Whether to append the textual subscription state to the icon.
 * @return string The generated markup.
 */
function forum_get_discussion_subscription_icon($forum, $discussionid, $returnurl = null, $includetext = false) {
    global $USER, $OUTPUT, $PAGE;

    if ($returnurl === null && $PAGE->url) {
        $returnurl = $PAGE->url->out();
    }

    $subscriptionstatus = \mod_forum\subscriptions::is_subscribed($USER->id, $forum, $discussionid);
    $subscriptionlink = new moodle_url('/mod/forum/subscribe.php', array(
        'sesskey' => sesskey(),
        'id' => $forum->id,
        'd' => $discussionid,
        'returnurl' => $returnurl,
    ));

    // Pick the icon, link title and status string for the current state.
    // (The two near-identical branches of the previous version are merged
    // here; it also built an unused "$o" string, which has been removed.)
    if ($subscriptionstatus) {
        $icon = 't/subscribed';
        $title = get_string('clicktounsubscribe', 'forum');
        $statustext = get_string('subscribed', 'mod_forum');
    } else {
        $icon = 't/unsubscribed';
        $title = get_string('clicktosubscribe', 'forum');
        $statustext = get_string('notsubscribed', 'mod_forum');
    }

    $output = $OUTPUT->pix_icon($icon, $title, 'mod_forum');
    if ($includetext) {
        $output .= $statustext;
    }

    return html_writer::link($subscriptionlink, $output, array(
        'title' => $title,
        'class' => 'discussiontoggle btn btn-link',
        'data-forumid' => $forum->id,
        'data-discussionid' => $discussionid,
        'data-includetext' => $includetext,
    ));
}
/**
 * Return a pair of spans containing classes to allow the subscribe and
 * unsubscribe icons to be pre-loaded by a browser.
 *
 * @return string The generated markup
 */
function forum_get_discussion_subscription_icon_preloaders() {
    // Two placeholder spans; their CSS classes let the browser fetch both
    // icon images up front so toggling the subscription state is instant.
    return html_writer::span(' ', 'preload-subscribe')
        . html_writer::span(' ', 'preload-unsubscribe');
}
/**
 * Print the drop down that allows the user to select how they want to have
 * the discussion displayed.
 *
 * @param int $id forum id if $forumtype is 'single',
 *              discussion id for any other forum type
 * @param mixed $mode forum layout mode
 * @param string $forumtype optional
 */
function forum_print_mode_form($id, $mode, $forumtype='') {
    global $OUTPUT;

    $useexperimentalui = get_user_preferences('forum_useexperimentalui', false);
    $issingle = ($forumtype == 'single');

    // A 'single' forum links to the forum view page, everything else to the
    // discussion page; apart from the target URL the selector is identical.
    if ($issingle) {
        $target = new moodle_url("/mod/forum/view.php", array('f' => $id));
    } else {
        $target = new moodle_url("/mod/forum/discuss.php", array('d' => $id));
    }

    $select = new single_select(
        $target,
        'mode',
        forum_get_layout_modes($useexperimentalui),
        $mode,
        null,
        "mode"
    );
    $select->set_label(get_string('displaymode', 'forum'), array('class' => 'accesshide'));
    if ($issingle) {
        $select->class = "forummode";
    }

    echo $OUTPUT->render($select);
}
/**
 * Render the forum quick-search form for a course.
 *
 * @param object $course Course whose id the search form is scoped to.
 * @param string $search Current search string, pre-filled into the form.
 * @return string Rendered form HTML.
 */
function forum_search_form($course, $search='') {
    global $CFG, $PAGE;
    $renderer = $PAGE->get_renderer('mod_forum');
    return $renderer->render(new \mod_forum\output\quick_search_form($course->id, $search));
}
/**
 * Remember the referring page in the session so that forum_go_back_to()
 * can later send the user back to where they came from.
 *
 * Only the first referer seen is stored, and login pages are never stored
 * so the user is not redirected back to a login screen.
 *
 * @global object $CFG
 * @global object $SESSION
 */
function forum_set_return() {
    global $CFG, $SESSION;
    if (! isset($SESSION->fromdiscussion)) {
        $referer = get_local_referer(false);
        // If the referer is NOT a login screen then save it.
        // strncasecmp() returns 0 when $referer starts with the login URL,
        // so a non-zero result means "not the login page". The previous
        // `!strncasecmp(...)` test was inverted and saved ONLY login URLs.
        if (strncasecmp("$CFG->wwwroot/login", $referer, 300) != 0) {
            $SESSION->fromdiscussion = $referer;
        }
    }
}
/**
 * Return (and consume) the URL saved by forum_set_return(), falling back
 * to the supplied default when none was stored.
 *
 * @global object $SESSION
 * @param string|\moodle_url $default Value returned when no referer was saved.
 * @return string|\moodle_url
 */
function forum_go_back_to($default) {
    global $SESSION;

    // Guard clause: nothing stored, use the caller's default.
    if (empty($SESSION->fromdiscussion)) {
        return $default;
    }

    // Consume the stored URL so it is only used once.
    $destination = $SESSION->fromdiscussion;
    unset($SESSION->fromdiscussion);
    return $destination;
}
/**
 * Given a discussion object that is being moved to $forumto,
 * this function checks all posts in that discussion
 * for attachments, and if any are found, these are
 * moved to the new forum directory.
 *
 * Also repairs the denormalised forum_posts.attachment flag when it
 * disagrees with the actual number of files moved.
 *
 * @global object
 * @param object $discussion
 * @param int $forumfrom source forum id
 * @param int $forumto target forum id
 * @return bool success (always true)
 */
function forum_move_attachments($discussion, $forumfrom, $forumto) {
    global $DB;
    $fs = get_file_storage();
    // Files live in the module context, so resolve old and new contexts.
    $newcm = get_coursemodule_from_instance('forum', $forumto);
    $oldcm = get_coursemodule_from_instance('forum', $forumfrom);
    $newcontext = context_module::instance($newcm->id);
    $oldcontext = context_module::instance($oldcm->id);
    // loop through all posts, better not use attachment flag ;-)
    if ($posts = $DB->get_records('forum_posts', array('discussion'=>$discussion->id), '', 'id, attachment')) {
        foreach ($posts as $post) {
            // Move both the inline 'post' files and the 'attachment' files.
            $fs->move_area_files_to_new_context($oldcontext->id,
                    $newcontext->id, 'mod_forum', 'post', $post->id);
            $attachmentsmoved = $fs->move_area_files_to_new_context($oldcontext->id,
                    $newcontext->id, 'mod_forum', 'attachment', $post->id);
            // Re-sync the attachment flag with the real file count.
            if ($attachmentsmoved > 0 && $post->attachment != '1') {
                // Weird - let's fix it
                $post->attachment = '1';
                $DB->update_record('forum_posts', $post);
            } else if ($attachmentsmoved == 0 && $post->attachment != '') {
                // Weird - let's fix it
                $post->attachment = '';
                $DB->update_record('forum_posts', $post);
            }
        }
    }
    return true;
}
/**
 * Returns attachments as formatted text/html optionally with separate images
 *
 * @global object
 * @global object
 * @global object
 * @param object $post
 * @param object $cm
 * @param string $type html/text/separateimages
 * @return mixed string or, for 'separateimages', array of (html text without images, image HTML)
 */
function forum_print_attachments($post, $cm, $type) {
    global $CFG, $DB, $USER, $OUTPUT;
    // No attachments: return the empty shape matching the requested $type.
    if (empty($post->attachment)) {
        return $type !== 'separateimages' ? '' : array('', '');
    }
    // Unknown $type values are treated the same as "nothing to show".
    if (!in_array($type, array('separateimages', 'html', 'text'))) {
        return $type !== 'separateimages' ? '' : array('', '');
    }
    if (!$context = context_module::instance($cm->id)) {
        return $type !== 'separateimages' ? '' : array('', '');
    }
    $strattachment = get_string('attachment', 'forum');
    $fs = get_file_storage();
    $imagereturn = '';
    $output = '';
    // Portfolio export is offered for own posts or with the exportpost capability.
    $canexport = !empty($CFG->enableportfolios) && (has_capability('mod/forum:exportpost', $context) || ($post->userid == $USER->id && has_capability('mod/forum:exportownpost', $context)));
    if ($canexport) {
        require_once($CFG->libdir.'/portfoliolib.php');
    }
    // We retrieve all files according to the time that they were created. In the case that several files were uploaded
    // at the sametime (e.g. in the case of drag/drop upload) we revert to using the filename.
    $files = $fs->get_area_files($context->id, 'mod_forum', 'attachment', $post->id, "filename", false);
    if ($files) {
        if ($canexport) {
            // One button instance is reused for every file below.
            $button = new portfolio_add_button();
        }
        foreach ($files as $file) {
            $filename = $file->get_filename();
            $mimetype = $file->get_mimetype();
            $iconimage = $OUTPUT->pix_icon(file_file_icon($file), get_mimetype_description($file), 'moodle', array('class' => 'icon'));
            $path = file_encode_url($CFG->wwwroot.'/pluginfile.php', '/'.$context->id.'/mod_forum/attachment/'.$post->id.'/'.$filename);
            if ($type == 'html') {
                // Icon + filename, both linked to the file.
                $output .= "<a href=\"$path\">$iconimage</a> ";
                $output .= "<a href=\"$path\">".s($filename)."</a>";
                if ($canexport) {
                    $button->set_callback_options('forum_portfolio_caller', array('postid' => $post->id, 'attachment' => $file->get_id()), 'mod_forum');
                    $button->set_format_by_file($file);
                    $output .= $button->to_html(PORTFOLIO_ADD_ICON_LINK);
                }
                $output .= "<br />";
            } else if ($type == 'text') {
                // Plain-text listing: "Attachment name: url".
                $output .= "$strattachment ".s($filename).":\n$path\n";
            } else { //'returnimages'
                // 'separateimages': images are split out into $imagereturn,
                // everything else is rendered inline like the 'html' case.
                if (in_array($mimetype, array('image/gif', 'image/jpeg', 'image/png'))) {
                    // Image attachments don't get printed as links
                    $imagereturn .= "<br /><img src=\"$path\" alt=\"\" />";
                    if ($canexport) {
                        $button->set_callback_options('forum_portfolio_caller', array('postid' => $post->id, 'attachment' => $file->get_id()), 'mod_forum');
                        $button->set_format_by_file($file);
                        $imagereturn .= $button->to_html(PORTFOLIO_ADD_ICON_LINK);
                    }
                } else {
                    $output .= "<a href=\"$path\">$iconimage</a> ";
                    $output .= format_text("<a href=\"$path\">".s($filename)."</a>", FORMAT_HTML, array('context'=>$context));
                    if ($canexport) {
                        $button->set_callback_options('forum_portfolio_caller', array('postid' => $post->id, 'attachment' => $file->get_id()), 'mod_forum');
                        $button->set_format_by_file($file);
                        $output .= $button->to_html(PORTFOLIO_ADD_ICON_LINK);
                    }
                    $output .= '<br />';
                }
            }
            // Plagiarism links are appended per file regardless of $type.
            if (!empty($CFG->enableplagiarism)) {
                require_once($CFG->libdir.'/plagiarismlib.php');
                $output .= plagiarism_get_links(array('userid' => $post->userid,
                    'file' => $file,
                    'cmid' => $cm->id,
                    'course' => $cm->course,
                    'forum' => $cm->instance));
                $output .= '<br />';
            }
        }
    }
    if ($type !== 'separateimages') {
        return $output;
    } else {
        return array($output, $imagereturn);
    }
}
////////////////////////////////////////////////////////////////////////////////
// File API //
////////////////////////////////////////////////////////////////////////////////
/**
 * Lists all browsable file areas
 *
 * @package  mod_forum
 * @category files
 * @param stdClass $course course object
 * @param stdClass $cm course module object
 * @param stdClass $context context object
 * @return array area name => localised description
 */
function forum_get_file_areas($course, $cm, $context) {
    $areas = array();
    $areas['attachment'] = get_string('areaattachment', 'mod_forum');
    $areas['post'] = get_string('areapost', 'mod_forum');
    return $areas;
}
/**
 * File browsing support for forum module.
 *
 * @package  mod_forum
 * @category files
 * @param stdClass $browser file browser object
 * @param stdClass $areas file areas
 * @param stdClass $course course object
 * @param stdClass $cm course module
 * @param stdClass $context context module
 * @param string $filearea file area
 * @param int $itemid item ID (post id), or null to browse the whole area
 * @param string $filepath file path
 * @param string $filename file name
 * @return file_info instance or null if not found
 */
function forum_get_file_info($browser, $areas, $course, $cm, $context, $filearea, $itemid, $filepath, $filename) {
    global $CFG, $DB, $USER;
    if ($context->contextlevel != CONTEXT_MODULE) {
        return null;
    }
    // filearea must contain a real area
    if (!isset($areas[$filearea])) {
        return null;
    }
    // Note that forum_user_can_see_post() additionally allows access for parent roles
    // and it explicitly checks qanda forum type, too. One day, when we stop requiring
    // course:managefiles, we will need to extend this.
    if (!has_capability('mod/forum:viewdiscussion', $context)) {
        return null;
    }
    // No item id: return a container that lets the browser list all posts in the area.
    if (is_null($itemid)) {
        require_once($CFG->dirroot.'/mod/forum/locallib.php');
        return new forum_file_info_container($browser, $course, $cm, $context, $areas, $filearea);
    }
    static $cached = array();
    // $cached will store last retrieved post, discussion and forum. To make sure that the cache
    // is cleared between unit tests we check if this is the same session
    if (!isset($cached['sesskey']) || $cached['sesskey'] != sesskey()) {
        $cached = array('sesskey' => sesskey());
    }
    // Resolve post -> discussion -> forum, each preferring the one-entry cache.
    if (isset($cached['post']) && $cached['post']->id == $itemid) {
        $post = $cached['post'];
    } else if ($post = $DB->get_record('forum_posts', array('id' => $itemid))) {
        $cached['post'] = $post;
    } else {
        return null;
    }
    if (isset($cached['discussion']) && $cached['discussion']->id == $post->discussion) {
        $discussion = $cached['discussion'];
    } else if ($discussion = $DB->get_record('forum_discussions', array('id' => $post->discussion))) {
        $cached['discussion'] = $discussion;
    } else {
        return null;
    }
    if (isset($cached['forum']) && $cached['forum']->id == $cm->instance) {
        $forum = $cached['forum'];
    } else if ($forum = $DB->get_record('forum', array('id' => $cm->instance))) {
        $cached['forum'] = $forum;
    } else {
        return null;
    }
    $fs = get_file_storage();
    $filepath = is_null($filepath) ? '/' : $filepath;
    $filename = is_null($filename) ? '.' : $filename;
    if (!($storedfile = $fs->get_file($context->id, 'mod_forum', $filearea, $itemid, $filepath, $filename))) {
        return null;
    }
    // Checks to see if the user can manage files or is the owner.
    // TODO MDL-33805 - Do not use userid here and move the capability check above.
    if (!has_capability('moodle/course:managefiles', $context) && $storedfile->get_userid() != $USER->id) {
        return null;
    }
    // Make sure groups allow this user to see this file
    if ($discussion->groupid > 0 && !has_capability('moodle/site:accessallgroups', $context)) {
        $groupmode = groups_get_activity_groupmode($cm, $course);
        if ($groupmode == SEPARATEGROUPS && !groups_is_member($discussion->groupid)) {
            return null;
        }
    }
    // Make sure we're allowed to see it...
    if (!forum_user_can_see_post($forum, $discussion, $post, NULL, $cm)) {
        return null;
    }
    $urlbase = $CFG->wwwroot.'/pluginfile.php';
    return new file_info_stored($browser, $context, $storedfile, $urlbase, $itemid, true, true, false, false);
}
/**
 * Serves the forum attachments. Implements needed access control ;-)
 *
 * @package  mod_forum
 * @category files
 * @param stdClass $course course object
 * @param stdClass $cm course module object
 * @param stdClass $context context object
 * @param string $filearea file area
 * @param array $args extra arguments (post id followed by the file path parts)
 * @param bool $forcedownload whether or not force download
 * @param array $options additional options affecting the file serving
 * @return bool false if file not found, does not return if found - justsend the file
 */
function forum_pluginfile($course, $cm, $context, $filearea, $args, $forcedownload, array $options=array()) {
    global $CFG, $DB;
    if ($context->contextlevel != CONTEXT_MODULE) {
        return false;
    }
    require_course_login($course, true, $cm);
    $areas = forum_get_file_areas($course, $cm, $context);
    // filearea must contain a real area
    if (!isset($areas[$filearea])) {
        return false;
    }
    // First path component is the post id; resolve post -> discussion -> forum.
    $postid = (int)array_shift($args);
    if (!$post = $DB->get_record('forum_posts', array('id'=>$postid))) {
        return false;
    }
    if (!$discussion = $DB->get_record('forum_discussions', array('id'=>$post->discussion))) {
        return false;
    }
    if (!$forum = $DB->get_record('forum', array('id'=>$cm->instance))) {
        return false;
    }
    $fs = get_file_storage();
    // Remaining args are the path/name of the file within the area.
    $relativepath = implode('/', $args);
    $fullpath = "/$context->id/mod_forum/$filearea/$postid/$relativepath";
    if (!$file = $fs->get_file_by_hash(sha1($fullpath)) or $file->is_directory()) {
        return false;
    }
    // Make sure groups allow this user to see this file
    if ($discussion->groupid > 0) {
        $groupmode = groups_get_activity_groupmode($cm, $course);
        if ($groupmode == SEPARATEGROUPS) {
            if (!groups_is_member($discussion->groupid) and !has_capability('moodle/site:accessallgroups', $context)) {
                return false;
            }
        }
    }
    // Make sure we're allowed to see it...
    if (!forum_user_can_see_post($forum, $discussion, $post, NULL, $cm)) {
        return false;
    }
    // finally send the file
    send_stored_file($file, 0, 0, true, $options); // download MUST be forced - security!
}
/**
 * Move a post's draft-area attachments into the permanent 'attachment'
 * file area and update the post's attachment flag accordingly.
 *
 * NOTE(review): the historical summary claimed this returns "the name of
 * the file"; the code below returns a bool.
 *
 * @global object
 * @param object $post is a full post record, including course and forum
 * @param object $forum
 * @param object $cm
 * @param mixed $mform submitted form; nothing is done without it
 * @param string $unused
 * @return bool false when no form was supplied, true otherwise
 */
function forum_add_attachment($post, $forum, $cm, $mform=null, $unused=null) {
    global $DB;
    if (empty($mform)) {
        return false;
    }
    if (empty($post->attachments)) {
        return true; // Nothing to do
    }
    $context = context_module::instance($cm->id);
    // Peek at the draft area to decide the value of the attachment flag.
    $info = file_get_draft_area_info($post->attachments);
    $present = ($info['filecount']>0) ? '1' : '';
    file_save_draft_area_files($post->attachments, $context->id, 'mod_forum', 'attachment', $post->id,
            mod_forum_post_form::attachment_options($forum));
    // '1' when at least one file is attached, '' when none.
    $DB->set_field('forum_posts', 'attachment', $present, array('id'=>$post->id));
    return true;
}
/**
 * Add a new post in an existing discussion.
 *
 * @param stdClass $post The post data
 * @param mixed $mform The submitted form
 * @param string $unused
 * @return int The id of the newly inserted post
 */
function forum_add_new_post($post, $mform, $unused = null) {
    global $USER, $DB;
    $discussion = $DB->get_record('forum_discussions', array('id' => $post->discussion));
    $forum = $DB->get_record('forum', array('id' => $discussion->forum));
    $cm = get_coursemodule_from_instance('forum', $forum->id);
    $context = context_module::instance($cm->id);
    $privatereplyto = 0;
    // Check whether private replies should be enabled for this post.
    if ($post->parent) {
        $parent = $DB->get_record('forum_posts', array('id' => $post->parent));
        // Private replies are leaves of the thread: replying to one is a bug.
        if (!empty($parent->privatereplyto)) {
            throw new \coding_exception('It should not be possible to reply to a private reply');
        }
        if (!empty($post->isprivatereply) && forum_user_can_reply_privately($context, $parent)) {
            $privatereplyto = $parent->userid;
        }
    }
    $post->created = $post->modified = time();
    $post->mailed = FORUM_MAILED_PENDING;
    $post->userid = $USER->id;
    $post->privatereplyto = $privatereplyto;
    $post->attachment = "";
    if (!isset($post->totalscore)) {
        $post->totalscore = 0;
    }
    if (!isset($post->mailnow)) {
        $post->mailnow = 0;
    }
    \mod_forum\local\entities\post::add_message_counts($post);
    $post->id = $DB->insert_record("forum_posts", $post);
    // The message was written against a draft itemid; now that the real post id
    // exists, move the draft files and rewrite embedded URLs, then persist.
    $post->message = file_save_draft_area_files($post->itemid, $context->id, 'mod_forum', 'post', $post->id,
            mod_forum_post_form::editor_options($context, null), $post->message);
    $DB->set_field('forum_posts', 'message', $post->message, array('id'=>$post->id));
    forum_add_attachment($post, $forum, $cm, $mform);
    // Update discussion modified date
    $DB->set_field("forum_discussions", "timemodified", $post->modified, array("id" => $post->discussion));
    $DB->set_field("forum_discussions", "usermodified", $post->userid, array("id" => $post->discussion));
    // Mark the author's own post as read when read tracking applies.
    if (forum_tp_can_track_forums($forum) && forum_tp_is_tracked($forum)) {
        forum_tp_mark_post_read($post->userid, $post);
    }
    if (isset($post->tags)) {
        core_tag_tag::set_item_tags('mod_forum', 'forum_posts', $post->id, $context, $post->tags);
    }
    // Let Moodle know that assessable content is uploaded (eg for plagiarism detection)
    forum_trigger_content_uploaded_event($post, $cm, 'forum_add_new_post');
    return $post->id;
}
/**
 * Trigger post updated event.
 *
 * @param object $post forum post object
 * @param object $discussion discussion object
 * @param object $context forum context object
 * @param object $forum forum object
 * @since Moodle 3.8
 * @return void
 */
function forum_trigger_post_updated_event($post, $discussion, $context, $forum) {
    global $USER;

    $eventdata = array(
        'context' => $context,
        'objectid' => $post->id,
        'other' => array(
            'discussionid' => $discussion->id,
            'forumid' => $forum->id,
            'forumtype' => $forum->type,
        ),
    );

    // When someone edits another user's post, record whose post was touched.
    if ($USER->id !== $post->userid) {
        $eventdata['relateduserid'] = $post->userid;
    }

    $event = \mod_forum\event\post_updated::create($eventdata);
    $event->add_record_snapshot('forum_discussions', $discussion);
    $event->trigger();
}
/**
 * Update a post.
 *
 * @param stdClass $newpost The post to update
 * @param mixed $mform The submitted form
 * @param string $unused
 * @return bool Always true
 */
function forum_update_post($newpost, $mform, $unused = null) {
    global $DB, $USER;
    $post = $DB->get_record('forum_posts', array('id' => $newpost->id));
    $discussion = $DB->get_record('forum_discussions', array('id' => $post->discussion));
    $forum = $DB->get_record('forum', array('id' => $discussion->forum));
    $cm = get_coursemodule_from_instance('forum', $forum->id);
    $context = context_module::instance($cm->id);
    // Allowed modifiable fields.
    $modifiablefields = [
        'subject',
        'message',
        'messageformat',
        'messagetrust',
        'timestart',
        'timeend',
        'pinned',
        'attachments',
    ];
    // Copy only whitelisted, supplied fields onto the stored record.
    foreach ($modifiablefields as $field) {
        if (isset($newpost->{$field})) {
            $post->{$field} = $newpost->{$field};
        }
    }
    $post->modified = time();
    if (!$post->parent) {   // Post is a discussion starter - update discussion title and times too
        $discussion->name      = $post->subject;
        $discussion->timestart = $post->timestart;
        $discussion->timeend   = $post->timeend;
        if (isset($post->pinned)) {
            $discussion->pinned = $post->pinned;
        }
    }
    // Move draft files into the real 'post' area and rewrite embedded URLs.
    $post->message = file_save_draft_area_files($newpost->itemid, $context->id, 'mod_forum', 'post', $post->id,
            mod_forum_post_form::editor_options($context, $post->id), $post->message);
    \mod_forum\local\entities\post::add_message_counts($post);
    $DB->update_record('forum_posts', $post);
    // Note: Discussion modified time/user are intentionally not updated, to enable them to track the latest new post.
    $DB->update_record('forum_discussions', $discussion);
    forum_add_attachment($post, $forum, $cm, $mform);
    if ($forum->type == 'single' && $post->parent == '0') {
        // Updating first post of single discussion type -> updating forum intro.
        $forum->intro = $post->message;
        $forum->timemodified = time();
        $DB->update_record("forum", $forum);
    }
    if (isset($newpost->tags)) {
        core_tag_tag::set_item_tags('mod_forum', 'forum_posts', $post->id, $context, $newpost->tags);
    }
    // Mark the edited post as read for the editor when read tracking applies.
    if (forum_tp_can_track_forums($forum) && forum_tp_is_tracked($forum)) {
        forum_tp_mark_post_read($USER->id, $post);
    }
    // Let Moodle know that assessable content is uploaded (eg for plagiarism detection)
    forum_trigger_content_uploaded_event($post, $cm, 'forum_update_post');
    return true;
}
/**
 * Given an object containing all the necessary data,
 * create a new discussion and return the id
 *
 * @param object $discussion The discussion data (name, message, forum id, ...)
 * @param mixed $mform
 * @param string $unused
 * @param int $userid Author; defaults to the current user
 * @return int The id of the new discussion
 */
function forum_add_discussion($discussion, $mform=null, $unused=null, $userid=null) {
    global $USER, $CFG, $DB;
    $timenow = isset($discussion->timenow) ? $discussion->timenow : time();
    if (is_null($userid)) {
        $userid = $USER->id;
    }
    // The first post is stored as a real post, and linked
    // to from the discuss entry.
    $forum = $DB->get_record('forum', array('id'=>$discussion->forum));
    $cm = get_coursemodule_from_instance('forum', $forum->id);
    // Build the first post from the discussion data.
    $post = new stdClass();
    $post->discussion    = 0;
    $post->parent        = 0;
    $post->privatereplyto = 0;
    $post->userid        = $userid;
    $post->created       = $timenow;
    $post->modified      = $timenow;
    $post->mailed        = FORUM_MAILED_PENDING;
    $post->subject       = $discussion->name;
    $post->message       = $discussion->message;
    $post->messageformat = $discussion->messageformat;
    $post->messagetrust  = $discussion->messagetrust;
    $post->attachments   = isset($discussion->attachments) ? $discussion->attachments : null;
    $post->forum         = $forum->id;     // speedup
    $post->course        = $forum->course; // speedup
    $post->mailnow       = $discussion->mailnow;
    \mod_forum\local\entities\post::add_message_counts($post);
    $post->id = $DB->insert_record("forum_posts", $post);
    // TODO: Fix the calling code so that there always is a $cm when this function is called
    if (!empty($cm->id) && !empty($discussion->itemid)) {   // In "single simple discussions" this may not exist yet
        $context = context_module::instance($cm->id);
        // Move the draft editor files into place and rewrite embedded URLs.
        $text = file_save_draft_area_files($discussion->itemid, $context->id, 'mod_forum', 'post', $post->id,
                mod_forum_post_form::editor_options($context, null), $post->message);
        $DB->set_field('forum_posts', 'message', $text, array('id'=>$post->id));
    }
    // Now do the main entry for the discussion, linking to this first post
    $discussion->firstpost    = $post->id;
    $discussion->timemodified = $timenow;
    $discussion->usermodified = $post->userid;
    $discussion->userid       = $userid;
    $discussion->assessed     = 0;
    $post->discussion = $DB->insert_record("forum_discussions", $discussion);
    // Finally, set the pointer on the post.
    $DB->set_field("forum_posts", "discussion", $post->discussion, array("id"=>$post->id));
    if (!empty($cm->id)) {
        forum_add_attachment($post, $forum, $cm, $mform, $unused);
    }
    if (isset($discussion->tags)) {
        core_tag_tag::set_item_tags('mod_forum', 'forum_posts', $post->id, context_module::instance($cm->id), $discussion->tags);
    }
    // Mark the author's own first post read when read tracking applies.
    if (forum_tp_can_track_forums($forum) && forum_tp_is_tracked($forum)) {
        forum_tp_mark_post_read($post->userid, $post);
    }
    // Let Moodle know that assessable content is uploaded (eg for plagiarism detection)
    if (!empty($cm->id)) {
        forum_trigger_content_uploaded_event($post, $cm, 'forum_add_discussion');
    }
    return $post->discussion;
}
/**
 * Deletes a discussion and handles all associated cleanup.
 *
 * @global object
 * @param object $discussion Discussion to delete
 * @param bool $fulldelete True when deleting entire forum
 * @param object $course Course
 * @param object $cm Course-module
 * @param object $forum Forum
 * @return bool false when any post or the discussion record failed to delete
 */
function forum_delete_discussion($discussion, $fulldelete, $course, $cm, $forum) {
    global $DB, $CFG;
    require_once($CFG->libdir.'/completionlib.php');
    $result = true;
    // Delete every post first; 'ignore' skips the child-post check since the
    // whole discussion goes away anyway.
    if ($posts = $DB->get_records("forum_posts", array("discussion" => $discussion->id))) {
        foreach ($posts as $post) {
            $post->course = $discussion->course;
            $post->forum  = $discussion->forum;
            if (!forum_delete_post($post, 'ignore', $course, $cm, $forum, $fulldelete)) {
                $result = false;
            }
        }
    }
    forum_tp_delete_read_records(-1, -1, $discussion->id);
    // Discussion subscriptions must be removed before discussions because of key constraints.
    $DB->delete_records('forum_discussion_subs', array('discussion' => $discussion->id));
    if (!$DB->delete_records("forum_discussions", array("id" => $discussion->id))) {
        $result = false;
    }
    // Update completion state if we are tracking completion based on number of posts
    // But don't bother when deleting whole thing
    if (!$fulldelete) {
        $completion = new completion_info($course);
        if ($completion->is_enabled($cm) == COMPLETION_TRACKING_AUTOMATIC &&
            ($forum->completiondiscussions || $forum->completionreplies || $forum->completionposts)) {
            $completion->update_state($cm, COMPLETION_INCOMPLETE, $discussion->userid);
        }
    }
    // Notify observers that the discussion is gone.
    $params = array(
        'objectid' => $discussion->id,
        'context' => context_module::instance($cm->id),
        'other' => array(
            'forumid' => $forum->id,
        )
    );
    $event = \mod_forum\event\discussion_deleted::create($params);
    $event->add_record_snapshot('forum_discussions', $discussion);
    $event->trigger();
    return $result;
}
/**
 * Deletes a single forum post.
 *
 * @global object
 * @param object $post Forum post object
 * @param mixed $children Whether to delete children. If false, returns false
 *   if there are any children (without deleting the post). If true,
 *   recursively deletes all children. If set to special value 'ignore', deletes
 *   post regardless of children (this is for use only when deleting all posts
 *   in a disussion).
 * @param object $course Course
 * @param object $cm Course-module
 * @param object $forum Forum
 * @param bool $skipcompletion True to skip updating completion state if it
 *   would otherwise be updated, i.e. when deleting entire forum anyway.
 * @return bool
 */
function forum_delete_post($post, $children, $course, $cm, $forum, $skipcompletion=false) {
    global $DB, $CFG, $USER;
    require_once($CFG->libdir.'/completionlib.php');
    $context = context_module::instance($cm->id);
    // Handle replies first: recurse, refuse, or ('ignore') skip the check.
    if ($children !== 'ignore' && ($childposts = $DB->get_records('forum_posts', array('parent'=>$post->id)))) {
       if ($children) {
           foreach ($childposts as $childpost) {
               forum_delete_post($childpost, true, $course, $cm, $forum, $skipcompletion);
           }
       } else {
           return false;
       }
    }
    // Delete ratings.
    require_once($CFG->dirroot.'/rating/lib.php');
    $delopt = new stdClass;
    $delopt->contextid = $context->id;
    $delopt->component = 'mod_forum';
    $delopt->ratingarea = 'post';
    $delopt->itemid = $post->id;
    $rm = new rating_manager();
    $rm->delete_ratings($delopt);
    // Delete attachments.
    $fs = get_file_storage();
    $fs->delete_area_files($context->id, 'mod_forum', 'attachment', $post->id);
    $fs->delete_area_files($context->id, 'mod_forum', 'post', $post->id);
    // Delete cached RSS feeds.
    if (!empty($CFG->enablerssfeeds)) {
        require_once($CFG->dirroot.'/mod/forum/rsslib.php');
        forum_rss_delete_file($forum);
    }
    if ($DB->delete_records("forum_posts", array("id" => $post->id))) {
        forum_tp_delete_read_records(-1, $post->id);
    // Just in case we are deleting the last post
        forum_discussion_update_last_post($post->discussion);
        // Update completion state if we are tracking completion based on number of posts
        // But don't bother when deleting whole thing
        if (!$skipcompletion) {
            $completion = new completion_info($course);
            if ($completion->is_enabled($cm) == COMPLETION_TRACKING_AUTOMATIC &&
                ($forum->completiondiscussions || $forum->completionreplies || $forum->completionposts)) {
                $completion->update_state($cm, COMPLETION_INCOMPLETE, $post->userid);
            }
        }
        // Notify observers; relateduserid is set when deleting someone else's post.
        $params = array(
            'context' => $context,
            'objectid' => $post->id,
            'other' => array(
                'discussionid' => $post->discussion,
                'forumid' => $forum->id,
                'forumtype' => $forum->type,
            )
        );
        $post->deleted = 1;
        if ($post->userid !== $USER->id) {
            $params['relateduserid'] = $post->userid;
        }
        $event = \mod_forum\event\post_deleted::create($params);
        $event->add_record_snapshot('forum_posts', $post);
        $event->trigger();
        return true;
    }
    return false;
}
/**
 * Sends post content to plagiarism plugin
 * @param object $post Forum post object
 * @param object $cm Course-module
 * @param string $name
 * @return bool
 */
function forum_trigger_content_uploaded_event($post, $cm, $name) {
    $context = context_module::instance($cm->id);

    // Collect this post's attachment files so their pathname hashes can be
    // reported alongside the message content.
    $fs = get_file_storage();
    $attachments = $fs->get_area_files($context->id, 'mod_forum', 'attachment', $post->id, "timemodified", false);

    $event = \mod_forum\event\assessable_uploaded::create(array(
        'context' => $context,
        'objectid' => $post->id,
        'other' => array(
            'content' => $post->message,
            'pathnamehashes' => array_keys($attachments),
            'discussionid' => $post->discussion,
            'triggeredfrom' => $name,
        )
    ));
    $event->trigger();
    return true;
}
/**
 * Given a new post, subscribes or unsubscribes as appropriate.
 * Returns some text which describes what happened.
 *
 * @param object $fromform The submitted form
 * @param stdClass $forum The forum record
 * @param stdClass $discussion The forum discussion record
 * @return string HTML describing the subscription change, or '' when nothing changed
 */
function forum_post_subscription($fromform, $forum, $discussion) {
    global $USER;
    // Forced subscription: nothing the user chooses can change it.
    if (\mod_forum\subscriptions::is_forcesubscribed($forum)) {
        return "";
    } else if (\mod_forum\subscriptions::subscription_disabled($forum)) {
        $subscribed = \mod_forum\subscriptions::is_subscribed($USER->id, $forum);
        if ($subscribed && !has_capability('moodle/course:manageactivities', context_course::instance($forum->course), $USER->id)) {
            // This user should not be subscribed to the forum.
            \mod_forum\subscriptions::unsubscribe_user($USER->id, $forum);
        }
        return "";
    }
    // Placeholder data for the confirmation string shown to the user.
    $info = new stdClass();
    $info->name  = fullname($USER);
    $info->discussion = format_string($discussion->name);
    $info->forum = format_string($forum->name);
    if (isset($fromform->discussionsubscribe) && $fromform->discussionsubscribe) {
        if ($result = \mod_forum\subscriptions::subscribe_user_to_discussion($USER->id, $discussion)) {
            return html_writer::tag('p', get_string('discussionnowsubscribed', 'forum', $info));
        }
    } else {
        if ($result = \mod_forum\subscriptions::unsubscribe_user_from_discussion($USER->id, $discussion)) {
            return html_writer::tag('p', get_string('discussionnownotsubscribed', 'forum', $info));
        }
    }
    return '';
}
/**
 * Generate and return the subscribe or unsubscribe link for a forum.
 *
 * @param object $forum the forum. Fields used are $forum->id and $forum->forcesubscribe.
 * @param object $context the context object for this forum.
 * @param array $messages text used for the link in its various states
 *      (subscribed, unsubscribed, forcesubscribed or cantsubscribe).
 *      Any strings not passed in are taken from the $defaultmessages array
 *      at the top of the function.
 * @param bool $cantaccessagroup
 * @param bool $unused1 retained for backward compatibility; ignored
 * @param bool $backtoindex when true, adds backtoindex=1 to the toggle URL
 * @param array $unused2 retained for backward compatibility; ignored
 * @return string
 */
function forum_get_subscribe_link($forum, $context, $messages = array(), $cantaccessagroup = false, $unused1 = true,
    $backtoindex = false, $unused2 = null) {
    global $USER, $OUTPUT;
    // Default link texts; any entry supplied by the caller wins.
    $defaultmessages = array(
        'subscribed' => get_string('unsubscribe', 'forum'),
        'unsubscribed' => get_string('subscribe', 'forum'),
        'cantaccessgroup' => get_string('no'),
        'forcesubscribed' => get_string('everyoneissubscribed', 'forum'),
        'cantsubscribe' => get_string('disallowsubscribe','forum')
    );
    $messages = $messages + $defaultmessages;
    if (\mod_forum\subscriptions::is_forcesubscribed($forum)) {
        return $messages['forcesubscribed'];
    } else if (\mod_forum\subscriptions::subscription_disabled($forum) &&
            !has_capability('mod/forum:managesubscriptions', $context)) {
        return $messages['cantsubscribe'];
    } else if ($cantaccessagroup) {
        return $messages['cantaccessgroup'];
    } else {
        // Only enrolled (active) users get a toggle link at all.
        if (!is_enrolled($context, $USER, '', true)) {
            return '';
        }
        $subscribed = \mod_forum\subscriptions::is_subscribed($USER->id, $forum);
        if ($subscribed) {
            $linktext = $messages['subscribed'];
            $linktitle = get_string('subscribestop', 'forum');
        } else {
            $linktext = $messages['unsubscribed'];
            $linktitle = get_string('subscribestart', 'forum');
        }
        // Build the toggle URL. (A dead $backtoindexlink string that was never
        // used has been removed; only the URL parameter matters.)
        $options = array();
        if ($backtoindex) {
            $options['backtoindex'] = 1;
        }
        $options['id'] = $forum->id;
        $options['sesskey'] = sesskey();
        $url = new moodle_url('/mod/forum/subscribe.php', $options);
        return $OUTPUT->single_button($url, $linktext, 'get', array('title' => $linktitle));
    }
}
/**
 * Returns true if user created new discussion already.
 *
 * @param int $forumid The forum to check for postings
 * @param int $userid The user to check for postings
 * @param int $groupid The group to restrict the check to
 * @return bool
 */
function forum_user_has_posted_discussion($forumid, $userid, $groupid = null) {
    global $CFG, $DB;

    // A discussion starter is a post with no parent.
    $params = [$forumid, $userid];
    $sql = "SELECT 'x'
              FROM {forum_discussions} d, {forum_posts} p
             WHERE d.forum = ? AND p.discussion = d.id AND p.parent = 0 AND p.userid = ?";

    if ($groupid) {
        $sql .= " AND d.groupid = ?";
        $params[] = $groupid;
    }

    return $DB->record_exists_sql($sql, $params);
}
/**
 * Fetch every discussion in a forum that the given user has posted in.
 *
 * @global object
 * @global object
 * @param int $forumid
 * @param int $userid
 * @return array discussion records keyed by discussion id
 */
function forum_discussions_user_has_posted_in($forumid, $userid) {
    global $CFG, $DB;

    $sql = "SELECT d.id AS id,
                   d.*
              FROM {forum_posts} p,
                   {forum_discussions} d
             WHERE p.discussion = d.id
               AND d.forum = ?
               AND p.userid = ?";

    return $DB->get_records_sql($sql, [$forumid, $userid]);
}
/**
 * Checks whether a user has posted in a particular discussion, or anywhere
 * in a forum when no discussion id is supplied.
 *
 * @global object
 * @param int $forumid the forum to check
 * @param int $did the discussion to check, or empty to check the whole forum
 * @param int $userid the user to check for
 * @return bool
 */
function forum_user_has_posted($forumid, $did, $userid) {
    global $DB;
    if (!empty($did)) {
        // A specific discussion was given - a plain existence check suffices.
        return $DB->record_exists('forum_posts', array('discussion' => $did, 'userid' => $userid));
    }
    // No discussion given - look for any post by this user in the forum.
    $sql = "SELECT 'x'
                  FROM {forum_posts} p
                  JOIN {forum_discussions} d ON d.id = p.discussion
                 WHERE p.userid = :userid AND d.forum = :forumid";
    return $DB->record_exists_sql($sql, array('forumid' => $forumid, 'userid' => $userid));
}
/**
 * Returns creation time of the first user's post in given discussion
 * @global object $DB
 * @param int $did Discussion id
 * @param int $userid User id
 * @return int|bool post creation time stamp or return false
 */
function forum_get_user_posted_time($did, $userid) {
    global $DB;
    // MIN(created) yields the earliest post; NULL/0 means the user never posted.
    $firstposted = $DB->get_field('forum_posts', 'MIN(created)', array('userid' => $userid, 'discussion' => $did));
    return empty($firstposted) ? false : $firstposted;
}
/**
 * Checks whether the user may start a new discussion in this forum.
 *
 * @global object
 * @param object $forum forum record
 * @param object $currentgroup active group, or null to look it up from the activity
 * @param int $unused kept for backwards compatibility of the signature
 * @param object $cm course module record (looked up if not supplied)
 * @param object $context module context (derived from $cm if not supplied)
 * @return bool
 */
function forum_user_can_post_discussion($forum, $currentgroup=null, $unused=-1, $cm=NULL, $context=NULL) {
    // $forum is an object
    global $USER;
    // shortcut - guest and not-logged-in users can not post
    if (isguestuser() or !isloggedin()) {
        return false;
    }
    if (!$cm) {
        debugging('missing cm', DEBUG_DEVELOPER);
        if (!$cm = get_coursemodule_from_instance('forum', $forum->id, $forum->course)) {
            print_error('invalidcoursemodule');
        }
    }
    if (!$context) {
        $context = context_module::instance($cm->id);
    }
    // Past the cut-off date only users with the override capability may post.
    if (forum_is_cutoff_date_reached($forum)) {
        if (!has_capability('mod/forum:canoverridecutoff', $context)) {
            return false;
        }
    }
    if ($currentgroup === null) {
        $currentgroup = groups_get_activity_group($cm);
    }
    $groupmode = groups_get_activity_groupmode($cm);
    // The capability required to start a discussion depends on the forum type.
    if ($forum->type == 'news') {
        $capname = 'mod/forum:addnews';
    } else if ($forum->type == 'qanda') {
        $capname = 'mod/forum:addquestion';
    } else {
        $capname = 'mod/forum:startdiscussion';
    }
    if (!has_capability($capname, $context)) {
        return false;
    }
    // Single-discussion forums never accept additional discussions.
    if ($forum->type == 'single') {
        return false;
    }
    // "Each person posts one discussion" forums: one discussion per user (per group).
    if ($forum->type == 'eachuser') {
        if (forum_user_has_posted_discussion($forum->id, $USER->id, $currentgroup)) {
            return false;
        }
    }
    if (!$groupmode or has_capability('moodle/site:accessallgroups', $context)) {
        return true;
    }
    if ($currentgroup) {
        return groups_is_member($currentgroup);
    } else {
        // no group membership and no accessallgroups means no new discussions
        // reverted to 1.7 behaviour in 1.9+, buggy in 1.8.0-1.9.0
        return false;
    }
}
/**
 * This function checks whether the user can reply to posts in a forum
 * discussion. Use forum_user_can_post_discussion() to check whether the user
 * can start discussions.
 *
 * @global object
 * @global object
 * @uses DEBUG_DEVELOPER
 * @uses CONTEXT_MODULE
 * @uses VISIBLEGROUPS
 * @param object $forum forum object
 * @param object $discussion discussion record (must carry a groupid field)
 * @param object $user user to check, defaults to the current user
 * @param object $cm course module record (looked up if not supplied)
 * @param object $course course record (looked up if not supplied)
 * @param object $context module context (derived from $cm if not supplied)
 * @return bool
 */
function forum_user_can_post($forum, $discussion, $user=NULL, $cm=NULL, $course=NULL, $context=NULL) {
    global $USER, $DB;
    if (empty($user)) {
        $user = $USER;
    }
    // shortcut - guest and not-logged-in users can not post
    if (isguestuser($user) or empty($user->id)) {
        return false;
    }
    if (!isset($discussion->groupid)) {
        debugging('incorrect discussion parameter', DEBUG_DEVELOPER);
        return false;
    }
    if (!$cm) {
        debugging('missing cm', DEBUG_DEVELOPER);
        if (!$cm = get_coursemodule_from_instance('forum', $forum->id, $forum->course)) {
            print_error('invalidcoursemodule');
        }
    }
    if (!$course) {
        debugging('missing course', DEBUG_DEVELOPER);
        if (!$course = $DB->get_record('course', array('id' => $forum->course))) {
            print_error('invalidcourseid');
        }
    }
    if (!$context) {
        $context = context_module::instance($cm->id);
    }
    // Past the cut-off date only users with the override capability may post.
    if (forum_is_cutoff_date_reached($forum)) {
        if (!has_capability('mod/forum:canoverridecutoff', $context)) {
            return false;
        }
    }
    // Check whether the discussion is locked.
    if (forum_discussion_is_locked($forum, $discussion)) {
        if (!has_capability('mod/forum:canoverridediscussionlock', $context)) {
            return false;
        }
    }
    // normal users with temporary guest access can not post, suspended users can not post either
    if (!is_viewing($context, $user->id) and !is_enrolled($context, $user->id, '', true)) {
        return false;
    }
    // News forums use a dedicated reply capability.
    if ($forum->type == 'news') {
        $capname = 'mod/forum:replynews';
    } else {
        $capname = 'mod/forum:replypost';
    }
    if (!has_capability($capname, $context, $user->id)) {
        return false;
    }
    // No group mode means no further restrictions apply.
    if (!$groupmode = groups_get_activity_groupmode($cm, $course)) {
        return true;
    }
    if (has_capability('moodle/site:accessallgroups', $context)) {
        return true;
    }
    if ($groupmode == VISIBLEGROUPS) {
        if ($discussion->groupid == -1) {
            // allow students to reply to all participants discussions - this was not possible in Moodle <1.8
            return true;
        }
        return groups_is_member($discussion->groupid);
    } else {
        //separate groups
        if ($discussion->groupid == -1) {
            return false;
        }
        return groups_is_member($discussion->groupid);
    }
}
/**
 * Check to ensure a user can view a timed discussion.
 *
 * @param object $discussion
 * @param object $user
 * @param object $context
 * @return boolean returns true if they can view post, false otherwise
 */
function forum_user_can_see_timed_discussion($discussion, $user, $context) {
    global $CFG;

    // When timed posts are disabled site-wide, every discussion is visible.
    if (empty($CFG->forum_enabletimedposts)) {
        return true;
    }

    $now = time();
    $notyetopen = ($discussion->timestart != 0 && $discussion->timestart > $now);
    $alreadyclosed = ($discussion->timeend != 0 && $discussion->timeend < $now);

    if ($notyetopen || $alreadyclosed) {
        // Outside the visible window - only privileged users may see it.
        return has_capability('mod/forum:viewhiddentimedposts', $context, $user->id);
    }

    return true;
}
/**
 * Check to ensure a user can view a group discussion.
 *
 * @param object $discussion
 * @param object $cm
 * @param object $context
 * @return boolean returns true if they can view post, false otherwise
 */
function forum_user_can_see_group_discussion($discussion, $cm, $context) {
    // Discussions without a specific group (groupid <= 0) are open to all.
    if ($discussion->groupid <= 0) {
        return true;
    }
    // Group restrictions only bite in separate-groups mode.
    if (groups_get_activity_groupmode($cm) != SEPARATEGROUPS) {
        return true;
    }
    return groups_is_member($discussion->groupid) || has_capability('moodle/site:accessallgroups', $context);
}
/**
 * Checks whether the user can see a discussion at all (capability,
 * timed-post visibility and group visibility).
 *
 * @global object
 * @global object
 * @uses DEBUG_DEVELOPER
 * @param object|int $forum forum record, or its id (deprecated - triggers a debugging notice)
 * @param object|int $discussion discussion record, or its id (deprecated - triggers a debugging notice)
 * @param object $context module context
 * @param object $user user to check, defaults to the current user
 * @return bool
 */
function forum_user_can_see_discussion($forum, $discussion, $context, $user=NULL) {
    global $USER, $DB;
    if (empty($user) || empty($user->id)) {
        $user = $USER;
    }
    // retrieve objects (yuk)
    if (is_numeric($forum)) {
        debugging('missing full forum', DEBUG_DEVELOPER);
        if (!$forum = $DB->get_record('forum',array('id'=>$forum))) {
            return false;
        }
    }
    if (is_numeric($discussion)) {
        debugging('missing full discussion', DEBUG_DEVELOPER);
        if (!$discussion = $DB->get_record('forum_discussions',array('id'=>$discussion))) {
            return false;
        }
    }
    if (!$cm = get_coursemodule_from_instance('forum', $forum->id, $forum->course)) {
        print_error('invalidcoursemodule');
    }
    if (!has_capability('mod/forum:viewdiscussion', $context)) {
        return false;
    }
    // Both the timed-post window and the group restrictions must allow access.
    if (!forum_user_can_see_timed_discussion($discussion, $user, $context)) {
        return false;
    }
    if (!forum_user_can_see_group_discussion($discussion, $cm, $context)) {
        return false;
    }
    return true;
}
/**
 * Check whether a user can see the specified post.
 *
 * @param \stdClass $forum The forum to chcek
 * @param \stdClass $discussion The discussion the post is in
 * @param \stdClass $post The post in question
 * @param \stdClass $user The user to test - if not specified, the current user is checked.
 * @param \stdClass $cm The Course Module that the forum is in (required).
 * @param bool $checkdeleted Whether to check the deleted flag on the post.
 * @return bool
 */
function forum_user_can_see_post($forum, $discussion, $post, $user = null, $cm = null, $checkdeleted = true) {
    global $CFG, $USER, $DB;
    // retrieve objects (yuk)
    if (is_numeric($forum)) {
        debugging('missing full forum', DEBUG_DEVELOPER);
        if (!$forum = $DB->get_record('forum',array('id'=>$forum))) {
            return false;
        }
    }
    if (is_numeric($discussion)) {
        debugging('missing full discussion', DEBUG_DEVELOPER);
        if (!$discussion = $DB->get_record('forum_discussions',array('id'=>$discussion))) {
            return false;
        }
    }
    if (is_numeric($post)) {
        debugging('missing full post', DEBUG_DEVELOPER);
        if (!$post = $DB->get_record('forum_posts',array('id'=>$post))) {
            return false;
        }
    }
    // Fall back to the parent id when a partial post object lacks an id.
    if (!isset($post->id) && isset($post->parent)) {
        $post->id = $post->parent;
    }
    if ($checkdeleted && !empty($post->deleted)) {
        return false;
    }
    if (!$cm) {
        debugging('missing cm', DEBUG_DEVELOPER);
        if (!$cm = get_coursemodule_from_instance('forum', $forum->id, $forum->course)) {
            print_error('invalidcoursemodule');
        }
    }
    // Context used throughout function.
    $modcontext = context_module::instance($cm->id);
    if (empty($user) || empty($user->id)) {
        $user = $USER;
    }
    // Prefer the cached capability when the course module cache carries it.
    $canviewdiscussion = (isset($cm->cache) && !empty($cm->cache->caps['mod/forum:viewdiscussion']))
        || has_capability('mod/forum:viewdiscussion', $modcontext, $user->id);
    if (!$canviewdiscussion && !has_all_capabilities(array('moodle/user:viewdetails', 'moodle/user:readuserposts'), context_user::instance($post->userid))) {
        return false;
    }
    // Private replies are only visible to their intended recipients.
    if (!forum_post_is_visible_privately($post, $cm)) {
        return false;
    }
    if (isset($cm->uservisible)) {
        if (!$cm->uservisible) {
            return false;
        }
    } else {
        if (!\core_availability\info_module::is_user_visible($cm, $user->id, false)) {
            return false;
        }
    }
    if (!forum_user_can_see_timed_discussion($discussion, $user, $modcontext)) {
        return false;
    }
    if (!forum_user_can_see_group_discussion($discussion, $cm, $modcontext)) {
        return false;
    }
    // Q&A forums hide other people's posts until the user has posted themselves
    // and the editing window has elapsed.
    if ($forum->type == 'qanda') {
        if (has_capability('mod/forum:viewqandawithoutposting', $modcontext, $user->id) || $post->userid == $user->id
                || (isset($discussion->firstpost) && $discussion->firstpost == $post->id)) {
            return true;
        }
        $firstpost = forum_get_firstpost_from_discussion($discussion->id);
        if ($firstpost->userid == $user->id) {
            return true;
        }
        $userfirstpost = forum_get_user_posted_time($discussion->id, $user->id);
        return (($userfirstpost !== false && (time() - $userfirstpost >= $CFG->maxeditingtime)));
    }
    return true;
}
/**
 * Returns all forum posts since a given time in specified forum.
 *
 * Appends one activity entry per visible post to $activities (for the
 * course recent-activity report), honouring timed-post and group visibility.
 *
 * @global object
 * @global object
 * @global object
 * @global object
 * @param array $activities collector array, appended to by reference
 * @param int $index next free index into $activities, incremented by reference
 * @param int $timestart only posts created after this timestamp are included
 * @param int $courseid id of the course the forum belongs to
 * @param int $cmid course module id of the forum
 * @param int $userid optional: restrict to posts authored by this user
 * @param int $groupid optional: restrict to discussions in this group
 */
function forum_get_recent_mod_activity(&$activities, &$index, $timestart, $courseid, $cmid, $userid=0, $groupid=0) {
    global $CFG, $COURSE, $USER, $DB;
    // Avoid a DB query when the requested course is the current one.
    if ($COURSE->id == $courseid) {
        $course = $COURSE;
    } else {
        $course = $DB->get_record('course', array('id' => $courseid));
    }
    $modinfo = get_fast_modinfo($course);
    $cm = $modinfo->cms[$cmid];
    $params = array($timestart, $cm->instance);
    if ($userid) {
        $userselect = "AND u.id = ?";
        $params[] = $userid;
    } else {
        $userselect = "";
    }
    if ($groupid) {
        $groupselect = "AND d.groupid = ?";
        $params[] = $groupid;
    } else {
        $groupselect = "";
    }
    $userfieldsapi = \core_user\fields::for_name();
    $allnames = $userfieldsapi->get_sql('u', false, '', '', false)->selects;
    if (!$posts = $DB->get_records_sql("SELECT p.*, f.type AS forumtype, d.forum, d.groupid,
                                              d.timestart, d.timeend, d.userid AS duserid,
                                              $allnames, u.email, u.picture, u.imagealt, u.email
                                         FROM {forum_posts} p
                                               JOIN {forum_discussions} d ON d.id = p.discussion
                                               JOIN {forum} f ON f.id = d.forum
                                               JOIN {user} u ON u.id = p.userid
                                        WHERE p.created > ? AND f.id = ?
                                              $userselect $groupselect
                                     ORDER BY p.id ASC", $params)) { // order by initial posting date
        return;
    }
    $groupmode = groups_get_activity_groupmode($cm, $course);
    $cm_context = context_module::instance($cm->id);
    $viewhiddentimed = has_capability('mod/forum:viewhiddentimedposts', $cm_context);
    $accessallgroups = has_capability('moodle/site:accessallgroups', $cm_context);
    // First pass: drop posts the current user is not allowed to see.
    $printposts = array();
    foreach ($posts as $post) {
        // Timed posts outside their window are only shown to privileged users
        // (authors always see their own).
        if (!empty($CFG->forum_enabletimedposts) and $USER->id != $post->duserid
                and (($post->timestart > 0 and $post->timestart > time()) or ($post->timeend > 0 and $post->timeend < time()))) {
            if (!$viewhiddentimed) {
                continue;
            }
        }
        if ($groupmode) {
            if ($post->groupid == -1 or $groupmode == VISIBLEGROUPS or $accessallgroups) {
                // oki (Open discussions have groupid -1)
            } else {
                // separate mode
                if (isguestuser()) {
                    // shortcut
                    continue;
                }
                if (!in_array($post->groupid, $modinfo->get_groups($cm->groupingid))) {
                    continue;
                }
            }
        }
        $printposts[] = $post;
    }
    if (!$printposts) {
        return;
    }
    $aname = format_string($cm->name,true);
    // Second pass: build one activity entry per surviving post.
    foreach ($printposts as $post) {
        $tmpactivity = new stdClass();
        $tmpactivity->type = 'forum';
        $tmpactivity->cmid = $cm->id;
        $tmpactivity->name = $aname;
        $tmpactivity->sectionnum = $cm->sectionnum;
        $tmpactivity->timestamp = $post->modified;
        $tmpactivity->content = new stdClass();
        $tmpactivity->content->id = $post->id;
        $tmpactivity->content->discussion = $post->discussion;
        $tmpactivity->content->subject = format_string($post->subject);
        $tmpactivity->content->parent = $post->parent;
        $tmpactivity->content->forumtype = $post->forumtype;
        $tmpactivity->user = new stdClass();
        // Fix: the previous hard-coded field list here was dead code - it was
        // immediately overwritten by the picture-fields lookup below.
        $additionalfields = explode(',', implode(',', \core_user\fields::get_picture_fields()));
        $tmpactivity->user = username_load_fields_from_object($tmpactivity->user, $post, null, $additionalfields);
        $tmpactivity->user->id = $post->userid;
        $activities[$index++] = $tmpactivity;
    }
    return;
}
/**
 * Outputs the forum post indicated by $activity.
 *
 * @param object $activity the activity object the forum resides in
 * @param int $courseid the id of the course the forum resides in
 * @param bool $detail whether to prefix the entry with the module icon and name
 * @param int $modnames not used, but required for compatibilty with other modules
 * @param bool $viewfullnames whether the viewer may see users' full names
 */
function forum_print_recent_mod_activity($activity, $courseid, $detail, $modnames, $viewfullnames) {
    global $OUTPUT;
    $content = $activity->content;
    // Posts with a parent are replies; root posts are discussions.
    if ($content->parent) {
        $class = 'reply';
    } else {
        $class = 'discussion';
    }
    $tableoptions = [
        'border' => '0',
        'cellpadding' => '3',
        'cellspacing' => '0',
        'class' => 'forum-recent'
    ];
    $output = html_writer::start_tag('table', $tableoptions);
    $output .= html_writer::start_tag('tr');
    $post = (object) ['parent' => $content->parent];
    $forum = (object) ['type' => $content->forumtype];
    $authorhidden = forum_is_author_hidden($post, $forum);
    // Show user picture if author should not be hidden.
    if (!$authorhidden) {
        $pictureoptions = [
            'courseid' => $courseid,
            'link' => $authorhidden,
            'alttext' => $authorhidden,
        ];
        $picture = $OUTPUT->user_picture($activity->user, $pictureoptions);
        $output .= html_writer::tag('td', $picture, ['class' => 'userpicture', 'valign' => 'top']);
    }
    // Discussion title and author.
    $output .= html_writer::start_tag('td', ['class' => $class]);
    if ($content->parent) {
        $class = 'title';
    } else {
        // Bold the title of new discussions so they stand out.
        $class = 'title bold';
    }
    $output .= html_writer::start_div($class);
    if ($detail) {
        $aname = s($activity->name);
        $output .= $OUTPUT->image_icon('icon', $aname, $activity->type);
    }
    // Link straight to the post anchor within the discussion page.
    $discussionurl = new moodle_url('/mod/forum/discuss.php', ['d' => $content->discussion]);
    $discussionurl->set_anchor('p' . $activity->content->id);
    $output .= html_writer::link($discussionurl, $content->subject);
    $output .= html_writer::end_div();
    $timestamp = userdate_htmltime($activity->timestamp);
    if ($authorhidden) {
        // Hidden authors: show only the timestamp, no name or profile link.
        $authornamedate = $timestamp;
    } else {
        $fullname = fullname($activity->user, $viewfullnames);
        $userurl = new moodle_url('/user/view.php');
        $userurl->params(['id' => $activity->user->id, 'course' => $courseid]);
        $by = new stdClass();
        $by->name = html_writer::link($userurl, $fullname);
        $by->date = $timestamp;
        $authornamedate = get_string('bynameondate', 'forum', $by);
    }
    $output .= html_writer::div($authornamedate, 'user');
    $output .= html_writer::end_tag('td');
    $output .= html_writer::end_tag('tr');
    $output .= html_writer::end_tag('table');
    echo $output;
}
/**
 * recursively sets the discussion field to $discussionid on $postid and all its children
 * used when pruning a post
 *
 * @global object
 * @param int $postid
 * @param int $discussionid
 * @return bool
 */
function forum_change_discussionid($postid, $discussionid) {
    global $DB;
    // Re-home this post, then walk its direct replies and repeat.
    $DB->set_field('forum_posts', 'discussion', $discussionid, array('id' => $postid));
    $children = $DB->get_records('forum_posts', array('parent' => $postid));
    if ($children) {
        foreach ($children as $child) {
            forum_change_discussionid($child->id, $discussionid);
        }
    }
    return true;
}
/**
 * Prints the editing button on subscribers page
 *
 * @global object
 * @global object
 * @param int $courseid
 * @param int $forumid
 * @return string
 */
function forum_update_subscriptions_button($courseid, $forumid) {
    global $CFG, $USER;

    // Toggle between turning the editing mode on and off.
    $editing = !empty($USER->subscriptionsediting);
    $string = $editing ? get_string('managesubscriptionsoff', 'forum')
                       : get_string('managesubscriptionson', 'forum');
    $edit = $editing ? "off" : "on";

    $button  = html_writer::start_tag('form', array('action' => $CFG->wwwroot . '/mod/forum/subscribers.php',
        'method' => 'get', 'class' => 'form-inline'));
    $button .= html_writer::empty_tag('input', array('type' => 'submit', 'value' => $string,
        'class' => 'btn btn-secondary'));
    $button .= html_writer::empty_tag('input', array('type' => 'hidden', 'name' => 'id', 'value' => $forumid));
    $button .= html_writer::empty_tag('input', array('type' => 'hidden', 'name' => 'edit', 'value' => $edit));
    $button .= html_writer::end_tag('form');

    return $button;
}
// Functions to do with read tracking.
/**
 * Mark posts as read.
 *
 * Inserts missing {forum_read} records and refreshes lastread on existing
 * ones. Posts older than $CFG->forum_oldpostdays are skipped, as are posts
 * in forums the user does not track.
 *
 * @global object
 * @global object
 * @param object $user object
 * @param array $postids array of post ids
 * @return boolean success
 */
function forum_tp_mark_posts_read($user, $postids) {
    global $CFG, $DB;
    if (!forum_tp_can_track_forums(false, $user)) {
        return true;
    }
    $status = true;
    $now = time();
    $cutoffdate = $now - ($CFG->forum_oldpostdays * 24 * 3600);
    if (empty($postids)) {
        return true;
    } else if (count($postids) > 200) {
        // Process in chunks of 200 to keep the IN() clause a manageable size.
        while ($part = array_splice($postids, 0, 200)) {
            $status = forum_tp_mark_posts_read($user, $part) && $status;
        }
        return $status;
    }
    list($usql, $postidparams) = $DB->get_in_or_equal($postids, SQL_PARAMS_NAMED, 'postid');
    // The same user id is bound three times because it appears in three
    // separate places in the INSERT...SELECT below.
    $insertparams = array(
        'userid1' => $user->id,
        'userid2' => $user->id,
        'userid3' => $user->id,
        'firstread' => $now,
        'lastread' => $now,
        'cutoffdate' => $cutoffdate,
    );
    $params = array_merge($postidparams, $insertparams);
    if ($CFG->forum_allowforcedreadtracking) {
        $trackingsql = "AND (f.trackingtype = ".FORUM_TRACKING_FORCED."
                        OR (f.trackingtype = ".FORUM_TRACKING_OPTIONAL." AND tf.id IS NULL))";
    } else {
        $trackingsql = "AND ((f.trackingtype = ".FORUM_TRACKING_OPTIONAL."  OR f.trackingtype = ".FORUM_TRACKING_FORCED.")
                            AND tf.id IS NULL)";
    }
    // First insert any new entries.
    $sql = "INSERT INTO {forum_read} (userid, postid, discussionid, forumid, firstread, lastread)
            SELECT :userid1, p.id, p.discussion, d.forum, :firstread, :lastread
                FROM {forum_posts} p
                    JOIN {forum_discussions} d       ON d.id = p.discussion
                    JOIN {forum} f                   ON f.id = d.forum
                    LEFT JOIN {forum_track_prefs} tf ON (tf.userid = :userid2 AND tf.forumid = f.id)
                    LEFT JOIN {forum_read} fr        ON (
                            fr.userid = :userid3
                        AND fr.postid = p.id
                        AND fr.discussionid = d.id
                        AND fr.forumid = f.id
                    )
                WHERE p.id $usql
                    AND p.modified >= :cutoffdate
                    $trackingsql
                    AND fr.id IS NULL";
    $status = $DB->execute($sql, $params) && $status;
    // Then update all records.
    $updateparams = array(
        'userid' => $user->id,
        'lastread' => $now,
    );
    $params = array_merge($postidparams, $updateparams);
    $status = $DB->set_field_select('forum_read', 'lastread', $now, '
                userid      =  :userid
            AND lastread    <> :lastread
            AND postid      ' . $usql,
            $params) && $status;
    return $status;
}
/**
 * Mark post as read by inserting or refreshing the user's read record.
 *
 * Posts older than $CFG->forum_oldpostdays are never inserted (the INSERT's
 * SELECT matches nothing for them).
 *
 * @global object
 * @global object
 * @param int $userid id of the user who read the post
 * @param int $postid id of the post that was read
 * @return bool success
 */
function forum_tp_add_read_record($userid, $postid) {
    global $CFG, $DB;

    $now = time();
    $cutoffdate = $now - ($CFG->forum_oldpostdays * 24 * 3600);

    if (!$DB->record_exists('forum_read', array('userid' => $userid, 'postid' => $postid))) {
        $sql = "INSERT INTO {forum_read} (userid, postid, discussionid, forumid, firstread, lastread)
                SELECT ?, p.id, p.discussion, d.forum, ?, ?
                  FROM {forum_posts} p
                       JOIN {forum_discussions} d ON d.id = p.discussion
                 WHERE p.id = ? AND p.modified >= ?";
        return $DB->execute($sql, array($userid, $now, $now, $postid, $cutoffdate));
    } else {
        $sql = "UPDATE {forum_read}
                   SET lastread = ?
                 WHERE userid = ? AND postid = ?";
        // Bug fix: the third bound value must be the post id, not the user id,
        // otherwise the wrong (usually no) read record gets its lastread updated.
        return $DB->execute($sql, array($now, $userid, $postid));
    }
}
/**
 * If its an old post, do nothing. If the record exists, the maintenance will clear it up later.
 *
 * @param int $userid The ID of the user to mark posts read for.
 * @param object $post The post record for the post to mark as read.
 * @param mixed $unused
 * @return bool
 */
function forum_tp_mark_post_read($userid, $post, $unused = null) {
    // Old posts never need a read record - report success without touching the DB.
    if (forum_tp_is_post_old($post)) {
        return true;
    }
    return forum_tp_add_read_record($userid, $post->id);
}
/**
 * Marks a whole forum as read, for a given user
 *
 * @global object
 * @global object
 * @param object $user
 * @param int $forumid
 * @param int|bool $groupid group to restrict to, false for all groups
 * @return bool
 */
function forum_tp_mark_forum_read($user, $forumid, $groupid=false) {
    global $CFG, $DB;
    // Posts older than forum_oldpostdays are considered read already.
    $cutoffdate = time() - ($CFG->forum_oldpostdays*24*60*60);
    $groupsel = "";
    $params = array($user->id, $forumid, $cutoffdate);
    if ($groupid !== false) {
        // Include open (all-participants) discussions alongside the group's own.
        $groupsel = " AND (d.groupid = ? OR d.groupid = -1)";
        $params[] = $groupid;
    }
    // Find posts in this forum with no read record yet for the user.
    $sql = "SELECT p.id
              FROM {forum_posts} p
                   LEFT JOIN {forum_discussions} d ON d.id = p.discussion
                   LEFT JOIN {forum_read} r        ON (r.postid = p.id AND r.userid = ?)
             WHERE d.forum = ?
                   AND p.modified >= ? AND r.id is NULL
                   $groupsel";
    if ($posts = $DB->get_records_sql($sql, $params)) {
        $postids = array_keys($posts);
        return forum_tp_mark_posts_read($user, $postids);
    }
    return true;
}
/**
 * Marks a whole discussion as read, for a given user
 *
 * @global object
 * @global object
 * @param object $user
 * @param int $discussionid
 * @return bool
 */
function forum_tp_mark_discussion_read($user, $discussionid) {
    global $CFG, $DB;
    // Posts older than forum_oldpostdays are considered read already.
    $cutoffdate = time() - ($CFG->forum_oldpostdays*24*60*60);
    // Find posts in this discussion with no read record yet for the user.
    $sql = "SELECT p.id
              FROM {forum_posts} p
                   LEFT JOIN {forum_read} r ON (r.postid = p.id AND r.userid = ?)
             WHERE p.discussion = ?
                   AND p.modified >= ? AND r.id is NULL";
    if ($posts = $DB->get_records_sql($sql, array($user->id, $discussionid, $cutoffdate))) {
        $postids = array_keys($posts);
        return forum_tp_mark_posts_read($user, $postids);
    }
    return true;
}
/**
 * Checks whether the given user has read the given post.
 *
 * @global object
 * @param int $userid
 * @param object $post
 * @return bool
 */
function forum_tp_is_post_read($userid, $post) {
    global $DB;
    // Old posts are implicitly read; otherwise look for an explicit read record.
    if (forum_tp_is_post_old($post)) {
        return true;
    }
    return $DB->record_exists('forum_read', array('userid' => $userid, 'postid' => $post->id));
}
/**
 * Checks whether a post is older than the site's read-tracking window.
 *
 * @global object
 * @param object $post
 * @param int $time Defautls to time()
 * @return bool
 */
function forum_tp_is_post_old($post, $time=null) {
    global $CFG;
    if ($time === null) {
        $time = time();
    }
    // Anything modified before the cut-off is considered old.
    $cutoff = $time - ($CFG->forum_oldpostdays * 24 * 3600);
    return ($post->modified < $cutoff);
}
/**
 * Returns the count of records for the provided user and course.
 * Please note that group access is ignored!
 *
 * Private replies are excluded from the count unless the user can read
 * them (or they are addressed to the user).
 *
 * @global object
 * @global object
 * @param int $userid
 * @param int $courseid
 * @return array of objects {id, unread} indexed by forum id
 */
function forum_tp_get_course_unread_posts($userid, $courseid) {
    global $CFG, $DB;
    $modinfo = get_fast_modinfo($courseid);
    $forumcms = $modinfo->get_instances_of('forum');
    if (empty($forumcms)) {
        // Return early if the course doesn't have any forum. Will save us a DB query.
        return [];
    }
    $now = floor(time() / MINSECS) * MINSECS; // DB cache friendliness.
    $cutoffdate = $now - ($CFG->forum_oldpostdays * DAYSECS);
    $params = [
        'privatereplyto' => $userid,
        'modified' => $cutoffdate,
        'readuserid' => $userid,
        'trackprefsuser' => $userid,
        'courseid' => $courseid,
        'trackforumuser' => $userid,
    ];
    // Honour timed posts when the feature is enabled site-wide.
    if (!empty($CFG->forum_enabletimedposts)) {
        $timedsql = "AND d.timestart < :timestart AND (d.timeend = 0 OR d.timeend > :timeend)";
        $params['timestart'] = $now;
        $params['timeend'] = $now;
    } else {
        $timedsql = "";
    }
    // Which forums count depends on forced-tracking policy and the user's
    // own tracking preference.
    if ($CFG->forum_allowforcedreadtracking) {
        $trackingsql = "AND (f.trackingtype = ".FORUM_TRACKING_FORCED."
                            OR (f.trackingtype = ".FORUM_TRACKING_OPTIONAL." AND tf.id IS NULL
                                AND (SELECT trackforums FROM {user} WHERE id = :trackforumuser) = 1))";
    } else {
        $trackingsql = "AND ((f.trackingtype = ".FORUM_TRACKING_OPTIONAL." OR f.trackingtype = ".FORUM_TRACKING_FORCED.")
                            AND tf.id IS NULL
                            AND (SELECT trackforums FROM {user} WHERE id = :trackforumuser) = 1)";
    }
    // The inner SELECT tags each recent post with whether it is a private
    // reply and whether that private reply is addressed to this user.
    $sql = "SELECT f.id, COUNT(p.id) AS unread,
                   COUNT(p.privatereply) as privatereplies,
                   COUNT(p.privatereplytouser) as privaterepliestouser
              FROM (
                        SELECT
                            id,
                            discussion,
                            CASE WHEN privatereplyto <> 0 THEN 1 END privatereply,
                            CASE WHEN privatereplyto = :privatereplyto THEN 1 END privatereplytouser
                          FROM {forum_posts}
                         WHERE modified >= :modified
                   ) p
                   JOIN {forum_discussions} d       ON d.id = p.discussion
                   JOIN {forum} f                   ON f.id = d.forum
                   JOIN {course} c                  ON c.id = f.course
              LEFT JOIN {forum_read} r              ON (r.postid = p.id AND r.userid = :readuserid)
              LEFT JOIN {forum_track_prefs} tf      ON (tf.userid = :trackprefsuser AND tf.forumid = f.id)
             WHERE f.course = :courseid
                   AND r.id is NULL
                   $trackingsql
                   $timedsql
          GROUP BY f.id";
    $results = [];
    if ($records = $DB->get_records_sql($sql, $params)) {
        // Loop through each forum instance to check for capability and count the number of unread posts.
        foreach ($forumcms as $cm) {
            // Check that the forum instance exists in the query results.
            if (!isset($records[$cm->instance])) {
                continue;
            }
            $record = $records[$cm->instance];
            $unread = $record->unread;
            // Check if the user has the capability to read private replies for this forum instance.
            $forumcontext = context_module::instance($cm->id);
            if (!has_capability('mod/forum:readprivatereplies', $forumcontext, $userid)) {
                // The real unread count would be the total of unread count minus the number of unread private replies plus
                // the total unread private replies to the user.
                $unread = $record->unread - $record->privatereplies + $record->privaterepliestouser;
            }
            // Build and add the object to the array of results to be returned.
            $results[$record->id] = (object)[
                'id' => $record->id,
                'unread' => $unread,
            ];
        }
    }
    return $results;
}
/**
 * Returns the count of records for the provided user and forum and [optionally] group.
 *
 * Results are cached per course in a function static for the lifetime of
 * the request; in separate-groups mode without the accessallgroups
 * capability a per-group query is made instead of using the cache value.
 *
 * @global object
 * @global object
 * @global object
 * @param object $cm
 * @param object $course
 * @param bool $resetreadcache optional, true to reset the function static $readcache var
 * @return int
 */
function forum_tp_count_forum_unread_posts($cm, $course, $resetreadcache = false) {
    global $CFG, $USER, $DB;
    static $readcache = array();
    if ($resetreadcache) {
        $readcache = array();
    }
    $forumid = $cm->instance;
    // Populate the per-course cache on first use in this request.
    if (!isset($readcache[$course->id])) {
        $readcache[$course->id] = array();
        if ($counts = forum_tp_get_course_unread_posts($USER->id, $course->id)) {
            foreach ($counts as $count) {
                $readcache[$course->id][$count->id] = $count->unread;
            }
        }
    }
    if (empty($readcache[$course->id][$forumid])) {
        // no need to check group mode ;-)
        return 0;
    }
    $groupmode = groups_get_activity_groupmode($cm, $course);
    if ($groupmode != SEPARATEGROUPS) {
        return $readcache[$course->id][$forumid];
    }
    $forumcontext = context_module::instance($cm->id);
    if (has_any_capability(['moodle/site:accessallgroups', 'mod/forum:readprivatereplies'], $forumcontext)) {
        return $readcache[$course->id][$forumid];
    }
    require_once($CFG->dirroot.'/course/lib.php');
    $modinfo = get_fast_modinfo($course);
    $mygroups = $modinfo->get_groups($cm->groupingid);
    // add all groups posts
    $mygroups[-1] = -1;
    list ($groupssql, $groupsparams) = $DB->get_in_or_equal($mygroups, SQL_PARAMS_NAMED);
    $now = floor(time() / MINSECS) * MINSECS; // DB Cache friendliness.
    $cutoffdate = $now - ($CFG->forum_oldpostdays * DAYSECS);
    $params = [
        'readuser' => $USER->id,
        'forum' => $forumid,
        'cutoffdate' => $cutoffdate,
        'privatereplyto' => $USER->id,
    ];
    // Honour timed posts when the feature is enabled site-wide.
    if (!empty($CFG->forum_enabletimedposts)) {
        $timedsql = "AND d.timestart < :timestart AND (d.timeend = 0 OR d.timeend > :timeend)";
        $params['timestart'] = $now;
        $params['timeend'] = $now;
    } else {
        $timedsql = "";
    }
    $params = array_merge($params, $groupsparams);
    // Count unread posts in the user's own groups, excluding private replies
    // addressed to somebody else.
    $sql = "SELECT COUNT(p.id)
              FROM {forum_posts} p
                   JOIN {forum_discussions} d ON p.discussion = d.id
                   LEFT JOIN {forum_read} r   ON (r.postid = p.id AND r.userid = :readuser)
             WHERE d.forum = :forum
                   AND p.modified >= :cutoffdate AND r.id is NULL
                   $timedsql
                   AND d.groupid $groupssql
                   AND (p.privatereplyto = 0 OR p.privatereplyto = :privatereplyto)";
    return $DB->get_field_sql($sql, $params);
}
/**
 * Deletes read records for the specified index. At least one parameter must be specified.
 *
 * @global object
 * @param int $userid
 * @param int $postid
 * @param int $discussionid
 * @param int $forumid
 * @return bool false when no filter was supplied, otherwise the delete result
 */
function forum_tp_delete_read_records($userid=-1, $postid=-1, $discussionid=-1, $forumid=-1) {
    global $DB;

    // Build one "field = ?" clause per filter that was actually supplied (> -1).
    $filters = array(
        'userid' => $userid,
        'postid' => $postid,
        'discussionid' => $discussionid,
        'forumid' => $forumid,
    );
    $clauses = array();
    $params = array();
    foreach ($filters as $field => $value) {
        if ($value > -1) {
            $clauses[] = $field . ' = ?';
            $params[] = $value;
        }
    }

    // Refuse to delete everything when no filter at all was given.
    if (empty($clauses)) {
        return false;
    }
    return $DB->delete_records_select('forum_read', implode(' AND ', $clauses), $params);
}
/**
 * Get a list of forums not tracked by the user.
 *
 * A forum is untracked when tracking is off, or when tracking is optional
 * and the user opted out (a {forum_track_prefs} row exists, or the user's
 * global trackforums preference is 0).
 *
 * @global object
 * @global object
 * @param int $userid The id of the user to use.
 * @param int $courseid The id of the course being checked.
 * @return mixed An array indexed by forum id, or false.
 */
function forum_tp_get_untracked_forums($userid, $courseid) {
    global $CFG, $DB;
    if ($CFG->forum_allowforcedreadtracking) {
        $trackingsql = "AND (f.trackingtype = ".FORUM_TRACKING_OFF."
                            OR (f.trackingtype = ".FORUM_TRACKING_OPTIONAL." AND (ft.id IS NOT NULL
                                OR (SELECT trackforums FROM {user} WHERE id = ?) = 0)))";
    } else {
        // Without forced tracking, FORUM_TRACKING_FORCED behaves like optional.
        $trackingsql = "AND (f.trackingtype = ".FORUM_TRACKING_OFF."
                            OR ((f.trackingtype = ".FORUM_TRACKING_OPTIONAL." OR f.trackingtype = ".FORUM_TRACKING_FORCED.")
                                AND (ft.id IS NOT NULL
                                    OR (SELECT trackforums FROM {user} WHERE id = ?) = 0)))";
    }
    $sql = "SELECT f.id
              FROM {forum} f
                   LEFT JOIN {forum_track_prefs} ft ON (ft.forumid = f.id AND ft.userid = ?)
             WHERE f.course = ?
                   $trackingsql";
    if ($forums = $DB->get_records_sql($sql, array($userid, $courseid, $userid))) {
        // Re-index the result by forum id for quick lookups by the caller.
        foreach ($forums as $forum) {
            $forums[$forum->id] = $forum;
        }
        return $forums;
    } else {
        return array();
    }
}
/**
 * Determine if a user can track forums and optionally a particular forum.
 * Checks the site settings, the user settings and the forum settings (if
 * requested).
 *
 * @global object
 * @global object
 * @global object
 * @param mixed $forum The forum object to test, or the int id (optional).
 * @param mixed $user The user object to check for (optional, defaults to $USER).
 * @return boolean
 */
function forum_tp_can_track_forums($forum=false, $user=false) {
    global $USER, $CFG, $DB;
    // if possible, avoid expensive
    // queries
    if (empty($CFG->forum_trackreadposts)) {
        return false;
    }
    if ($user === false) {
        $user = $USER;
    }
    if (isguestuser($user) or empty($user->id)) {
        return false;
    }
    if ($forum === false) {
        if ($CFG->forum_allowforcedreadtracking) {
            // Since we can force tracking, assume yes without a specific forum.
            return true;
        } else {
            return (bool)$user->trackforums;
        }
    }
    // Work toward always passing an object...
    if (is_numeric($forum)) {
        debugging('Better use proper forum object.', DEBUG_DEVELOPER);
        $forum = $DB->get_record('forum', array('id' => $forum), '', 'id,trackingtype');
    }
    $forumallows = ($forum->trackingtype == FORUM_TRACKING_OPTIONAL);
    $forumforced = ($forum->trackingtype == FORUM_TRACKING_FORCED);
    if ($CFG->forum_allowforcedreadtracking) {
        // If we allow forcing, then forced forums takes procidence over user setting.
        return ($forumforced || ($forumallows  && (!empty($user->trackforums) && (bool)$user->trackforums)));
    } else {
        // If we don't allow forcing, user setting trumps.
        return ($forumforced || $forumallows)  && !empty($user->trackforums);
    }
}
/**
 * Tells whether a specific forum is tracked by the user. A user can optionally
 * be specified. If not specified, the current user is assumed.
 *
 * @global object $USER
 * @global object $CFG
 * @global object $DB
 * @param mixed $forum If int, the id of the forum being checked; if object, the forum object
 * @param mixed $user The user object to check for (optional, defaults to $USER).
 * @return boolean
 */
function forum_tp_is_tracked($forum, $user=false) {
    global $USER, $CFG, $DB;

    if ($user === false) {
        $user = $USER;
    }

    // Guests never track anything.
    if (isguestuser($user) or empty($user->id)) {
        return false;
    }

    $forumid = is_numeric($forum) ? $forum : $forum->id;

    // Results are cached per forum/user pair.  The cached value is a string
    // because the cache API returns false on a miss, which would otherwise be
    // indistinguishable from a legitimate boolean false.
    $cache = cache::make('mod_forum', 'forum_is_tracked');
    $key = $forumid . '_' . $user->id;
    if ($cachedvalue = $cache->get($key)) {
        return $cachedvalue == 'tracked';
    }

    // Work toward always passing an object...
    if (is_numeric($forum)) {
        debugging('Better use proper forum object.', DEBUG_DEVELOPER);
        $forum = $DB->get_record('forum', array('id' => $forum));
    }

    if (!forum_tp_can_track_forums($forum, $user)) {
        return false;
    }

    $optional = ($forum->trackingtype == FORUM_TRACKING_OPTIONAL);
    $forced = ($forum->trackingtype == FORUM_TRACKING_FORCED);
    // An existing preference row is an explicit opt-out for this forum.
    $userpref = $DB->get_record('forum_track_prefs', array('userid' => $user->id, 'forumid' => $forum->id));

    if ($CFG->forum_allowforcedreadtracking) {
        $istracked = $forced || ($optional && $userpref === false);
    } else {
        $istracked = ($optional || $forced) && $userpref === false;
    }

    $cache->set($key, $istracked ? 'tracked' : 'not');
    return $istracked;
}
/**
 * Starts read tracking of the given forum for a user.
 *
 * Tracking is on by default unless an opt-out preference row exists, so
 * starting to track simply removes any such row.
 *
 * @global object $USER
 * @global object $DB
 * @param int $forumid The forum to start tracking.
 * @param mixed $userid The user id, or false for the current user.
 * @return bool Result of the delete_records call.
 */
function forum_tp_start_tracking($forumid, $userid=false) {
    global $USER, $DB;
    $userid = ($userid === false) ? $USER->id : $userid;
    return $DB->delete_records('forum_track_prefs', array('userid' => $userid, 'forumid' => $forumid));
}
/**
 * Stops read tracking of the given forum for a user.
 *
 * Records an opt-out preference (unless one already exists) and then deletes
 * the user's read records for the forum.
 *
 * @global object $USER
 * @global object $DB
 * @param int $forumid The forum to stop tracking.
 * @param mixed $userid The user id, or false for the current user.
 * @return bool Result of deleting the read records.
 */
function forum_tp_stop_tracking($forumid, $userid=false) {
    global $USER, $DB;
    $userid = ($userid === false) ? $USER->id : $userid;

    // Record the opt-out preference unless it is already present.
    $conditions = array('userid' => $userid, 'forumid' => $forumid);
    if (!$DB->record_exists('forum_track_prefs', $conditions)) {
        $pref = new stdClass();
        $pref->userid = $userid;
        $pref->forumid = $forumid;
        $DB->insert_record('forum_track_prefs', $pref);
    }

    // Wipe any read records the user has for this forum.
    return forum_tp_delete_read_records($userid, -1, -1, $forumid);
}
/**
 * Clean old records from the forum_read table.
 *
 * @global object $CFG
 * @global object $DB
 * @return void
 */
function forum_tp_clean_read_records() {
    global $CFG, $DB;

    if (!isset($CFG->forum_oldpostdays)) {
        return;
    }

    // Posts older than this timestamp no longer need read-tracking records.
    $cutoffdate = time() - ($CFG->forum_oldpostdays * 24 * 60 * 60);

    // First get the oldest tracking present - we need this to speed up the
    // delete query below.
    $sql = "SELECT MIN(fp.modified) AS first
                 FROM {forum_posts} fp
                      JOIN {forum_read} fr ON fr.postid=fp.id";
    $first = $DB->get_field_sql($sql);
    if (!$first) {
        // Nothing is tracked at all, so there is nothing to delete.
        return;
    }

    // Now delete the old tracking info between the oldest tracked post and
    // the cutoff date.
    $sql = "DELETE
                 FROM {forum_read}
                WHERE postid IN (SELECT fp.id
                                   FROM {forum_posts} fp
                                  WHERE fp.modified >= ? AND fp.modified < ?)";
    $DB->execute($sql, array($first, $cutoffdate));
}
/**
 * Sets the last post for a given discussion.
 *
 * Looks up the most recently modified post in the discussion and copies its
 * author and timestamp onto the discussion record.
 *
 * @global object $CFG
 * @global object $DB
 * @param int $discussionid The discussion to update.
 * @return bool|int The id of the last post, or false if none could be found.
 **/
function forum_discussion_update_last_post($discussionid) {
    global $CFG, $DB;

    // Bail out early if the discussion does not exist.
    if (!$DB->record_exists('forum_discussions', array('id' => $discussionid))) {
        return false;
    }

    // Fetch only the single newest post for this discussion.
    $sql = "SELECT id, userid, modified
              FROM {forum_posts}
             WHERE discussion=?
             ORDER BY modified DESC";
    $lastposts = $DB->get_records_sql($sql, array($discussionid), 0, 1);
    if (!$lastposts) {
        // To get here we couldn't find a post for the discussion (weird).
        return false;
    }

    $lastpost = reset($lastposts);
    $update = new stdClass();
    $update->id = $discussionid;
    $update->usermodified = $lastpost->userid;
    $update->timemodified = $lastpost->modified;
    $DB->update_record('forum_discussions', $update);
    return $lastpost->id;
}
/**
 * List the actions that correspond to a view of this module.
 * This is used by the participation report.
 *
 * Note: This is not used by new logging system. Event with
 *       crud = 'r' and edulevel = LEVEL_PARTICIPATING will
 *       be considered as view action.
 *
 * @return array
 */
function forum_get_view_actions() {
    $actions = array(
        'view discussion',
        'search',
        'forum',
        'forums',
        'subscribers',
        'view forum',
    );
    return $actions;
}
/**
 * List the options for forum subscription modes.
 * This is used by the settings page and by the mod_form page.
 *
 * @return array Subscription mode constants mapped to their display names.
 */
function forum_get_subscriptionmode_options() {
    // Keys are the FORUM_*SUBSCRIBE mode constants; order matters for display.
    return array(
        FORUM_CHOOSESUBSCRIBE => get_string('subscriptionoptional', 'forum'),
        FORUM_FORCESUBSCRIBE => get_string('subscriptionforced', 'forum'),
        FORUM_INITIALSUBSCRIBE => get_string('subscriptionauto', 'forum'),
        FORUM_DISALLOWSUBSCRIBE => get_string('subscriptiondisabled', 'forum'),
    );
}
/**
 * List the actions that correspond to a post of this module.
 * This is used by the participation report.
 *
 * Note: This is not used by new logging system. Event with
 *       crud = ('c' || 'u' || 'd') and edulevel = LEVEL_PARTICIPATING
 *       will be considered as post action.
 *
 * @return array
 */
function forum_get_post_actions() {
    $actions = array(
        'add discussion',
        'add post',
        'delete discussion',
        'delete post',
        'move discussion',
        'prune post',
        'update post',
    );
    return $actions;
}
/**
 * Returns a warning object if a user has reached the number of posts equal to
 * the warning/blocking setting, or false if there is no warning to show.
 *
 * @param int|stdClass $forum the forum id or the forum object
 * @param stdClass $cm the course module
 * @return stdClass|bool returns an object with the warning information, else
 *         returns false if no warning is required.
 */
function forum_check_throttling($forum, $cm = null) {
    global $CFG, $DB, $USER;

    if (is_numeric($forum)) {
        $forum = $DB->get_record('forum', array('id' => $forum), '*', MUST_EXIST);
    }

    if (!is_object($forum)) {
        return false; // This is broken.
    }

    if (!$cm) {
        $cm = get_coursemodule_from_instance('forum', $forum->id, $forum->course, false, MUST_EXIST);
    }

    // Throttling is disabled for this forum when either setting is empty.
    if (empty($forum->blockafter)) {
        return false;
    }
    if (empty($forum->blockperiod)) {
        return false;
    }

    // Users with this capability are exempt from throttling.
    $modcontext = context_module::instance($cm->id);
    if (has_capability('mod/forum:postwithoutthrottling', $modcontext)) {
        return false;
    }

    // Get the number of posts in the last period we care about.
    $timenow = time();
    $timeafter = $timenow - $forum->blockperiod;
    $numposts = $DB->count_records_sql('SELECT COUNT(p.id) FROM {forum_posts} p
                                        JOIN {forum_discussions} d
                                        ON p.discussion = d.id WHERE d.forum = ?
                                        AND p.userid = ? AND p.created > ?', array($forum->id, $USER->id, $timeafter));

    $a = new stdClass();
    $a->blockafter = $forum->blockafter;
    $a->numposts = $numposts;
    $a->blockperiod = get_string('secondstotime'.$forum->blockperiod);

    // Hard block: the user may not post any more in this period.
    if ($forum->blockafter <= $numposts) {
        $warning = new stdClass();
        $warning->canpost = false;
        $warning->errorcode = 'forumblockingtoomanyposts';
        $warning->module = 'error';
        $warning->additional = $a;
        $warning->link = $CFG->wwwroot . '/mod/forum/view.php?f=' . $forum->id;
        return $warning;
    }

    // Soft warning: the user is approaching the blocking threshold.
    if ($forum->warnafter <= $numposts) {
        $warning = new stdClass();
        $warning->canpost = true;
        $warning->errorcode = 'forumblockingalmosttoomanyposts';
        $warning->module = 'forum';
        $warning->additional = $a;
        $warning->link = null;
        return $warning;
    }

    // Fix: previously the function fell through here and implicitly returned
    // null; return false explicitly to honour the documented stdClass|bool
    // contract (callers test the result with empty(), so this is compatible).
    return false;
}
/**
 * Throws an error if the user is no longer allowed to post due to having reached
 * or exceeded the number of posts specified in 'Post threshold for blocking'
 * setting.
 *
 * @since Moodle 2.5
 * @param stdClass $thresholdwarning the warning information returned
 *        from the function forum_check_throttling.
 */
function forum_check_blocking_threshold($thresholdwarning) {
    // Only a blocking warning (canpost === false) aborts the request;
    // soft warnings and "no warning" pass straight through.
    if (empty($thresholdwarning) || $thresholdwarning->canpost) {
        return;
    }
    print_error($thresholdwarning->errorcode,
                $thresholdwarning->module,
                $thresholdwarning->link,
                $thresholdwarning->additional);
}
/**
 * Removes all grades from gradebook
 *
 * @global object $CFG
 * @global object $DB
 * @param int $courseid The course to reset grades in.
 * @param string $type optional Restrict the reset to forums of this type.
 */
function forum_reset_gradebook($courseid, $type='') {
    global $CFG, $DB;

    $params = array($courseid);
    $wheresql = '';
    if ($type) {
        $wheresql = "AND f.type=?";
        $params[] = $type;
    }

    $sql = "SELECT f.*, cm.idnumber as cmidnumber, f.course as courseid
                 FROM {forum} f, {course_modules} cm, {modules} m
                 WHERE m.name='forum' AND m.id=cm.module AND cm.instance=f.id AND f.course=? $wheresql";

    $forums = $DB->get_records_sql($sql, $params);
    if (!$forums) {
        return;
    }
    foreach ($forums as $forum) {
        // 'reset' wipes all grades for the activity from the gradebook.
        forum_grade_item_update($forum, 'reset', 'reset');
    }
}
/**
 * This function is used by the reset_course_userdata function in moodlelib.
 * This function will remove all posts from the specified forum
 * and clean up any related data.
 *
 * @global object $CFG
 * @global object $DB
 * @param stdClass $data the data submitted from the reset course form.
 * @return array status array, one entry per action performed.
 */
function forum_reset_userdata($data) {
    global $CFG, $DB;
    require_once($CFG->dirroot.'/rating/lib.php');
    $componentstr = get_string('modulenameplural', 'forum');
    $status = array();
    // $params starts with the course id; a type restriction appends to it below.
    $params = array($data->courseid);
    $removeposts = false;
    $typesql = "";
    if (!empty($data->reset_forum_all)) {
        // Reset every forum type in the course.
        $removeposts = true;
        $typesstr = get_string('resetforumsall', 'forum');
        $types = array();
    } else if (!empty($data->reset_forum_types)){
        // Reset only the selected forum types; unknown type names are ignored.
        $removeposts = true;
        $types = array();
        $sqltypes = array();
        $forum_types_all = forum_get_forum_types_all();
        foreach ($data->reset_forum_types as $type) {
            if (!array_key_exists($type, $forum_types_all)) {
                continue;
            }
            $types[] = $forum_types_all[$type];
            $sqltypes[] = $type;
        }
        if (!empty($sqltypes)) {
            list($typesql, $typeparams) = $DB->get_in_or_equal($sqltypes);
            $typesql = " AND f.type " . $typesql;
            $params = array_merge($params, $typeparams);
        }
        $typesstr = get_string('resetforums', 'forum').': '.implode(', ', $types);
    }
    // Subqueries selecting the ids of the course's discussions, forums and
    // posts; $typesql (when set) is appended to restrict by forum type.
    $alldiscussionssql = "SELECT fd.id
                            FROM {forum_discussions} fd, {forum} f
                            WHERE f.course=? AND f.id=fd.forum";
    $allforumssql = "SELECT f.id
                            FROM {forum} f
                            WHERE f.course=?";
    $allpostssql = "SELECT fp.id
                            FROM {forum_posts} fp, {forum_discussions} fd, {forum} f
                            WHERE f.course=? AND f.id=fd.forum AND fd.id=fp.discussion";
    $forumssql = $forums = $rm = null;
    // Check if we need to get additional data.
    if ($removeposts || !empty($data->reset_forum_ratings) || !empty($data->reset_forum_tags)) {
        // Set this up if we have to remove ratings.
        $rm = new rating_manager();
        $ratingdeloptions = new stdClass;
        $ratingdeloptions->component = 'mod_forum';
        $ratingdeloptions->ratingarea = 'post';
        // Get the forums for actions that require it.
        $forumssql = "$allforumssql $typesql";
        $forums = $DB->get_records_sql($forumssql, $params);
    }
    if ($removeposts) {
        $discussionssql = "$alldiscussionssql $typesql";
        // NOTE(review): $postssql is built but never used below.
        $postssql = "$allpostssql $typesql";
        // now get rid of all attachments
        $fs = get_file_storage();
        if ($forums) {
            foreach ($forums as $forumid=>$unused) {
                if (!$cm = get_coursemodule_from_instance('forum', $forumid)) {
                    continue;
                }
                // Delete attachment and post file areas, ratings and tags for
                // each forum's module context.
                $context = context_module::instance($cm->id);
                $fs->delete_area_files($context->id, 'mod_forum', 'attachment');
                $fs->delete_area_files($context->id, 'mod_forum', 'post');
                // remove ratings
                $ratingdeloptions->contextid = $context->id;
                $rm->delete_ratings($ratingdeloptions);
                core_tag_tag::delete_instances('mod_forum', null, $context->id);
            }
        }
        // The order of the deletes below matters: children before parents,
        // posts before discussions.
        // first delete all read flags
        $DB->delete_records_select('forum_read', "forumid IN ($forumssql)", $params);
        // remove tracking prefs
        $DB->delete_records_select('forum_track_prefs', "forumid IN ($forumssql)", $params);
        // remove posts from queue
        $DB->delete_records_select('forum_queue', "discussionid IN ($discussionssql)", $params);
        // all posts - initial posts must be kept in single simple discussion forums
        $DB->delete_records_select('forum_posts', "discussion IN ($discussionssql) AND parent <> 0", $params); // first all children
        $DB->delete_records_select('forum_posts', "discussion IN ($discussionssql AND f.type <> 'single') AND parent = 0", $params); // now the initial posts for non single simple
        // finally all discussions except single simple forums
        $DB->delete_records_select('forum_discussions', "forum IN ($forumssql AND f.type <> 'single')", $params);
        // remove all grades from gradebook
        if (empty($data->reset_gradebook_grades)) {
            if (empty($types)) {
                forum_reset_gradebook($data->courseid);
            } else {
                foreach ($types as $type) {
                    forum_reset_gradebook($data->courseid, $type);
                }
            }
        }
        $status[] = array('component'=>$componentstr, 'item'=>$typesstr, 'error'=>false);
    }
    // remove all ratings in this course's forums
    // ($ratingdeloptions/$rm were initialised above whenever this flag is set).
    if (!empty($data->reset_forum_ratings)) {
        if ($forums) {
            foreach ($forums as $forumid=>$unused) {
                if (!$cm = get_coursemodule_from_instance('forum', $forumid)) {
                    continue;
                }
                $context = context_module::instance($cm->id);
                // remove ratings
                $ratingdeloptions->contextid = $context->id;
                $rm->delete_ratings($ratingdeloptions);
            }
        }
        // remove all grades from gradebook
        if (empty($data->reset_gradebook_grades)) {
            forum_reset_gradebook($data->courseid);
        }
    }
    // Remove all the tags.
    if (!empty($data->reset_forum_tags)) {
        if ($forums) {
            foreach ($forums as $forumid => $unused) {
                if (!$cm = get_coursemodule_from_instance('forum', $forumid)) {
                    continue;
                }
                $context = context_module::instance($cm->id);
                core_tag_tag::delete_instances('mod_forum', null, $context->id);
            }
        }
        $status[] = array('component' => $componentstr, 'item' => get_string('tagsdeleted', 'forum'), 'error' => false);
    }
    // remove all digest settings unconditionally - even for users still enrolled in course.
    if (!empty($data->reset_forum_digests)) {
        $DB->delete_records_select('forum_digests', "forum IN ($allforumssql)", $params);
        $status[] = array('component' => $componentstr, 'item' => get_string('resetdigests', 'forum'), 'error' => false);
    }
    // remove all subscriptions unconditionally - even for users still enrolled in course
    if (!empty($data->reset_forum_subscriptions)) {
        $DB->delete_records_select('forum_subscriptions', "forum IN ($allforumssql)", $params);
        $DB->delete_records_select('forum_discussion_subs', "forum IN ($allforumssql)", $params);
        $status[] = array('component' => $componentstr, 'item' => get_string('resetsubscriptions', 'forum'), 'error' => false);
    }
    // remove all tracking prefs unconditionally - even for users still enrolled in course
    if (!empty($data->reset_forum_track_prefs)) {
        $DB->delete_records_select('forum_track_prefs', "forumid IN ($allforumssql)", $params);
        $status[] = array('component'=>$componentstr, 'item'=>get_string('resettrackprefs','forum'), 'error'=>false);
    }
    /// updating dates - shift may be negative too
    if ($data->timeshift) {
        // Any changes to the list of dates that needs to be rolled should be same during course restore and course reset.
        // See MDL-9367.
        shift_course_mod_dates('forum', array('assesstimestart', 'assesstimefinish'), $data->timeshift, $data->courseid);
        $status[] = array('component'=>$componentstr, 'item'=>get_string('datechanged'), 'error'=>false);
    }
    return $status;
}
/**
 * Called by course/reset.php to add the forum section to the course reset form.
 *
 * @param MoodleQuickForm $mform form passed by reference
 */
function forum_reset_course_form_definition(&$mform) {
    $mform->addElement('header', 'forumheader', get_string('modulenameplural', 'forum'));

    // Master switch: remove everything forum related from the course.
    $mform->addElement('checkbox', 'reset_forum_all', get_string('resetforumsall', 'forum'));

    // Alternatively, remove posts from selected forum types only.
    $mform->addElement('select', 'reset_forum_types', get_string('resetforums', 'forum'),
        forum_get_forum_types_all(), array('multiple' => 'multiple'));
    $mform->setAdvanced('reset_forum_types');
    $mform->disabledIf('reset_forum_types', 'reset_forum_all', 'checked');

    $mform->addElement('checkbox', 'reset_forum_digests', get_string('resetdigests', 'forum'));
    $mform->setAdvanced('reset_forum_digests');

    $mform->addElement('checkbox', 'reset_forum_subscriptions', get_string('resetsubscriptions', 'forum'));
    $mform->setAdvanced('reset_forum_subscriptions');

    $mform->addElement('checkbox', 'reset_forum_track_prefs', get_string('resettrackprefs', 'forum'));
    $mform->setAdvanced('reset_forum_track_prefs');
    $mform->disabledIf('reset_forum_track_prefs', 'reset_forum_all', 'checked');

    $mform->addElement('checkbox', 'reset_forum_ratings', get_string('deleteallratings'));
    $mform->disabledIf('reset_forum_ratings', 'reset_forum_all', 'checked');

    $mform->addElement('checkbox', 'reset_forum_tags', get_string('removeallforumtags', 'forum'));
    $mform->disabledIf('reset_forum_tags', 'reset_forum_all', 'checked');
}
/**
 * Course reset form defaults.
 *
 * @param stdClass $course The course being reset (unused).
 * @return array Default checkbox values for the forum section of the reset form.
 */
function forum_reset_course_form_defaults($course) {
    return array(
        'reset_forum_all' => 1,
        'reset_forum_digests' => 0,
        'reset_forum_subscriptions' => 0,
        'reset_forum_track_prefs' => 0,
        'reset_forum_ratings' => 1,
    );
}
/**
 * Returns array of forum layout modes
 *
 * @param bool $useexperimentalui use experimental layout modes or not
 * @return array Mode constants mapped to their display names.
 */
function forum_get_layout_modes(bool $useexperimentalui = false) {
    $modes = [
        FORUM_MODE_FLATOLDEST => get_string('modeflatoldestfirst', 'forum'),
        FORUM_MODE_FLATNEWEST => get_string('modeflatnewestfirst', 'forum'),
        FORUM_MODE_THREADED => get_string('modethreaded', 'forum')
    ];
    // The experimental UI offers nested v2 in place of the classic nested mode.
    $nestedmode = $useexperimentalui ? FORUM_MODE_NESTED_V2 : FORUM_MODE_NESTED;
    $nestedstring = $useexperimentalui ? 'modenestedv2' : 'modenested';
    $modes[$nestedmode] = get_string($nestedstring, 'forum');
    return $modes;
}
/**
 * Returns array of forum types chooseable on the forum editing form
 *
 * @return array Type identifiers mapped to their display names.
 */
function forum_get_forum_types() {
    return array(
        'general' => get_string('generalforum', 'forum'),
        'eachuser' => get_string('eachuserforum', 'forum'),
        'single' => get_string('singleforum', 'forum'),
        'qanda' => get_string('qandaforum', 'forum'),
        'blog' => get_string('blogforum', 'forum'),
    );
}
/**
 * Returns array of all forum types, including the course-created
 * 'news' and 'social' types that cannot be chosen on the editing form.
 *
 * @return array Type identifiers mapped to their display names.
 */
function forum_get_forum_types_all() {
    return array(
        'news' => get_string('namenews','forum'),
        'social' => get_string('namesocial','forum'),
        'general' => get_string('generalforum', 'forum'),
        'eachuser' => get_string('eachuserforum', 'forum'),
        'single' => get_string('singleforum', 'forum'),
        'qanda' => get_string('qandaforum', 'forum'),
        'blog' => get_string('blogforum', 'forum'),
    );
}
/**
 * Returns all other caps used in module
 *
 * @return array Capability names from the rating subsystem used by forum.
 */
function forum_get_extra_capabilities() {
    return [
        'moodle/rating:view',
        'moodle/rating:viewany',
        'moodle/rating:viewall',
        'moodle/rating:rate',
    ];
}
/**
 * Adds module specific settings to the settings block
 *
 * @param settings_navigation $settingsnav The settings navigation object
 * @param navigation_node $forumnode The node to add module settings to
 */
function forum_extend_settings_navigation(settings_navigation $settingsnav, navigation_node $forumnode) {
    global $USER, $PAGE, $CFG, $DB, $OUTPUT;
    // Make sure the course module context is available on $PAGE->cm.
    if (empty($PAGE->cm->context)) {
        $PAGE->cm->context = context_module::instance($PAGE->cm->instance);
    }
    // Load the forum entity for the current cm and map it to a legacy record.
    $vaultfactory = mod_forum\local\container::get_vault_factory();
    $managerfactory = mod_forum\local\container::get_manager_factory();
    $legacydatamapperfactory = mod_forum\local\container::get_legacy_data_mapper_factory();
    $forumvault = $vaultfactory->get_forum_vault();
    $forumentity = $forumvault->get_from_id($PAGE->cm->instance);
    $forumobject = $legacydatamapperfactory->get_forum_data_mapper()->to_legacy_object($forumentity);
    // Remember the discussion id if the current page URL carries one ('d').
    $params = $PAGE->url->params();
    if (!empty($params['d'])) {
        $discussionid = $params['d'];
    }
    // Display all forum reports user has access to.
    if (isloggedin() && !isguestuser()) {
        $reportnames = array_keys(core_component::get_plugin_list('forumreport'));
        foreach ($reportnames as $reportname) {
            if (has_capability("forumreport/{$reportname}:view", $PAGE->cm->context)) {
                $reportlinkparams = [
                    'courseid' => $forumobject->course,
                    'forumid' => $forumobject->id,
                ];
                $reportlink = new moodle_url("/mod/forum/report/{$reportname}/index.php", $reportlinkparams);
                $forumnode->add(get_string('nodetitle', "forumreport_{$reportname}"), $reportlink, navigation_node::TYPE_CONTAINER);
            }
        }
    }
    // For some actions you need to be enrolled, being admin is not enough sometimes here.
    $enrolled = is_enrolled($PAGE->cm->context, $USER, '', false);
    $activeenrolled = is_enrolled($PAGE->cm->context, $USER, '', true);
    $canmanage = has_capability('mod/forum:managesubscriptions', $PAGE->cm->context);
    $subscriptionmode = \mod_forum\subscriptions::get_subscription_mode($forumobject);
    // Users can self-(un)subscribe only when actively enrolled, not forced,
    // and subscription is not disabled (managers bypass the disabled check).
    $cansubscribe = $activeenrolled && !\mod_forum\subscriptions::is_forcesubscribed($forumobject) &&
            (!\mod_forum\subscriptions::subscription_disabled($forumobject) || $canmanage);
    if ($canmanage) {
        // Managers get a container with one action link per subscription mode;
        // each link needs a confirmation before switching the mode.
        $mode = $forumnode->add(get_string('subscriptionmode', 'forum'), null, navigation_node::TYPE_CONTAINER);
        $mode->add_class('subscriptionmode');
        // Optional subscription mode.
        $allowchoicestring = get_string('subscriptionoptional', 'forum');
        $allowchoiceaction = new action_link(
            new moodle_url('/mod/forum/subscribe.php', [
                'id' => $forumobject->id,
                'mode' => FORUM_CHOOSESUBSCRIBE,
                'sesskey' => sesskey(),
            ]),
            $allowchoicestring,
            new confirm_action(get_string('subscriptionmodeconfirm', 'mod_forum', $allowchoicestring))
        );
        $allowchoice = $mode->add($allowchoicestring, $allowchoiceaction, navigation_node::TYPE_SETTING);
        // Forced subscription mode.
        $forceforeverstring = get_string('subscriptionforced', 'forum');
        $forceforeveraction = new action_link(
            new moodle_url('/mod/forum/subscribe.php', [
                'id' => $forumobject->id,
                'mode' => FORUM_FORCESUBSCRIBE,
                'sesskey' => sesskey(),
            ]),
            $forceforeverstring,
            new confirm_action(get_string('subscriptionmodeconfirm', 'mod_forum', $forceforeverstring))
        );
        $forceforever = $mode->add($forceforeverstring, $forceforeveraction, navigation_node::TYPE_SETTING);
        // Initial subscription mode.
        $forceinitiallystring = get_string('subscriptionauto', 'forum');
        $forceinitiallyaction = new action_link(
            new moodle_url('/mod/forum/subscribe.php', [
                'id' => $forumobject->id,
                'mode' => FORUM_INITIALSUBSCRIBE,
                'sesskey' => sesskey(),
            ]),
            $forceinitiallystring,
            new confirm_action(get_string('subscriptionmodeconfirm', 'mod_forum', $forceinitiallystring))
        );
        $forceinitially = $mode->add($forceinitiallystring, $forceinitiallyaction, navigation_node::TYPE_SETTING);
        // Disabled subscription mode.
        $disallowchoicestring = get_string('subscriptiondisabled', 'forum');
        $disallowchoiceaction = new action_link(
            new moodle_url('/mod/forum/subscribe.php', [
                'id' => $forumobject->id,
                'mode' => FORUM_DISALLOWSUBSCRIBE,
                'sesskey' => sesskey(),
            ]),
            $disallowchoicestring,
            new confirm_action(get_string('subscriptionmodeconfirm', 'mod_forum', $disallowchoicestring))
        );
        $disallowchoice = $mode->add($disallowchoicestring, $disallowchoiceaction, navigation_node::TYPE_SETTING);
        // Mark the currently active mode: strip its action link and flag it
        // as the selected setting.
        switch ($subscriptionmode) {
            case FORUM_CHOOSESUBSCRIBE : // 0
                $allowchoice->action = null;
                $allowchoice->add_class('activesetting');
                $allowchoice->icon = new pix_icon('t/selected', '', 'mod_forum');
                break;
            case FORUM_FORCESUBSCRIBE : // 1
                $forceforever->action = null;
                $forceforever->add_class('activesetting');
                $forceforever->icon = new pix_icon('t/selected', '', 'mod_forum');
                break;
            case FORUM_INITIALSUBSCRIBE : // 2
                $forceinitially->action = null;
                $forceinitially->add_class('activesetting');
                $forceinitially->icon = new pix_icon('t/selected', '', 'mod_forum');
                break;
            case FORUM_DISALLOWSUBSCRIBE : // 3
                $disallowchoice->action = null;
                $disallowchoice->add_class('activesetting');
                $disallowchoice->icon = new pix_icon('t/selected', '', 'mod_forum');
                break;
        }
    } else if ($activeenrolled) {
        // Non-managers just see the current mode as a plain (non-action) node.
        switch ($subscriptionmode) {
            case FORUM_CHOOSESUBSCRIBE : // 0
                $notenode = $forumnode->add(get_string('subscriptionoptional', 'forum'));
                break;
            case FORUM_FORCESUBSCRIBE : // 1
                $notenode = $forumnode->add(get_string('subscriptionforced', 'forum'));
                break;
            case FORUM_INITIALSUBSCRIBE : // 2
                $notenode = $forumnode->add(get_string('subscriptionauto', 'forum'));
                break;
            case FORUM_DISALLOWSUBSCRIBE : // 3
                $notenode = $forumnode->add(get_string('subscriptiondisabled', 'forum'));
                break;
        }
    }
    if ($cansubscribe) {
        // Forum-level (un)subscribe link.
        if (\mod_forum\subscriptions::is_subscribed($USER->id, $forumobject, null, $PAGE->cm)) {
            $linktext = get_string('unsubscribe', 'forum');
        } else {
            $linktext = get_string('subscribe', 'forum');
        }
        $url = new moodle_url('/mod/forum/subscribe.php', array('id'=>$forumobject->id, 'sesskey'=>sesskey()));
        $forumnode->add($linktext, $url, navigation_node::TYPE_SETTING);
        // Discussion-level (un)subscribe link, only when viewing a discussion.
        if (isset($discussionid)) {
            if (\mod_forum\subscriptions::is_subscribed($USER->id, $forumobject, $discussionid, $PAGE->cm)) {
                $linktext = get_string('unsubscribediscussion', 'forum');
            } else {
                $linktext = get_string('subscribediscussion', 'forum');
            }
            $url = new moodle_url('/mod/forum/subscribe.php', array(
                    'id' => $forumobject->id,
                    'sesskey' => sesskey(),
                    'd' => $discussionid,
                    'returnurl' => $PAGE->url->out(),
                ));
            $forumnode->add($linktext, $url, navigation_node::TYPE_SETTING);
        }
    }
    if (has_capability('mod/forum:viewsubscribers', $PAGE->cm->context)){
        $url = new moodle_url('/mod/forum/subscribers.php', array('id'=>$forumobject->id));
        $forumnode->add(get_string('showsubscribers', 'forum'), $url, navigation_node::TYPE_SETTING);
    }
    if ($enrolled && forum_tp_can_track_forums($forumobject)) { // keep tracking info for users with suspended enrolments
        // Offer a toggle only when tracking is effectively user-controllable:
        // optional mode, or forced mode when forcing is not allowed site-wide.
        if ($forumobject->trackingtype == FORUM_TRACKING_OPTIONAL
                || ((!$CFG->forum_allowforcedreadtracking) && $forumobject->trackingtype == FORUM_TRACKING_FORCED)) {
            if (forum_tp_is_tracked($forumobject)) {
                $linktext = get_string('notrackforum', 'forum');
            } else {
                $linktext = get_string('trackforum', 'forum');
            }
            $url = new moodle_url('/mod/forum/settracking.php', array(
                    'id' => $forumobject->id,
                    'sesskey' => sesskey(),
                ));
            $forumnode->add($linktext, $url, navigation_node::TYPE_SETTING);
        }
    }
    // RSS link: use the guest user id for anonymous visitors on the front page.
    if (!isloggedin() && $PAGE->course->id == SITEID) {
        $userid = guest_user()->id;
    } else {
        $userid = $USER->id;
    }
    $hascourseaccess = ($PAGE->course->id == SITEID) || can_access_course($PAGE->course, $userid);
    $enablerssfeeds = !empty($CFG->enablerssfeeds) && !empty($CFG->forum_enablerssfeeds);
    if ($enablerssfeeds && $forumobject->rsstype && $forumobject->rssarticles && $hascourseaccess) {
        if (!function_exists('rss_get_url')) {
            require_once("$CFG->libdir/rsslib.php");
        }
        // rsstype 1 = discussions feed, otherwise posts feed.
        if ($forumobject->rsstype == 1) {
            $string = get_string('rsssubscriberssdiscussions','forum');
        } else {
            $string = get_string('rsssubscriberssposts','forum');
        }
        $url = new moodle_url(rss_get_url($PAGE->cm->context->id, $userid, "mod_forum", $forumobject->id));
        $forumnode->add($string, $url, settings_navigation::TYPE_SETTING, null, null, new pix_icon('i/rss', ''));
    }
    // Forum export link for users allowed to export.
    $capabilitymanager = $managerfactory->get_capability_manager($forumentity);
    if ($capabilitymanager->can_export_forum($USER)) {
        $url = new moodle_url('/mod/forum/export.php', ['id' => $forumobject->id]);
        $forumnode->add(get_string('export', 'mod_forum'), $url, navigation_node::TYPE_SETTING);
    }
}
/**
 * Adds information about unread messages, that is only required for the course view page (and
 * similar), to the course-module object.
 * @param cm_info $cm Course-module object
 */
function forum_cm_info_view(cm_info $cm) {
    global $CFG;

    if (!forum_tp_can_track_forums()) {
        return;
    }

    $unread = forum_tp_count_forum_unread_posts($cm, $cm->get_course());
    if (!$unread) {
        return;
    }

    // Pick the singular or plural label depending on the count.
    $label = ($unread == 1)
        ? get_string('unreadpostsone', 'forum')
        : get_string('unreadpostsnumber', 'forum', $unread);

    $out = '<span class="unread"> <a href="' . $cm->url . '#unread">' . $label . '</a></span>';
    $cm->set_after_link($out);
}
/**
 * Return a list of page types
 * @param string $pagetype current page type
 * @param stdClass $parentcontext Block's parent context
 * @param stdClass $currentcontext Current context of block
 * @return array Page-type patterns mapped to their display names.
 */
function forum_page_type_list($pagetype, $parentcontext, $currentcontext) {
    return array(
        'mod-forum-*' => get_string('page-mod-forum-x', 'forum'),
        'mod-forum-view' => get_string('page-mod-forum-view', 'forum'),
        'mod-forum-discuss' => get_string('page-mod-forum-discuss', 'forum'),
    );
}
/**
 * Gets all of the courses where the provided user has posted in a forum.
 *
 * @global moodle_database $DB The database connection
 * @param stdClass $user The user who's posts we are looking for
 * @param bool $discussionsonly If true only look for discussions started by the user
 * @param bool $includecontexts If set to true contexts for the courses will be preloaded
 * @param int $limitfrom The offset of records to return
 * @param int $limitnum The number of records to return
 * @return array An array of courses
 */
function forum_get_courses_user_posted_in($user, $discussionsonly = false, $includecontexts = true, $limitfrom = null, $limitnum = null) {
    global $DB;

    // Discussions-only needs just the forum_discussions table; otherwise we
    // join through forum_posts to catch any post made by the user.
    if ($discussionsonly) {
        $subquery= "(SELECT DISTINCT fd.course
                         FROM {forum_discussions} fd
                        WHERE fd.userid = :userid )";
    } else {
        $subquery = "(SELECT DISTINCT fd.course
                         FROM {forum_discussions} fd
                         JOIN {forum_posts} fp ON fp.discussion = fd.id
                        WHERE fp.userid = :userid )";
    }

    $params = array('userid' => $user->id);

    // Optionally join the context table so course contexts can be preloaded.
    $ctxselect = '';
    $ctxjoin = '';
    if ($includecontexts) {
        $ctxselect = ', ' . context_helper::get_preload_record_columns_sql('ctx');
        $ctxjoin = "LEFT JOIN {context} ctx ON (ctx.instanceid = c.id AND ctx.contextlevel = :contextlevel)";
        $params['contextlevel'] = CONTEXT_COURSE;
    }

    // All courses where the user has posted within a forum will be returned.
    $sql = "SELECT c.* $ctxselect
                FROM {course} c
                $ctxjoin
                WHERE c.id IN ($subquery)";
    $courses = $DB->get_records_sql($sql, $params, $limitfrom, $limitnum);
    if ($includecontexts) {
        array_map('context_helper::preload_from_record', $courses);
    }
    return $courses;
}
/**
* Gets all of the forums a user has posted in for one or more courses.
*
* @global moodle_database $DB
* @param stdClass $user
* @param array $courseids An array of courseids to search or if not provided
* all courses the user has posted within
* @param bool $discussionsonly If true then only forums where the user has started
* a discussion will be returned.
* @param int $limitfrom The offset of records to return
* @param int $limitnum The number of records to return
* @return array An array of forums the user has posted within in the provided courses
*/
function forum_get_forums_user_posted_in($user, array $courseids = null, $discussionsonly = false, $limitfrom = null, $limitnum = null) {
    global $DB;

    // Restrict the search to the requested courses when a list was supplied.
    $coursefilter = '';
    $sqlparams = array();
    if (!is_null($courseids)) {
        list($courseinsql, $sqlparams) = $DB->get_in_or_equal($courseids, SQL_PARAMS_NAMED, 'courseid');
        $coursefilter = ' AND f.course '.$courseinsql;
    }
    $sqlparams['userid'] = $user->id;
    $sqlparams['forum'] = 'forum';

    // The alias `ff` carries the userid we filter on: the discussion starter
    // in discussions-only mode, otherwise any post author.
    if ($discussionsonly) {
        $innerjoin = 'JOIN {forum_discussions} ff ON ff.forum = f.id';
    } else {
        $innerjoin = 'JOIN {forum_discussions} fd ON fd.forum = f.id
                 JOIN {forum_posts} ff ON ff.discussion = fd.id';
    }

    // Select each matching forum once, together with its course-module id.
    $sql = "SELECT f.*, cm.id AS cmid
              FROM {forum} f
              JOIN {course_modules} cm ON cm.instance = f.id
              JOIN {modules} m ON m.id = cm.module
              JOIN (
                  SELECT f.id
                    FROM {forum} f
                    {$innerjoin}
                   WHERE ff.userid = :userid
                  GROUP BY f.id
                 ) j ON j.id = f.id
             WHERE m.name = :forum
                   {$coursefilter}";
    return $DB->get_records_sql($sql, $sqlparams, $limitfrom, $limitnum);
}
/**
* Returns posts made by the selected user in the requested courses.
*
* This method can be used to return all of the posts made by the requested user
* within the given courses.
* For each course the access of the current user and requested user is checked
* and then for each post access to the post and forum is checked as well.
*
* This function is safe to use with usercapabilities.
*
* @global moodle_database $DB
* @param stdClass $user The user whose posts we want to get
* @param array $courses The courses to search
* @param bool $musthaveaccess If set to true errors will be thrown if the user
* cannot access one or more of the courses to search
* @param bool $discussionsonly If set to true only discussion starting posts
* will be returned.
* @param int $limitfrom The offset of records to return
* @param int $limitnum The number of records to return
 * @return stdClass An object with the following properties
* ->totalcount: the total number of posts made by the requested user
* that the current user can see.
* ->courses: An array of courses the current user can see that the
* requested user has posted in.
* ->forums: An array of forums relating to the posts returned in the
* property below.
* ->posts: An array containing the posts to show for this request.
*/
function forum_get_posts_by_user($user, array $courses, $musthaveaccess = false, $discussionsonly = false, $limitfrom = 0, $limitnum = 50) {
    global $DB, $USER, $CFG;
    $return = new stdClass;
    $return->totalcount = 0; // The total number of posts that the current user is able to view
    $return->courses = array(); // The courses the current user can access
    $return->forums = array(); // The forums that the current user can access that contain posts
    $return->posts = array(); // The posts to display
    // First up a small sanity check. If there are no courses to check we can
    // return immediately, there is obviously nothing to search.
    if (empty($courses)) {
        return $return;
    }
    // A couple of quick setups
    $isloggedin = isloggedin();
    $isguestuser = $isloggedin && isguestuser();
    $iscurrentuser = $isloggedin && $USER->id == $user->id;
    // Check whether or not the current user has capabilities over the requested
    // user and if so they have the capabilities required to view the requested
    // users content.
    $usercontext = context_user::instance($user->id, MUST_EXIST);
    // NOTE(review): the record_exists call is a cheap pre-filter for any role
    // assignment (e.g. a parent role) in the user's context before the heavier
    // capability check on the next line.
    $hascapsonuser = !$iscurrentuser && $DB->record_exists('role_assignments', array('userid' => $USER->id, 'contextid' => $usercontext->id));
    $hascapsonuser = $hascapsonuser && has_all_capabilities(array('moodle/user:viewdetails', 'moodle/user:readuserposts'), $usercontext);
    // Before we actually search each course we need to check the user's access to the
    // course. If the user doesn't have the appropriate access then we either throw an
    // error if a particular course was requested or we just skip over the course.
    foreach ($courses as $course) {
        $coursecontext = context_course::instance($course->id, MUST_EXIST);
        if ($iscurrentuser || $hascapsonuser) {
            // If it is the current user, or the current user has capabilities to the
            // requested user then all we need to do is check the requested users
            // current access to the course.
            // Note: There is no need to check group access or anything of the like
            // as either the current user is the requested user, or has granted
            // capabilities on the requested user. Either way they can see what the
            // requested user posted, although its VERY unlikely in the `parent` situation
            // that the current user will be able to view the posts in context.
            if (!is_viewing($coursecontext, $user) && !is_enrolled($coursecontext, $user)) {
                // Need to have full access to a course to see the rest of own info
                if ($musthaveaccess) {
                    print_error('errorenrolmentrequired', 'forum');
                }
                continue;
            }
        } else {
            // Check whether the current user is enrolled or has access to view the course
            // if they don't we immediately have a problem.
            if (!can_access_course($course)) {
                if ($musthaveaccess) {
                    print_error('errorenrolmentrequired', 'forum');
                }
                continue;
            }
            // If groups are in use and enforced throughout the course then make sure
            // we can meet in at least one course level group.
            // Note that we check if either the current user or the requested user have
            // the capability to access all groups. This is because with that capability
            // a user in group A could post in the group B forum. Grrrr.
            if (groups_get_course_groupmode($course) == SEPARATEGROUPS && $course->groupmodeforce
              && !has_capability('moodle/site:accessallgroups', $coursecontext) && !has_capability('moodle/site:accessallgroups', $coursecontext, $user->id)) {
                // If it's the guest user, too bad... the guest user cannot access groups
                if (!$isloggedin or $isguestuser) {
                    // do not use require_login() here because we might have already used require_login($course)
                    if ($musthaveaccess) {
                        redirect(get_login_url());
                    }
                    continue;
                }
                // Get the groups of the current user
                $mygroups = array_keys(groups_get_all_groups($course->id, $USER->id, $course->defaultgroupingid, 'g.id, g.name'));
                // Get the groups the requested user is a member of
                $usergroups = array_keys(groups_get_all_groups($course->id, $user->id, $course->defaultgroupingid, 'g.id, g.name'));
                // Check whether they are members of the same group. If they are great.
                $intersect = array_intersect($mygroups, $usergroups);
                if (empty($intersect)) {
                    // But they're not... if it was a specific course throw an error otherwise
                    // just skip this course so that it is not searched.
                    if ($musthaveaccess) {
                        print_error("groupnotamember", '', $CFG->wwwroot."/course/view.php?id=$course->id");
                    }
                    continue;
                }
            }
        }
        // Woo hoo we got this far which means the current user can search this
        // this course for the requested user. Although this is only the course accessibility
        // handling that is complete, the forum accessibility tests are yet to come.
        $return->courses[$course->id] = $course;
    }
    // No longer need the $courses array - lose it now as it may be big
    unset($courses);
    // Make sure that we have some courses to search
    if (empty($return->courses)) {
        // If we don't have any courses to search then the reality is that the current
        // user doesn't have access to any courses is which the requested user has posted.
        // Although we do know at this point that the requested user has posts.
        if ($musthaveaccess) {
            print_error('permissiondenied');
        } else {
            return $return;
        }
    }
    // Next step: Collect all of the forums that we will want to search.
    // It is important to note that this step isn't actually about searching, it is
    // about determining which forums we can search by testing accessibility.
    $forums = forum_get_forums_user_posted_in($user, array_keys($return->courses), $discussionsonly);
    // Will be used to build the where conditions for the search
    $forumsearchwhere = array();
    // Will be used to store the where condition params for the search
    $forumsearchparams = array();
    // Will record forums where the user can freely access everything
    $forumsearchfullaccess = array();
    // DB caching friendly: round the current time down to the minute so the
    // generated SQL (and its params) stay stable for up to 60 seconds.
    $now = floor(time() / 60) * 60;
    // For each course to search we want to find the forums the user has posted in
    // and providing the current user can access the forum create a search condition
    // for the forum to get the requested users posts.
    foreach ($return->courses as $course) {
        // Now we need to get the forums
        $modinfo = get_fast_modinfo($course);
        if (empty($modinfo->instances['forum'])) {
            // hmmm, no forums? well at least its easy... skip!
            continue;
        }
        // Iterate
        foreach ($modinfo->get_instances_of('forum') as $forumid => $cm) {
            if (!$cm->uservisible or !isset($forums[$forumid])) {
                continue;
            }
            // Get the forum in question
            $forum = $forums[$forumid];
            // This is needed for functionality later on in the forum code. It is converted to an object
            // because the cm_info is readonly from 2.6. This is a dirty hack because some other parts of the
            // code were expecting an writeable object. See {@link forum_print_post()}.
            $forum->cm = new stdClass();
            foreach ($cm as $key => $value) {
                $forum->cm->$key = $value;
            }
            // Check that either the current user can view the forum, or that the
            // current user has capabilities over the requested user and the requested
            // user can view the discussion
            if (!has_capability('mod/forum:viewdiscussion', $cm->context) && !($hascapsonuser && has_capability('mod/forum:viewdiscussion', $cm->context, $user->id))) {
                continue;
            }
            // This will contain forum specific where clauses
            $forumsearchselect = array();
            if (!$iscurrentuser && !$hascapsonuser) {
                // Make sure we check group access
                if (groups_get_activity_groupmode($cm, $course) == SEPARATEGROUPS and !has_capability('moodle/site:accessallgroups', $cm->context)) {
                    $groups = $modinfo->get_groups($cm->groupingid);
                    $groups[] = -1;
                    list($groupid_sql, $groupid_params) = $DB->get_in_or_equal($groups, SQL_PARAMS_NAMED, 'grps'.$forumid.'_');
                    $forumsearchparams = array_merge($forumsearchparams, $groupid_params);
                    $forumsearchselect[] = "d.groupid $groupid_sql";
                }
                // hidden timed discussions: only the discussion author may see a
                // discussion outside its timestart/timeend window
                if (!empty($CFG->forum_enabletimedposts) && !has_capability('mod/forum:viewhiddentimedposts', $cm->context)) {
                    $forumsearchselect[] = "(d.userid = :userid{$forumid} OR (d.timestart < :timestart{$forumid} AND (d.timeend = 0 OR d.timeend > :timeend{$forumid})))";
                    $forumsearchparams['userid'.$forumid] = $user->id;
                    $forumsearchparams['timestart'.$forumid] = $now;
                    $forumsearchparams['timeend'.$forumid] = $now;
                }
                // qanda access: in a Q&A forum, replies are only visible in
                // discussions the viewer has posted in; first posts are always visible
                if ($forum->type == 'qanda' && !has_capability('mod/forum:viewqandawithoutposting', $cm->context)) {
                    // We need to check whether the user has posted in the qanda forum.
                    $discussionspostedin = forum_discussions_user_has_posted_in($forum->id, $user->id);
                    if (!empty($discussionspostedin)) {
                        $forumonlydiscussions = array();  // Holds discussion ids for the discussions the user is allowed to see in this forum.
                        foreach ($discussionspostedin as $d) {
                            $forumonlydiscussions[] = $d->id;
                        }
                        list($discussionid_sql, $discussionid_params) = $DB->get_in_or_equal($forumonlydiscussions, SQL_PARAMS_NAMED, 'qanda'.$forumid.'_');
                        $forumsearchparams = array_merge($forumsearchparams, $discussionid_params);
                        $forumsearchselect[] = "(d.id $discussionid_sql OR p.parent = 0)";
                    } else {
                        $forumsearchselect[] = "p.parent = 0";
                    }
                }
                if (count($forumsearchselect) > 0) {
                    $forumsearchwhere[] = "(d.forum = :forum{$forumid} AND ".implode(" AND ", $forumsearchselect).")";
                    $forumsearchparams['forum'.$forumid] = $forumid;
                } else {
                    $forumsearchfullaccess[] = $forumid;
                }
            } else {
                // The current user/parent can see all of their own posts
                $forumsearchfullaccess[] = $forumid;
            }
        }
    }
    // If we dont have any search conditions, and we don't have any forums where
    // the user has full access then we just return the default.
    if (empty($forumsearchwhere) && empty($forumsearchfullaccess)) {
        return $return;
    }
    // Prepare a where condition for the full access forums.
    if (count($forumsearchfullaccess) > 0) {
        list($fullidsql, $fullidparams) = $DB->get_in_or_equal($forumsearchfullaccess, SQL_PARAMS_NAMED, 'fula');
        $forumsearchparams = array_merge($forumsearchparams, $fullidparams);
        $forumsearchwhere[] = "(d.forum $fullidsql)";
    }
    // Prepare SQL to both count and search.
    // We alias user.id to useridx because forum_posts already has a userid field and not aliasing this would break
    // oracle and mssql.
    $userfieldsapi = \core_user\fields::for_userpic();
    $userfields = $userfieldsapi->get_sql('u', false, '', 'useridx', false)->selects;
    $countsql = 'SELECT COUNT(*) ';
    $selectsql = 'SELECT p.*, d.forum, d.name AS discussionname, '.$userfields.' ';
    $wheresql = implode(" OR ", $forumsearchwhere);
    if ($discussionsonly) {
        if ($wheresql == '') {
            $wheresql = 'p.parent = 0';
        } else {
            $wheresql = 'p.parent = 0 AND ('.$wheresql.')';
        }
    }
    $sql = "FROM {forum_posts} p
            JOIN {forum_discussions} d ON d.id = p.discussion
            JOIN {user} u ON u.id = p.userid
           WHERE ($wheresql)
             AND p.userid = :userid ";
    $orderby = "ORDER BY p.modified DESC";
    $forumsearchparams['userid'] = $user->id;
    // Set the total number posts made by the requested user that the current user can see
    $return->totalcount = $DB->count_records_sql($countsql.$sql, $forumsearchparams);
    // Set the collection of posts that has been requested
    $return->posts = $DB->get_records_sql($selectsql.$sql.$orderby, $forumsearchparams, $limitfrom, $limitnum);
    // We need to build an array of forums for which posts will be displayed.
    // We do this here to save the caller needing to retrieve them themselves before
    // printing these forums posts. Given we have the forums already there is
    // practically no overhead here.
    foreach ($return->posts as $post) {
        if (!array_key_exists($post->forum, $return->forums)) {
            $return->forums[$post->forum] = $forums[$post->forum];
        }
    }
    return $return;
}
/**
* Set the per-forum maildigest option for the specified user.
*
* @param stdClass $forum The forum to set the option for.
* @param int $maildigest The maildigest option.
* @param stdClass $user The user object. This defaults to the global $USER object.
 * @throws moodle_exception (invaliddigestsetting) thrown if an invalid maildigest option is provided.
*/
function forum_set_user_maildigest($forum, $maildigest, $user = null) {
    global $DB, $USER;

    // Accept either a forum id or a full forum record.
    if (is_number($forum)) {
        $forum = $DB->get_record('forum', array('id' => $forum));
    }

    // Fall back to the current user when none was given.
    if ($user === null) {
        $user = $USER;
    }

    $course  = $DB->get_record('course', array('id' => $forum->course), '*', MUST_EXIST);
    $cm      = get_coursemodule_from_instance('forum', $forum->id, $course->id, false, MUST_EXIST);
    $context = context_module::instance($cm->id);

    // User must be allowed to see this forum.
    require_capability('mod/forum:viewdiscussion', $context, $user->id);

    // Validate the maildigest setting against the available options.
    $digestoptions = forum_get_user_digest_options($user);
    if (!isset($digestoptions[$maildigest])) {
        throw new moodle_exception('invaliddigestsetting', 'mod_forum');
    }

    // Attempt to retrieve any existing forum digest record.
    $subscription = $DB->get_record('forum_digests', array(
        'userid' => $user->id,
        'forum' => $forum->id,
    ));

    if ($subscription) {
        // A record exists: remove it for the default (-1), otherwise update
        // in place, but only if the value actually changed.
        if ($maildigest == -1) {
            $DB->delete_records('forum_digests', array('forum' => $forum->id, 'userid' => $user->id));
        } else if ($maildigest !== $subscription->maildigest) {
            $subscription->maildigest = $maildigest;
            $DB->update_record('forum_digests', $subscription);
        }
    } else if ($maildigest != -1) {
        // No record yet; only persist non-default settings.
        $subscription = new stdClass();
        $subscription->forum = $forum->id;
        $subscription->userid = $user->id;
        $subscription->maildigest = $maildigest;
        $subscription->id = $DB->insert_record('forum_digests', $subscription);
    }
}
/**
* Determine the maildigest setting for the specified user against the
* specified forum.
*
 * @param array $digests An array of forums and user digest settings.
* @param stdClass $user The user object containing the id and maildigest default.
* @param int $forumid The ID of the forum to check.
* @return int The calculated maildigest setting for this user and forum.
*/
function forum_get_user_maildigest_bulk($digests, $user, $forumid) {
    // Default to the user's own digest preference.
    $maildigest = $user->maildigest;
    if (isset($digests[$forumid][$user->id])) {
        $forumsetting = $digests[$forumid][$user->id];
        // A per-forum value of -1 means "use the user default" (already set above).
        if ($forumsetting !== -1) {
            $maildigest = $forumsetting;
        }
    }
    return $maildigest;
}
/**
* Retrieve the list of available user digest options.
*
* @param stdClass $user The user object. This defaults to the global $USER object.
* @return array The mapping of values to digest options.
*/
function forum_get_user_digest_options($user = null) {
    global $USER;

    // Revert to the global user object when no user was supplied.
    if ($user === null) {
        $user = $USER;
    }

    // The three concrete digest settings.
    $digestoptions = array();
    $digestoptions['0'] = get_string('emaildigestoffshort', 'mod_forum');
    $digestoptions['1'] = get_string('emaildigestcompleteshort', 'mod_forum');
    $digestoptions['2'] = get_string('emaildigestsubjectsshort', 'mod_forum');

    // The "default" option goes in last because its label embeds the text of
    // whichever concrete option is the user's current default.
    $digestoptions['-1'] = get_string('emaildigestdefault', 'mod_forum',
            $digestoptions[$user->maildigest]);

    // Re-sort so the default (-1) appears first, then 0, 1, 2.
    ksort($digestoptions);

    return $digestoptions;
}
/**
* Determine the current context if one was not already specified.
*
* If a context of type context_module is specified, it is immediately
* returned and not checked.
*
* @param int $forumid The ID of the forum
* @param context_module $context The current context.
* @return context_module The context determined
*/
function forum_get_context($forumid, $context = null) {
    global $PAGE;

    // An explicit module context is trusted as-is, without checking.
    if ($context instanceof context_module) {
        return $context;
    }

    // Prefer the current page context, when it matches, to save a DB query.
    if ($PAGE->cm && $PAGE->cm->modname === 'forum' && $PAGE->cm->instance == $forumid
            && $PAGE->context->contextlevel == CONTEXT_MODULE && $PAGE->context->instanceid == $PAGE->cm->id) {
        return $PAGE->context;
    }

    // Otherwise look the forum's course module up and build its context.
    $cm = get_coursemodule_from_instance('forum', $forumid);
    return \context_module::instance($cm->id);
}
/**
* Mark the activity completed (if required) and trigger the course_module_viewed event.
*
* @param stdClass $forum forum object
* @param stdClass $course course object
* @param stdClass $cm course module object
* @param stdClass $context context object
* @since Moodle 2.9
*/
function forum_view($forum, $course, $cm, $context) {
    // Mark the activity viewed for completion tracking.
    $completion = new completion_info($course);
    $completion->set_module_viewed($cm);

    // Raise the course_module_viewed event with record snapshots attached so
    // observers do not need to re-read the records.
    $event = \mod_forum\event\course_module_viewed::create(array(
        'context' => $context,
        'objectid' => $forum->id,
    ));
    $event->add_record_snapshot('course_modules', $cm);
    $event->add_record_snapshot('course', $course);
    $event->add_record_snapshot('forum', $forum);
    $event->trigger();
}
/**
* Trigger the discussion viewed event
*
* @param stdClass $modcontext module context object
* @param stdClass $forum forum object
* @param stdClass $discussion discussion object
* @since Moodle 2.9
*/
function forum_discussion_view($modcontext, $forum, $discussion) {
    // Raise the discussion_viewed event, snapshotting the records involved.
    $event = \mod_forum\event\discussion_viewed::create(array(
        'context' => $modcontext,
        'objectid' => $discussion->id,
    ));
    $event->add_record_snapshot('forum_discussions', $discussion);
    $event->add_record_snapshot('forum', $forum);
    $event->trigger();
}
/**
* Set the discussion to pinned and trigger the discussion pinned event
*
* @param stdClass $modcontext module context object
* @param stdClass $forum forum object
* @param stdClass $discussion discussion object
* @since Moodle 3.1
*/
function forum_discussion_pin($modcontext, $forum, $discussion) {
    global $DB;

    // Flag the discussion as pinned, then announce it via an event.
    $DB->set_field('forum_discussions', 'pinned', FORUM_DISCUSSION_PINNED, array('id' => $discussion->id));

    $event = \mod_forum\event\discussion_pinned::create(array(
        'context' => $modcontext,
        'objectid' => $discussion->id,
        'other' => array('forumid' => $forum->id),
    ));
    $event->add_record_snapshot('forum_discussions', $discussion);
    $event->trigger();
}
/**
* Set discussion to unpinned and trigger the discussion unpin event
*
* @param stdClass $modcontext module context object
* @param stdClass $forum forum object
* @param stdClass $discussion discussion object
* @since Moodle 3.1
*/
function forum_discussion_unpin($modcontext, $forum, $discussion) {
    global $DB;

    // Clear the pinned flag, then announce the change via an event.
    $DB->set_field('forum_discussions', 'pinned', FORUM_DISCUSSION_UNPINNED, array('id' => $discussion->id));

    $event = \mod_forum\event\discussion_unpinned::create(array(
        'context' => $modcontext,
        'objectid' => $discussion->id,
        'other' => array('forumid' => $forum->id),
    ));
    $event->add_record_snapshot('forum_discussions', $discussion);
    $event->trigger();
}
/**
* Add nodes to myprofile page.
*
* @param \core_user\output\myprofile\tree $tree Tree object
* @param stdClass $user user object
* @param bool $iscurrentuser
* @param stdClass $course Course object
*
* @return bool
*/
function mod_forum_myprofile_navigation(core_user\output\myprofile\tree $tree, $user, $iscurrentuser, $course) {
    if (isguestuser($user)) {
        // The guest user cannot post, so it is not possible to view any posts.
        // May as well just bail aggressively here.
        return false;
    }

    // Build the two target URLs, scoping them to the course when one is set.
    $postsurl = new moodle_url('/mod/forum/user.php', array('id' => $user->id));
    $discussionssurl = new moodle_url('/mod/forum/user.php', array('id' => $user->id, 'mode' => 'discussions'));
    if (!empty($course)) {
        $postsurl->param('course', $course->id);
        $discussionssurl->param('course', $course->id);
    }

    // Node linking to all of the user's forum posts.
    $tree->add_node(new core_user\output\myprofile\node('miscellaneous', 'forumposts',
            get_string('forumposts', 'mod_forum'), null, $postsurl));

    // Node linking to the discussions the user has started.
    $tree->add_node(new core_user\output\myprofile\node('miscellaneous', 'forumdiscussions',
            get_string('myprofileotherdis', 'mod_forum'), null, $discussionssurl));

    return true;
}
/**
* Checks whether the author's name and picture for a given post should be hidden or not.
*
* @param object $post The forum post.
* @param object $forum The forum object.
* @return bool
* @throws coding_exception
*/
function forum_is_author_hidden($post, $forum) {
    // Both inputs must carry the fields this check relies on.
    if (!isset($post->parent)) {
        throw new coding_exception('$post->parent must be set.');
    }
    if (!isset($forum->type)) {
        throw new coding_exception('$forum->type must be set.');
    }
    // Only the first post (no parent) of a single-discussion forum hides its author.
    return ($forum->type === 'single' && empty($post->parent));
}
/**
* Manage inplace editable saves.
*
* @param string $itemtype The type of item.
* @param int $itemid The ID of the item.
* @param mixed $newvalue The new value
* @return string
*/
function mod_forum_inplace_editable($itemtype, $itemid, $newvalue) {
    global $DB, $PAGE;

    // Only the digest-options control is handled here.
    if ($itemtype !== 'digestoptions') {
        return;
    }

    // The itemid is the forum id.
    $forum = $DB->get_record('forum', array('id' => $itemid), '*', MUST_EXIST);
    $course = $DB->get_record('course', array('id' => $forum->course), '*', MUST_EXIST);
    $cm = get_coursemodule_from_instance('forum', $forum->id, $course->id, false, MUST_EXIST);
    $context = context_module::instance($cm->id);

    $PAGE->set_context($context);
    require_login($course, false, $cm);

    // Persist the new digest preference, then render the updated control.
    forum_set_user_maildigest($forum, $newvalue);

    $renderer = $PAGE->get_renderer('mod_forum');
    return $renderer->render_digest_options($forum, $newvalue);
}
/**
* Determine whether the specified forum's cutoff date is reached.
*
* @param stdClass $forum The forum
* @return bool
*/
function forum_is_cutoff_date_reached($forum) {
    // Build a forum entity from the raw record and delegate the date check to it.
    $factory = \mod_forum\local\container::get_entity_factory();
    $cminfo = get_fast_modinfo($forum->course)->instances['forum'][$forum->id];
    $forumentity = $factory->get_forum_from_stdclass(
            $forum,
            context_module::instance($cminfo->id),
            $cminfo->get_course_module_record(),
            $cminfo->get_course()
    );
    return $forumentity->is_cutoff_date_reached();
}
/**
* Determine whether the specified forum's due date is reached.
*
* @param stdClass $forum The forum
* @return bool
*/
function forum_is_due_date_reached($forum) {
    // Build a forum entity from the raw record and delegate the date check to it.
    $factory = \mod_forum\local\container::get_entity_factory();
    $cminfo = get_fast_modinfo($forum->course)->instances['forum'][$forum->id];
    $forumentity = $factory->get_forum_from_stdclass(
            $forum,
            context_module::instance($cminfo->id),
            $cminfo->get_course_module_record(),
            $cminfo->get_course()
    );
    return $forumentity->is_due_date_reached();
}
/**
* Determine whether the specified discussion is time-locked.
*
* @param stdClass $forum The forum that the discussion belongs to
* @param stdClass $discussion The discussion to test
* @return bool
*/
function forum_discussion_is_locked($forum, $discussion) {
    // Build forum and discussion entities from the raw records, then ask the
    // forum entity whether that discussion is locked.
    $factory = \mod_forum\local\container::get_entity_factory();
    $cminfo = get_fast_modinfo($forum->course)->instances['forum'][$forum->id];
    $forumentity = $factory->get_forum_from_stdclass(
            $forum,
            context_module::instance($cminfo->id),
            $cminfo->get_course_module_record(),
            $cminfo->get_course()
    );
    $discussionentity = $factory->get_discussion_from_stdclass($discussion);
    return $forumentity->is_discussion_locked($discussionentity);
}
/**
* Check if the module has any update that affects the current user since a given time.
*
* @param cm_info $cm course module data
* @param int $from the time to check updates from
* @param array $filter if we need to check only specific updates
* @return stdClass an object with the different type of areas indicating if they were updated or not
* @since Moodle 3.2
*/
function forum_check_updates_since(cm_info $cm, $from, $filter = array()) {
    $updates = new stdClass();

    // Without the view capability there is nothing we can report.
    if (!has_capability('mod/forum:viewdiscussion', $cm->context)) {
        return $updates;
    }

    // Start from the standard module-level update information.
    $updates = course_check_module_updates_since($cm, $from, array(), $filter);

    // Then check whether any discussions changed since the given time.
    $updates->discussions = (object) array('updated' => false);
    $discussions = forum_get_discussions($cm, '', false, -1, -1, true, -1, 0, FORUM_POSTS_ALL_USER_GROUPS, $from);
    if (!empty($discussions)) {
        $updates->discussions->updated = true;
        $updates->discussions->itemids = array_keys($discussions);
    }

    return $updates;
}
/**
* Check if the user can create attachments in a forum.
* @param stdClass $forum forum object
* @param stdClass $context context object
* @return bool true if the user can create attachments, false otherwise
* @since Moodle 3.3
*/
function forum_can_create_attachment($forum, $context) {
    // A maxbytes of 1 is the "no attachments at all" marker; zero allowed
    // attachments rules it out just as surely.
    if (empty($forum->maxattachments) || $forum->maxbytes == 1) {
        return false;
    }
    // Otherwise it comes down to the capability check.
    return has_capability('mod/forum:createattachment', $context);
}
/**
* Get icon mapping for font-awesome.
*
* @return array
*/
function mod_forum_get_fontawesome_icon_map() {
    // Map of mod_forum pix identifiers to their font-awesome class names.
    $iconmap = [
        'mod_forum:i/pinned' => 'fa-map-pin',
        'mod_forum:t/selected' => 'fa-check',
        'mod_forum:t/subscribed' => 'fa-envelope-o',
        'mod_forum:t/unsubscribed' => 'fa-envelope-open-o',
        'mod_forum:t/star' => 'fa-star',
    ];
    return $iconmap;
}
/**
* Callback function that determines whether an action event should be showing its item count
* based on the event type and the item count.
*
* @param calendar_event $event The calendar event.
* @param int $itemcount The item count associated with the action event.
* @return bool
*/
function mod_forum_core_calendar_event_action_shows_item_count(calendar_event $event, $itemcount = 0) {
    // A single required action is implicit, so only advertise counts above one.
    $showcount = ($itemcount > 1);
    return $showcount;
}
/**
* This function receives a calendar event and returns the action associated with it, or null if there is none.
*
* This is used by block_myoverview in order to display the event appropriately. If null is returned then the event
* is not displayed on the block.
*
* @param calendar_event $event
* @param \core_calendar\action_factory $factory
* @param int $userid User id to use for all capability checks, etc. Set to 0 for current user (default).
* @return \core_calendar\local\event\entities\action_interface|null
*/
function mod_forum_core_calendar_provide_event_action(calendar_event $event,
                                                      \core_calendar\action_factory $factory,
                                                      int $userid = 0) {
    global $DB, $USER;
    // Default to the current user when none was supplied.
    if (!$userid) {
        $userid = $USER->id;
    }
    $cm = get_fast_modinfo($event->courseid, $userid)->instances['forum'][$event->instance];
    if (!$cm->uservisible) {
        // The module is not visible to the user for any reason.
        return null;
    }
    $context = context_module::instance($cm->id);
    if (!has_capability('mod/forum:viewdiscussion', $context, $userid)) {
        return null;
    }
    $completion = new \completion_info($cm->get_course());
    $completiondata = $completion->get_data($cm, false, $userid);
    if ($completiondata->completionstate != COMPLETION_INCOMPLETE) {
        // Already complete (or failed/passed) - no action needs showing.
        return null;
    }
    // Get action itemcount: the number of discussions/replies/posts the user
    // must still contribute to satisfy the forum's completion conditions.
    $itemcount = 0;
    $forum = $DB->get_record('forum', array('id' => $cm->instance));
    $postcountsql = "
                SELECT
                    COUNT(1)
                  FROM
                    {forum_posts} fp
                    INNER JOIN {forum_discussions} fd ON fp.discussion=fd.id
                 WHERE
                    fp.userid=:userid AND fd.forum=:forumid";
    $postcountparams = array('userid' => $userid, 'forumid' => $forum->id);
    if ($forum->completiondiscussions) {
        // Remaining discussions: configured minimum minus those already started.
        $count = $DB->count_records('forum_discussions', array('forum' => $forum->id, 'userid' => $userid));
        $itemcount += ($forum->completiondiscussions >= $count) ? ($forum->completiondiscussions - $count) : 0;
    }
    if ($forum->completionreplies) {
        // Remaining replies (posts with a non-zero parent are replies).
        $count = $DB->get_field_sql( $postcountsql.' AND fp.parent<>0', $postcountparams);
        $itemcount += ($forum->completionreplies >= $count) ? ($forum->completionreplies - $count) : 0;
    }
    if ($forum->completionposts) {
        // Remaining posts of any kind.
        $count = $DB->get_field_sql($postcountsql, $postcountparams);
        $itemcount += ($forum->completionposts >= $count) ? ($forum->completionposts - $count) : 0;
    }
    // Well there is always at least one actionable item (view forum, etc).
    $itemcount = $itemcount > 0 ? $itemcount : 1;
    return $factory->create_instance(
        get_string('view'),
        new \moodle_url('/mod/forum/view.php', ['id' => $cm->id]),
        $itemcount,
        true
    );
}
/**
* Add a get_coursemodule_info function in case any forum type wants to add 'extra' information
* for the course (see resource).
*
* Given a course_module object, this function returns any "extra" information that may be needed
* when printing this activity in a course listing. See get_array_of_activities() in course/lib.php.
*
* @param stdClass $coursemodule The coursemodule object (record).
* @return cached_cm_info An object on information that the courses
* will know about (most noticeably, an icon).
*/
function forum_get_coursemodule_info($coursemodule) {
    global $DB;

    $fields = 'id, name, intro, introformat, completionposts, completiondiscussions, completionreplies, duedate, cutoffdate';
    $forum = $DB->get_record('forum', ['id' => $coursemodule->instance], $fields);
    if (!$forum) {
        return false;
    }

    $info = new cached_cm_info();
    $info->name = $forum->name;

    if ($coursemodule->showdescription) {
        // Convert intro to html. Do not filter cached version, filters run at display time.
        $info->content = format_module_intro('forum', $forum, $coursemodule->id, false);
    }

    // Populate the custom completion rules as key => value pairs, but only if the completion mode is 'automatic'.
    if ($coursemodule->completion == COMPLETION_TRACKING_AUTOMATIC) {
        $info->customdata['customcompletionrules']['completiondiscussions'] = $forum->completiondiscussions;
        $info->customdata['customcompletionrules']['completionreplies'] = $forum->completionreplies;
        $info->customdata['customcompletionrules']['completionposts'] = $forum->completionposts;
    }

    // Populate some other values that can be used in calendar or on dashboard.
    if ($forum->duedate) {
        $info->customdata['duedate'] = $forum->duedate;
    }
    if ($forum->cutoffdate) {
        $info->customdata['cutoffdate'] = $forum->cutoffdate;
    }

    return $info;
}
/**
 * Callback which returns human-readable strings describing the active completion custom rules for the module instance.
 *
 * @param cm_info|stdClass $cm object with fields ->completion and ->customdata['customcompletionrules']
 * @return array $descriptions the array of descriptions for the custom rules.
 */
function mod_forum_get_completion_active_rule_descriptions($cm) {
    // Values will be present in cm_info, and we assume these are up to date.
    if (empty($cm->customdata['customcompletionrules'])
        || $cm->completion != COMPLETION_TRACKING_AUTOMATIC) {
        return [];
    }

    // Each known rule maps onto its description language string.
    $rulestrings = [
        'completiondiscussions' => 'completiondiscussionsdesc',
        'completionreplies' => 'completionrepliesdesc',
        'completionposts' => 'completionpostsdesc',
    ];

    $descriptions = [];
    foreach ($cm->customdata['customcompletionrules'] as $key => $val) {
        // Unknown keys and empty (disabled) rules are silently skipped.
        if (isset($rulestrings[$key]) && !empty($val)) {
            $descriptions[] = get_string($rulestrings[$key], 'forum', $val);
        }
    }

    return $descriptions;
}
/**
 * Check whether the forum post is a private reply visible to this user.
 *
 * @param stdClass $post The post to check.
 * @param cm_info $cm The context module instance.
 * @return bool Whether the post is visible in terms of private reply configuration.
 */
function forum_post_is_visible_privately($post, $cm) {
    global $USER;

    if (empty($post->privatereplyto)) {
        // Not a private reply at all, so no restriction applies.
        return true;
    }

    // A private reply is visible to its author and its intended recipient.
    if ($post->userid == $USER->id || $post->privatereplyto == $USER->id) {
        return true;
    }

    // Otherwise the viewer needs the dedicated capability.
    return has_capability('mod/forum:readprivatereplies', context_module::instance($cm->id));
}
/**
 * Check whether the user can reply privately to the parent post.
 *
 * @param \context_module $context
 * @param \stdClass $parent
 * @return bool
 */
function forum_user_can_reply_privately(\context_module $context, \stdClass $parent) : bool {
    // You cannot reply privately to a post which is, itself, a private
    // reply; otherwise it comes down to the capability check.
    return !$parent->privatereplyto
        && has_capability('mod/forum:postprivatereply', $context);
}
/**
 * This function calculates the minimum and maximum cutoff values for the timestart of
 * the given event.
 *
 * It returns an array with two values: the minimum cutoff and the maximum
 * cutoff. Either or both values can be null, meaning no minimum or maximum,
 * respectively. When a cutoff applies, the corresponding entry is an array
 * of the cutoff timestamp and the error string shown if it is violated, e.g.
 * [
 *     [1505704373, 'The date must be after this date'],
 *     [1506741172, 'The date must be before this date']
 * ]
 *
 * @param calendar_event $event The calendar event to get the time range for
 * @param stdClass $forum The module instance to get the range from
 * @return array Returns an array with min and max date.
 */
function mod_forum_core_calendar_get_valid_event_timestart_range(\calendar_event $event, \stdClass $forum) {
    global $CFG;
    require_once($CFG->dirroot . '/mod/forum/locallib.php');

    $mindate = null;
    $maxdate = null;

    // Only the due date event is constrained: it may not be moved past
    // the forum's cut-off date.
    if ($event->eventtype == FORUM_EVENT_TYPE_DUE && !empty($forum->cutoffdate)) {
        $maxdate = [
            $forum->cutoffdate,
            get_string('cutoffdatevalidation', 'forum'),
        ];
    }

    return [$mindate, $maxdate];
}
/**
 * This function will update the forum module according to the
 * event that has been modified.
 *
 * It will set the duedate value of the forum instance
 * according to the type of event provided.
 *
 * @throws \moodle_exception
 * @param \calendar_event $event The calendar event that was modified.
 * @param stdClass $forum The forum instance record to update.
 */
function mod_forum_core_calendar_event_timestart_updated(\calendar_event $event, \stdClass $forum) {
    global $CFG, $DB;
    require_once($CFG->dirroot . '/mod/forum/locallib.php');

    // Only the due date event maps onto a forum field.
    if ($event->eventtype != FORUM_EVENT_TYPE_DUE) {
        return;
    }

    $courseid = $event->courseid;
    $modulename = $event->modulename;
    $instanceid = $event->instance;

    // Something weird going on. The event is for a different module so
    // we should ignore it.
    if ($modulename != 'forum') {
        return;
    }

    if ($forum->id != $instanceid) {
        return;
    }

    $coursemodule = get_fast_modinfo($courseid)->instances[$modulename][$instanceid];
    $context = context_module::instance($coursemodule->id);

    // The user does not have the capability to modify this activity.
    if (!has_capability('moodle/course:manageactivities', $context)) {
        return;
    }

    // The eventtype is already known to be FORUM_EVENT_TYPE_DUE at this
    // point (checked above), so only the value needs comparing.
    if ($forum->duedate != $event->timestart) {
        $forum->duedate = $event->timestart;
        $forum->timemodified = time();

        // Persist the instance changes.
        $DB->update_record('forum', $forum);

        // Notify observers that the course module was updated.
        $event = \core\event\course_module_updated::create_from_cm($coursemodule, $context);
        $event->trigger();
    }
}
/**
 * Fetch the data used to display the discussions on the current page.
 *
 * @param \mod_forum\local\entities\forum $forum The forum entity
 * @param stdClass $user The user to render for
 * @param int[]|null $groupid The group to render
 * @param int|null $sortorder The sort order to use when selecting the discussions in the list
 * @param int|null $pageno The zero-indexed page number to use
 * @param int|null $pagesize The number of discussions to show on the page
 * @return array The data to use for display
 */
function mod_forum_get_discussion_summaries(\mod_forum\local\entities\forum $forum, stdClass $user, ?int $groupid, ?int $sortorder,
        ?int $pageno = 0, ?int $pagesize = 0) {

    $vaultfactory = mod_forum\local\container::get_vault_factory();
    $discussionvault = $vaultfactory->get_discussions_in_forum_vault();
    $managerfactory = mod_forum\local\container::get_manager_factory();
    $capabilitymanager = $managerfactory->get_capability_manager($forum);

    $canseehidden = $capabilitymanager->can_view_hidden_posts($user);
    $offset = $pageno * $pagesize;

    $groupids = mod_forum_get_groups_from_groupid($forum, $user, $groupid);
    if (null === $groupids) {
        // No group filtering required.
        return $discussionvault->get_from_forum_id(
            $forum->get_id(),
            $canseehidden,
            $user->id,
            $sortorder,
            $pagesize,
            $offset);
    }

    return $discussionvault->get_from_forum_id_and_group_id(
        $forum->get_id(),
        $groupids,
        $canseehidden,
        $user->id,
        $sortorder,
        $pagesize,
        $offset);
}
/**
 * Get a count of all discussions in a forum.
 *
 * @param \mod_forum\local\entities\forum $forum The forum entity
 * @param stdClass $user The user to render for
 * @param int $groupid The group to render
 * @return int The number of discussions in a forum
 */
function mod_forum_count_all_discussions(\mod_forum\local\entities\forum $forum, stdClass $user, ?int $groupid) {
    $managerfactory = mod_forum\local\container::get_manager_factory();
    $capabilitymanager = $managerfactory->get_capability_manager($forum);
    $vaultfactory = mod_forum\local\container::get_vault_factory();
    $discussionvault = $vaultfactory->get_discussions_in_forum_vault();

    $canseehidden = $capabilitymanager->can_view_hidden_posts($user);

    $groupids = mod_forum_get_groups_from_groupid($forum, $user, $groupid);
    if (null === $groupids) {
        // No group filtering required.
        return $discussionvault->get_total_discussion_count_from_forum_id(
            $forum->get_id(),
            $canseehidden,
            $user->id);
    }

    return $discussionvault->get_total_discussion_count_from_forum_id_and_group_id(
        $forum->get_id(),
        $groupids,
        $canseehidden,
        $user->id);
}
/**
 * Get the list of groups to show based on the current user and requested groupid.
 *
 * @param \mod_forum\local\entities\forum $forum The forum entity
 * @param stdClass $user The user viewing
 * @param int $groupid The groupid requested
 * @return array|null The list of group ids to filter by, or null to show all groups
 */
function mod_forum_get_groups_from_groupid(\mod_forum\local\entities\forum $forum, stdClass $user, ?int $groupid) : ?array {
    $effectivegroupmode = $forum->get_effective_group_mode();
    if (empty($effectivegroupmode)) {
        // This forum is not in a group mode. Show all posts always.
        return null;
    }

    // NOTE(review): a loose comparison is used here, so both null and 0
    // fall into the "no group specified" branch -- confirm callers never
    // pass 0 to mean an actual group.
    if (null == $groupid) {
        $managerfactory = mod_forum\local\container::get_manager_factory();
        $capabilitymanager = $managerfactory->get_capability_manager($forum);
        // No group was specified.
        $showallgroups = (VISIBLEGROUPS == $effectivegroupmode);
        $showallgroups = $showallgroups || $capabilitymanager->can_access_all_groups($user);
        if ($showallgroups) {
            // Return null to show all groups.
            return null;
        } else {
            // No group was specified. Only show the users current groups.
            return array_keys(
                groups_get_all_groups(
                    $forum->get_course_id(),
                    $user->id,
                    $forum->get_course_module_record()->groupingid
                )
            );
        }
    } else {
        // A group was specified. Just show that group.
        return [$groupid];
    }
}
/**
 * Return a list of all the user preferences used by mod_forum.
 *
 * @return array
 */
function mod_forum_user_preferences() {
    $vaultfactory = \mod_forum\local\container::get_vault_factory();
    $discussionlistvault = $vaultfactory->get_discussions_in_forum_vault();

    return [
        // Preferred sort order of the discussion list.
        'forum_discussionlistsortorder' => [
            'null' => NULL_NOT_ALLOWED,
            'default' => $discussionlistvault::SORTORDER_LASTPOST_DESC,
            'type' => PARAM_INT,
            'choices' => [
                $discussionlistvault::SORTORDER_LASTPOST_DESC,
                $discussionlistvault::SORTORDER_LASTPOST_ASC,
                $discussionlistvault::SORTORDER_CREATED_DESC,
                $discussionlistvault::SORTORDER_CREATED_ASC,
                $discussionlistvault::SORTORDER_REPLIES_DESC,
                $discussionlistvault::SORTORDER_REPLIES_ASC,
            ],
        ],
        // Whether the experimental discussion UI is enabled.
        'forum_useexperimentalui' => [
            'null' => NULL_NOT_ALLOWED,
            'default' => false,
            'type' => PARAM_BOOL,
        ],
    ];
}
/**
 * Lists all gradable areas for the advanced grading methods framework.
 *
 * @return array('string'=>'string') An array with area names as keys and descriptions as values
 */
function forum_grading_areas_list() {
    return [
        'forum' => get_string('grade_forum_header', 'forum'),
    ];
}
/**
 * Callback to fetch the activity event type lang string.
 *
 * @param string $eventtype The event type.
 * @return lang_string The event type lang string.
 */
function mod_forum_core_calendar_get_event_action_string(string $eventtype): string {
    global $CFG;
    require_once($CFG->dirroot . '/mod/forum/locallib.php');

    $modulename = get_string('modulename', 'forum');

    if ($eventtype != FORUM_EVENT_TYPE_DUE) {
        // Fall back to the generic calendar action string.
        return get_string('requiresaction', 'calendar', $modulename);
    }
    return get_string('calendardue', 'forum', $modulename);
}
| gpl-3.0 |
OSSystems/glmark2 | src/libmatrix/util.cc | 9605 | //
// Copyright (c) 2010-2011 Linaro Limited
//
// All rights reserved. This program and the accompanying materials
// are made available under the terms of the MIT License which accompanies
// this distribution, and is available at
// http://www.opensource.org/licenses/mit-license.php
//
// Contributors:
// Alexandros Frantzis <[email protected]>
// Jesse Barker <[email protected]>
//
#include <sstream>
#include <fstream>
#include <sys/time.h>
#ifdef ANDROID
#include <android/asset_manager.h>
#else
#include <dirent.h>
#endif
#include "log.h"
#include "util.h"
using std::string;
using std::vector;
/*
* State machine for bash-like quoted string escaping:
*
* \
* -----------> +---------+
* | ---------- | Escaped |
* | | *,ESC +---------+
* | |
* | v '
* +--------+ ---> +--------------+ -----
* | Normal | <--- | SingleQuoted | | *, ESC
* +--------+ ' +--------------+ <----
* | ^
* | |
* | | " +--------------+ ----
* | ---------- | DoubleQuoted | | *, ESC
* -----------> +--------------+ <---
* " | ^
* \ | | *, ESC
* v |
* +---------------------+
* | DoubleQuotedEscaped |
* +---------------------+
*
* ESC: Mark character as Escaped
*/
/*
 * Fills esc_vec with one flag per character of str: true when that
 * character is "escaped" (must be taken literally rather than treated as
 * quoting/splitting syntax), following the bash-like quoting state
 * machine documented above.
 *
 * One flag is appended per input character, so esc_vec grows by exactly
 * str.length() entries.
 */
static void
fill_escape_vector(const std::string &str, std::vector<bool> &esc_vec)
{
    enum State {
        StateNormal,
        StateEscaped,
        StateDoubleQuoted,
        StateDoubleQuotedEscaped,
        StateSingleQuoted
    };

    State state = StateNormal;

    for (std::string::const_iterator iter = str.begin();
         iter != str.end();
         iter++)
    {
        const char c(*iter);
        bool esc = false;

        switch (state) {
            case StateNormal:
                if (c == '"')
                    state = StateDoubleQuoted;
                else if (c == '\\')
                    state = StateEscaped;
                else if (c == '\'')
                    state = StateSingleQuoted;
                break;
            case StateEscaped:
                esc = true;
                state = StateNormal;
                break;
            case StateDoubleQuoted:
                if (c == '"')
                    state = StateNormal;
                else if (c == '\\')
                    state = StateDoubleQuotedEscaped;
                else
                    esc = true;
                break;
            case StateDoubleQuotedEscaped:
                esc = true;
                state = StateDoubleQuoted;
                break;
            case StateSingleQuoted:
                if (c == '\'')
                    state = StateNormal;
                else
                    esc = true;
                // The original silently fell through into 'default'; make
                // the end of the case explicit so adding a new state later
                // cannot change this case's behavior by accident.
                break;
            default:
                break;
        }

        esc_vec.push_back(esc);
    }
}
/*
 * Plain tokenisation: appends to elementVec one element per run of
 * characters between occurrences of delim.
 */
static void
split_normal(const std::string& src, char delim, std::vector<std::string>& elementVec)
{
    std::stringstream tokens(src);
    for (std::string token; std::getline(tokens, token, delim); )
        elementVec.push_back(token);
}
/*
 * "Fuzzy" split: treats both delim and space as delimiters and collapses
 * any mixed run of them into a single split point. Always appends at
 * least one element to elementVec (possibly empty).
 */
static void
split_fuzzy(const string& src, char delim, vector<string>& elementVec)
{
    // Fuzzy case: Initialize our delimiter string based upon the caller's plus
    // a space to allow for more flexibility.
    string delimiter(" ");
    delimiter += delim;

    // Starting index into the string of the first token (by definition, if
    // we're parsing a string, there is at least one token).
    string::size_type startPos(0);

    // string::find_first_of() looks for any character in the string provided,
    // it is not treated as a sub-string, so regardless of where the space or
    // comma is or how many there are, the result is the same.
    string str(src);
    string::size_type endPos = str.find_first_of(delimiter);
    while (endPos != string::npos)
    {
        // Push back the current element starting at startPos for
        // (endPos - startPos) characters. std::string takes care of
        // terminators, etc.
        elementVec.push_back(string(str, startPos, endPos - startPos));

        // Index of the next element after any delimiter characters. Same
        // caveat applies to find_first_not_of() that applies to
        // find_first_of(); endPos tells it where to start the search.
        string::size_type nextPos = str.find_first_not_of(delimiter, endPos);

        // Erase the part of the string we've already parsed. Note that
        // startPos is always 0 here, so this drops the token just pushed
        // plus the entire run of delimiters that followed it (all of the
        // remaining string when nextPos is npos).
        str = str.erase(startPos, nextPos - startPos);

        // Look for the next delimiter. If there isn't one, we bail out.
        endPos = str.find_first_of(delimiter);
    }

    // Regardless of whether we initially had one element or many, 'str' now
    // only contains one.
    elementVec.push_back(str);
}
/*
 * Quote-aware split: delimiters inside single or double quotes (or
 * preceded by a backslash) do not split, and unescaped quoting
 * characters themselves are dropped from the output. A trailing empty
 * element is not emitted.
 */
static void
split_quoted(const string& src, char delim, vector<string>& elementVec)
{
    std::stringstream token;
    vector<bool> escVec;

    /* Mark characters in the string as escaped or not */
    fill_escape_vector(src, escVec);

    /* Sanity check... */
    if (src.length() != escVec.size())
        return;

    for (string::size_type i = 0; i < src.length(); i++)
    {
        const bool escaped = escVec[i];
        const char c = src[i];

        /* Unescaped quoting/escape characters are dropped entirely. */
        if ((c == '"' || c == '\\' || c == '\'') && !escaped)
            continue;

        /* An unescaped delimiter character ends the current token. */
        if (c == delim && !escaped) {
            elementVec.push_back(token.str());
            token.str("");
            token.clear();
        }
        else {
            token << c;
        }
    }

    /* Handle final element, delimited by end of string */
    const string &finalElement(token.str());
    if (!finalElement.empty())
        elementVec.push_back(finalElement);
}
void
Util::split(const string& src, char delim, vector<string>& elementVec,
Util::SplitMode mode)
{
// Trivial rejection
if (src.empty())
{
return;
}
switch (mode)
{
case Util::SplitModeNormal:
return split_normal(src, delim, elementVec);
case Util::SplitModeFuzzy:
return split_fuzzy(src, delim, elementVec);
case Util::SplitModeQuoted:
return split_quoted(src, delim, elementVec);
default:
break;
}
}
/*
 * Returns the current wall-clock time as microseconds since the Unix
 * epoch.
 */
uint64_t
Util::get_timestamp_us()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    // Both fields now take the integer path; the old code cast tv_usec
    // through double first, a pointless int -> double -> uint64_t
    // round-trip (harmless for values < 1e6, but inconsistent).
    uint64_t now = static_cast<uint64_t>(tv.tv_sec) * 1000000 +
                   static_cast<uint64_t>(tv.tv_usec);
    return now;
}
/*
 * Returns the final path component (the part after the last '/'), or
 * the whole string when it contains no separator.
 */
std::string
Util::appname_from_path(const std::string& path)
{
    const std::string::size_type slashPos = path.rfind('/');
    if (slashPos == std::string::npos)
        return path;
    return path.substr(slashPos + 1);
}
#ifndef ANDROID
/*
 * Desktop implementation: resources are plain files. Returns an open
 * file stream; ownership passes to the caller, who must delete it.
 */
std::istream *
Util::get_resource(const std::string &path)
{
    return new std::ifstream(path.c_str());
}
/*
 * Appends to fileVec the full pathname of every entry in dirName,
 * skipping any entry whose name starts with '.' (which also covers "."
 * and ".."). On failure to open the directory an error is logged and
 * fileVec is left untouched.
 */
void
Util::list_files(const std::string& dirName, std::vector<std::string>& fileVec)
{
    DIR* dir = opendir(dirName.c_str());
    if (!dir)
    {
        // NOTE(review): the message says "models directory" but this
        // helper looks generic -- presumably it is also used for other
        // asset directories; confirm against callers.
        Log::error("Failed to open models directory '%s'\n", dirName.c_str());
        return;
    }

    struct dirent* entry = readdir(dir);
    while (entry)
    {
        std::string pathname(dirName + "/");
        pathname += std::string(entry->d_name);
        // Skip '.' and '..' (and any other dot-file).
        if (entry->d_name[0] != '.')
        {
            fileVec.push_back(pathname);
        }
        entry = readdir(dir);
    }
    closedir(dir);
}
#else
// Process-wide handle to the Android asset manager. It must be set via
// android_set_asset_manager() before the Android variants of
// get_resource()/list_files() below are used.
AAssetManager *Util::android_asset_manager = 0;

// Registers the asset manager used to resolve resource paths on Android.
void
Util::android_set_asset_manager(AAssetManager *asset_manager)
{
    Util::android_asset_manager = asset_manager;
}

// Returns the previously registered asset manager (0 if none was set).
AAssetManager *
Util::android_get_asset_manager()
{
    return Util::android_asset_manager;
}
/*
 * Android implementation: reads the whole asset into an in-memory
 * stringstream. Always returns a (possibly empty) stream; ownership
 * passes to the caller, who must delete it. Failures are logged only.
 */
std::istream *
Util::get_resource(const std::string &path)
{
    std::string path2(path);
    /* Remove leading '/' from path name, it confuses the AssetManager */
    if (path2.size() > 0 && path2[0] == '/')
        path2.erase(0, 1);

    std::stringstream *ss = new std::stringstream;
    AAsset *asset = AAssetManager_open(Util::android_asset_manager,
                                       path2.c_str(), AASSET_MODE_RANDOM);
    if (asset) {
        // Copy the asset's full contents into the stream buffer.
        ss->write(reinterpret_cast<const char *>(AAsset_getBuffer(asset)),
                  AAsset_getLength(asset));
        Log::debug("Load asset %s\n", path2.c_str());
        AAsset_close(asset);
    }
    else {
        Log::error("Couldn't load asset %s\n", path2.c_str());
    }

    return static_cast<std::istream *>(ss);
}
/*
 * Android implementation: appends to fileVec the full pathname of every
 * file in the asset directory dirName. On failure to open the directory
 * an error is logged and fileVec is left untouched.
 */
void
Util::list_files(const std::string& dirName, std::vector<std::string>& fileVec)
{
    AAssetManager *mgr(Util::android_get_asset_manager());
    std::string dir_name(dirName);

    /* Remove leading '/' from path, it confuses the AssetManager */
    if (dir_name.size() > 0 && dir_name[0] == '/')
        dir_name.erase(0, 1);

    AAssetDir* dir = AAssetManager_openDir(mgr, dir_name.c_str());
    if (!dir)
    {
        // NOTE(review): as with the desktop variant, "models directory"
        // in the message may not reflect the actual directory listed.
        Log::error("Failed to open models directory '%s'\n", dir_name.c_str());
        return;
    }

    const char *filename(0);
    while ((filename = AAssetDir_getNextFileName(dir)) != 0)
    {
        std::string pathname(dir_name + "/");
        pathname += std::string(filename);
        fileVec.push_back(pathname);
    }
    AAssetDir_close(dir);
}
#endif
| gpl-3.0 |
jlegendary/opencog | opencog/embodiment/Learning/behavior/CompositeBehaviorDescription.cc | 11220 | /*
* opencog/embodiment/Learning/behavior/CompositeBehaviorDescription.cc
*
* Copyright (C) 2002-2009 Novamente LLC
* All Rights Reserved
* Author(s): Andre Senna
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License v3 as
* published by the Free Software Foundation and including the exceptions
* at http://opencog.org/wiki/Licenses
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program; if not, write to:
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <stdio.h>
#include "ElementaryBehaviorDescription.h"
#include "CompositeBehaviorDescription.h"
#include <opencog/atomspace/AtomSpace.h>
#include <opencog/server/CogServer.h>
#include <algorithm>
#include <opencog/util/exceptions.h>
#include <opencog/util/oc_assert.h>
using namespace behavior;
// Builds an empty behavior description bound to the given AtomSpace.
// The timeline representation is computed lazily, so it starts invalid.
CompositeBehaviorDescription::CompositeBehaviorDescription(AtomSpace *_atomspace) : atomspace(_atomspace)
{
    timelineRepresentationIsValid = false;
}

// Nothing to release: the AtomSpace is not owned by this object.
CompositeBehaviorDescription::~CompositeBehaviorDescription()
{
}
// True when no elementary behavior has been added.
bool CompositeBehaviorDescription::empty() const
{
    return entries.empty();
}

// Number of elementary behaviors added so far.
unsigned int CompositeBehaviorDescription::size() const
{
    return entries.size();
}

// Removes all entries and the cached timeline representation.
void CompositeBehaviorDescription::clear()
{
    // Early-out assumes the timeline caches are empty whenever
    // 'entries' is -- appears to hold since both are only filled after
    // entries is non-empty, but worth confirming if invariants change.
    if (entries.empty()) {
        return;
    }
    entries.clear();
    timelineSets.clear();
    timelineIntervals.clear();
    timelineRepresentationIsValid = false;
}
// Appends an elementary behavior (predicate handle + time interval) and
// invalidates the cached timeline representation.
void CompositeBehaviorDescription::addPredicate(const ElementaryBehaviorDescription& ebd)
{
    entries.push_back(ebd);
    timelineRepresentationIsValid = false;
}

// Convenience overload: wraps the handle/interval pair in an
// ElementaryBehaviorDescription before appending it.
void CompositeBehaviorDescription::addPredicate(Handle handle, const Temporal &interval)
{
    ElementaryBehaviorDescription newEntry(handle, interval);
    entries.push_back(newEntry);
    timelineRepresentationIsValid = false;
}

// Convenience overload: builds the Temporal interval from raw start/end
// timestamps.
void CompositeBehaviorDescription::addPredicate(Handle handle,
        unsigned long start_time,
        unsigned long end_time)
{
    addPredicate(handle, Temporal(start_time, end_time));
}
// Returns one PredicateHandleSet per elementary timeline interval,
// (re)building the cached representation first if needed.
const std::vector<PredicateHandleSet> &CompositeBehaviorDescription::getTimelineSets() const
{
    if (! timelineRepresentationIsValid) {
        buildTimelineRepresentation();
    }
    return timelineSets;
}

// Returns the absolute start time of the index-th timeline interval by
// adding the lengths of the preceding intervals to the global start
// time. An out-of-range index yields the end of the last interval.
unsigned long CompositeBehaviorDescription::getIndexStartTime(unsigned index) const
{
    if (! timelineRepresentationIsValid) {
        buildTimelineRepresentation();
    }

    unsigned long t = getStartTime();
    unsigned i = 0;
    for (std::vector<long>::const_iterator tli = timelineIntervals.begin();
            tli != timelineIntervals.end() && i < index; ++tli, i++) {
        t += (unsigned long) * tli;
    }
    return t;
}

// Returns the duration of each elementary timeline interval,
// (re)building the cached representation first if needed.
const std::vector<long> &CompositeBehaviorDescription::getTimelineIntervals() const
{
    if (! timelineRepresentationIsValid) {
        buildTimelineRepresentation();
    }
    return timelineIntervals;
}
unsigned long CompositeBehaviorDescription::getStartTime() const
{
if (entries.empty())
return 0;
else {
octime_t minT;
std::vector<ElementaryBehaviorDescription>::iterator ei = entries.begin();
minT = ei->temporal.getLowerBound();
++ei;
for (; ei != entries.end(); ++ei)
minT = std::min(minT, ei->temporal.getLowerBound());
return minT;
}
}
unsigned long CompositeBehaviorDescription::getEndTime() const
{
if (entries.empty())
return 0;
else {
octime_t maxT;
std::vector<ElementaryBehaviorDescription>::iterator ei = entries.begin();
maxT = ei->temporal.getUpperBound();
++ei;
for (; ei != entries.end(); ++ei)
maxT = std::max(maxT, ei->temporal.getUpperBound());
return maxT;
}
}
// Overall [min lower bound, max upper bound] interval spanned by all
// entries; Temporal(0, 0) for an empty description.
Temporal CompositeBehaviorDescription::getTimeInterval() const
{
    if (entries.empty())
        return Temporal(0, 0);

    std::vector<ElementaryBehaviorDescription>::iterator it = entries.begin();
    octime_t lower = it->temporal.getLowerBound();
    octime_t upper = it->temporal.getUpperBound();
    for (++it; it != entries.end(); ++it) {
        lower = std::min(lower, it->temporal.getLowerBound());
        upper = std::max(upper, it->temporal.getUpperBound());
    }
    return Temporal(lower, upper);
}
// Read-only access to the raw entries, in their current (possibly
// timeline-sorted) order.
const std::vector<ElementaryBehaviorDescription> &CompositeBehaviorDescription::getEntries() const
{
    return entries;
}
// ********************************************************************************
// Private API
// Static to avoid linking conflicts with possible homonimous functions from
// anywhere else
static bool pairComparator(ElementaryBehaviorDescription a, ElementaryBehaviorDescription b)
{
return a.temporal.getLowerBound() < b.temporal.getLowerBound();
}
// Collects every distinct interval endpoint of 'entries' in ascending
// order. A single distinct timestamp is duplicated so that the result
// always describes at least one (possibly degenerate) interval.
// The caller owns (and must delete) the returned vector.
std::vector<unsigned long> *CompositeBehaviorDescription::buildSeparatorsVector(const std::vector<ElementaryBehaviorDescription> &entries)
{
    std::vector<unsigned long> bounds;
    bounds.reserve(2 * entries.size());
    for (unsigned int i = 0; i < entries.size(); i++) {
        bounds.push_back(entries[i].temporal.getLowerBound());
        bounds.push_back(entries[i].temporal.getUpperBound());
    }

    // Sort and drop duplicates.
    std::sort(bounds.begin(), bounds.end());
    bounds.erase(std::unique(bounds.begin(), bounds.end()), bounds.end());

    std::vector<unsigned long> *answer =
        new std::vector<unsigned long>(bounds.begin(), bounds.end());
    if (answer->size() == 1) {
        answer->push_back(answer->front());
    }
    return answer;
}
// Rebuilds the cached timeline representation into the member vectors.
// Declared const because it only refreshes lazily-computed caches.
void CompositeBehaviorDescription::buildTimelineRepresentation() const
{
    buildTimelineRepresentation(timelineSets, timelineIntervals, entries);
}
// Computes the timeline representation: partitions the full time span
// at every distinct interval endpoint, and for each resulting
// elementary interval records the set of predicates active during it
// (into timelineSets) and its duration (into timelineIntervals).
// NOTE: sorts the given 'entries' vector in place by lower bound.
void CompositeBehaviorDescription::buildTimelineRepresentation(std::vector<PredicateHandleSet> &timelineSets, std::vector<long> &timelineIntervals, std::vector<ElementaryBehaviorDescription> &entries) const
{
    // An empty description has an (empty) valid representation.
    if (entries.size() == 0) {
        timelineRepresentationIsValid = true;
        return;
    }

    // Rebuild from scratch.
    if (! timelineSets.empty()) {
        timelineSets.clear();
    }
    if (! timelineIntervals.empty()) {
        timelineIntervals.clear();
    }

    // All distinct interval boundaries in ascending order (owned here,
    // freed below).
    std::vector<unsigned long> *separators = buildSeparatorsVector(entries);
    sort(entries.begin(), entries.end(), pairComparator);
    PredicateHandleSet currentSet;
    unsigned int cursor = 0;
    // For each elementary interval [separators[i], separators[i+1]] ...
    for (unsigned int i = 0; i < (separators->size() - 1); i++) {
        currentSet.clear();
        unsigned long intervalStart = separators->at(i);
        unsigned long intervalEnd = separators->at(i + 1);
        // Entries that ended strictly before this interval can never
        // match again (entries are sorted by start), so advance past them.
        while ((cursor < entries.size()) && (entries[cursor].temporal.getUpperBound() < intervalStart)) {
            cursor++;
        }
        for (unsigned int j = cursor; j < entries.size(); j++) {
            unsigned long lower = entries[j].temporal.getLowerBound();
            unsigned long upper = entries[j].temporal.getUpperBound();
            Handle handle = entries[j].handle;
            // Overlap test: proper overlap with the open interval, or a
            // zero-length entry sitting exactly on either endpoint.
            if (((upper > intervalStart) && (lower < intervalEnd)) ||
                    ((upper == lower) && (upper == intervalStart)) ||
                    ((upper == lower) && (upper == intervalEnd))) {
                currentSet.insert(handle);
            }
        }
        timelineSets.push_back(currentSet);
        timelineIntervals.push_back(intervalEnd - intervalStart);
    }

    delete separators;

    OC_ASSERT(timelineSets.size() == timelineIntervals.size());
    timelineRepresentationIsValid = true;
}
// Renders the entries as "{(atom,interval),...}".
std::string CompositeBehaviorDescription::toString() const
{
    // Not strictly required, but building the timeline sorts the entries
    // chronologically so the output order is deterministic.
    if (! timelineRepresentationIsValid) {
        buildTimelineRepresentation();
    }

    std::string text = "{";
    for (unsigned int i = 0; i < entries.size(); i++) {
        if (i > 0) {
            text.append(",");
        }
        text.append("(");
        text.append(atomspace->atom_as_string(entries[i].handle));
        text.append(",");
        text.append(entries[i].temporal.toString());
        text.append(")");
    }
    text.append("}");
    return text;
}
// ********************************************************************************
// Test/debug
// Structural equality: same number of entries and pairwise-identical
// (handle, interval) pairs.
bool CompositeBehaviorDescription::equals(const CompositeBehaviorDescription &other) const
{
    if (entries.size() != other.entries.size()) {
        return false;
    }

    // Building the timeline sorts each entries vector chronologically
    // (see buildTimelineRepresentation), so the element-wise comparison
    // below is insensitive to insertion order.
    this->buildTimelineRepresentation();
    other.buildTimelineRepresentation();

    for (unsigned int i = 0; i < entries.size(); i++) {
        if (entries[i].handle != other.entries[i].handle)
            return false;
        else if (entries[i].temporal != other.entries[i].temporal)
            return false;
    }
    return true;
}
// Renders the entries as "{(name,interval),...}" using atom names.
// Fixed: the closing '}' was previously appended only inside the loop
// (on the last iteration), so an empty description produced the
// unbalanced string "{" instead of "{}".
std::string CompositeBehaviorDescription::toStringHandles()
{
    std::string answer = "{";
    for (unsigned int i = 0; i < entries.size(); i++) {
        if (i > 0) {
            answer.append(",");
        }
        answer.append("(");
        answer.append(atomspace->get_name(entries[i].handle));
        answer.append(",");
        answer.append(entries[i].temporal.toString());
        answer.append(")");
    }
    answer.append("}");
    return answer;
}
// Renders the cached timeline representation (building it if needed).
std::string CompositeBehaviorDescription::toStringTimeline()
{
    if (! timelineRepresentationIsValid) {
        buildTimelineRepresentation();
    }
    return toStringTimeline(timelineSets, timelineIntervals);
}
// Renders a timeline representation as "{({name,...},duration),...}".
// Handle names inside each set are sorted so the output is
// deterministic regardless of set iteration order.
std::string CompositeBehaviorDescription::toStringTimeline(
    std::vector<PredicateHandleSet> &timelineSets,
    std::vector<long> &timelineIntervals)
{
    //TODO: contigous equals sets should be merged
    std::string answer = "{";
    for (unsigned int i = 0; i < timelineSets.size(); i++) {
        answer.append("(");
        answer.append("{");
        std::vector<std::string> names;
        for (std::set<Handle>::iterator it = timelineSets[i].getSet().begin(); it != timelineSets[i].getSet().end(); ++it) {
            //the assert below is here to insure that the atom is a node
            OC_ASSERT(atomspace->is_node(*it));
            names.push_back(atomspace->get_name(*it));
        }
        std::sort(names.begin(), names.end());
        for (std::vector<std::string>::iterator it = names.begin(); it != names.end(); ++it) {
            if (it != names.begin()) {
                answer.append(",");
            }
            answer.append(*it);
        }
        answer.append("},");
        // Interval durations are longs, rendered with %ld into a buffer
        // comfortably large for any 64-bit value.
        char s[128];
        sprintf(s, "%ld", timelineIntervals[i]);
        answer.append(s);
        answer.append(")");
        if (i != (timelineSets.size() - 1)) {
            answer.append(",");
        }
    }
    answer.append("}");
    return answer;
}
| agpl-3.0 |
ging/vish | lib/plugins/ediphy/app/assets/javascripts/lib/ckeditor/plugins/contextmenu/lang/sr.js | 241 | /*
Copyright (c) 2003-2017, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
// Serbian localisation for the context-menu plugin. The single entry is
// marked MISSING upstream: it still carries the English fallback text.
CKEDITOR.plugins.setLang( 'contextmenu', 'sr', {
	options: 'Context Menu Options' // MISSING
} );
| agpl-3.0 |
sourcefabric/Booktype | lib/booktype/apps/edit/static/edit/js/aloha/plugins/common/contenthandler/lib/genericcontenthandler.js | 9795 | /* genericcontenthandler.js is part of Aloha Editor project http://aloha-editor.org
*
* Aloha Editor is a WYSIWYG HTML5 inline editing library and editor.
* Copyright (c) 2010-2012 Gentics Software GmbH, Vienna, Austria.
* Contributors http://aloha-editor.org/contribution.php
*
* Aloha Editor is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or any later version.
*
* Aloha Editor is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* As an additional permission to the GNU GPL version 2, you may distribute
* non-source (e.g., minimized or compacted) forms of the Aloha-Editor
* source code without the copy of the GNU GPL normally required,
* provided you include this license notice and a URL through which
* recipients can access the Corresponding Source.
*/
define([
'jquery',
'aloha',
'aloha/contenthandlermanager',
'contenthandler/contenthandler-utils'
], function (
$,
Aloha,
Manager,
Utils
) {
'use strict';
/**
 * Tags used for semantic formatting. These are the inline formatting
 * elements that transformFormattings() may rewrite when the generic
 * content handler is configured to transform formattings.
 * @type {Array.<String>}
 * @see GenericContentHandler#transformFormattings
 */
var formattingTags = ['strong', 'em', 's', 'u', 'strike'];
/**
 * Transforms all tables in the given content to make them ready to for
 * use with Aloha's table handling.
 *
 * Cleans tables of their unwanted attributes.
 * Normalizes table cells.
 *
 * @param {jQuery.<HTMLElement>} $content
 */
function prepareTables($content) {
	// Because Aloha does not provide a way for the editor to
	// manipulate borders, cellspacing, cellpadding in tables.
	// @todo what about width, height?
	$content.find('table')
		.removeAttr('cellpadding')
		.removeAttr('cellspacing')
		.removeAttr('border')
		.removeAttr('border-top')
		.removeAttr('border-bottom')
		.removeAttr('border-left')
		.removeAttr('border-right');

	$content.find('td').each(function () {
		var td = this;

		// Because cells with a single empty <p> are rendered to appear
		// like empty cells, it simplifies the handeling of cells to
		// normalize these table cells to contain actual white space
		// instead.
		if (Utils.isProppedParagraph(td.innerHTML)) {
			td.innerHTML = ' ';
		}

		// Because a single <p> wrapping the contents of a <td> is
		// initially superfluous and should be stripped out.
		var $p = $('>p', td);
		if (1 === $p.length) {
			$p.contents().unwrap();
		}
	});

	// Because Aloha does not provide a means for editors to manipulate
	// these properties.
	$content.find('table,th,td,tr')
		.removeAttr('width')
		.removeAttr('height')
		.removeAttr('valign');

	// Because Aloha table handling simply does not regard colgroups.
	// @TODO Use sanitize.js?
	$content.find('colgroup').remove();
}
/**
 * Check whether a tag name is white-listed via
 * Aloha.settings.contentHandler.allows.elements.
 *
 * @param {String} nodeType The tag name of the element to evaluate
 * @return {Boolean} true only when the settings explicitly allow the tag
 */
function isAllowedNodeName(nodeType) {
	var handlerSettings = Aloha.settings.contentHandler;
	if (!handlerSettings || !handlerSettings.allows || !handlerSettings.allows.elements) {
		// No white-list configured: nothing is considered allowed.
		return false;
	}
	return $.inArray(nodeType.toLowerCase(), handlerSettings.allows.elements) !== -1;
}
/**
 * The generic content handler: sanitizes HTML that is pasted or
 * inserted into an editable so that only markup Aloha can manage
 * remains.
 */
var GenericContentHandler = Manager.createHandler({

	/**
	 * Transforms pasted content to make it safe and ready to be used in
	 * Aloha Editables.
	 *
	 * @param {jQuery.<HTMLElement>|string} content
	 * @return {string} Clean HTML
	 */
	handleContent: function (content) {
		var $content = Utils.wrapContent(content);
		if (!$content) {
			return content;
		}
		// If an aloha-block is found inside the pasted content, no
		// modification should be made to the pasted content because it
		// can be assumed this is content deliberately placed by Aloha
		// and should not be cleaned.
		if ($content.find('.aloha-block').length) {
			return $content.html();
		}
		prepareTables($content);
		this.cleanLists($content);
		this.removeComments($content);
		this.unwrapTags($content);
		this.removeStyles($content);
		this.removeNamespacedElements($content);
		//this.transformLinks($content);

		// Formatting transformation is on by default and may be switched
		// off by setting
		// Aloha.settings.contentHandler.handler.generic.transformFormattings
		// to false.
		// Bug fix: the typeof comparison previously used the misspelled
		// string 'undefinded'. Since typeof can never return that value
		// the guard was always true, so the feature was wrongly disabled
		// whenever the `generic` settings object existed without a
		// `transformFormattings` entry.
		var transformFormatting = true;
		if (Aloha.settings.contentHandler &&
				Aloha.settings.contentHandler.handler &&
				Aloha.settings.contentHandler.handler.generic &&
				typeof Aloha.settings.contentHandler.handler.generic.transformFormattings !== 'undefined' &&
				!Aloha.settings.contentHandler.handler.generic.transformFormattings) {
			transformFormatting = false;
		}
		if (transformFormatting) {
			this.transformFormattings($content);
		}
		return $content.html();
	},

	/**
	 * Cleans lists.
	 * The only allowed children of ol or ul elements are li's. Everything
	 * else will be removed.
	 *
	 * See http://validator.w3.org/check with following invalid markup for
	 * example:
	 * <!DOCTYPE html><head><title></title></head><ul><li>ok</li><ol></ol></ul>
	 *
	 * @param {jQuery.<HTMLElement>} $content
	 */
	cleanLists: function ($content) {
		$content.find('ul,ol').find('>:not(li)').remove();
	},

	/**
	 * Transforms formatting elements that are not white-listed in the
	 * settings into their presentational counterparts
	 * (strong->b, em->i, s/strike->del); <u> is simply unwrapped.
	 *
	 * @param {jQuery.<HTMLElement>} content
	 */
	transformFormattings: function (content) {
		// find all formattings we will transform
		// @todo this makes troubles -- don't change semantics! at least in this way...
		var selectors = [],
			i;
		for (i = 0; i < formattingTags.length; i++) {
			if (!isAllowedNodeName(formattingTags[i])) {
				selectors.push(formattingTags[i]);
			}
		}
		content.find(selectors.join(',')).each(function () {
			if (this.nodeName === 'STRONG') {
				// transform strong to b
				Aloha.Markup.transformDomObject($(this), 'b');
			} else if (this.nodeName === 'EM') {
				// transform em to i
				Aloha.Markup.transformDomObject($(this), 'i');
			} else if (this.nodeName === 'S' || this.nodeName === 'STRIKE') {
				// transform s and strike to del
				Aloha.Markup.transformDomObject($(this), 'del');
			} else if (this.nodeName === 'U') {
				// transform u?
				$(this).contents().unwrap();
			}
		});
	},

	/**
	 * Removes anchors without an href attribute by unwrapping their
	 * contents (these will be destination anchors from e.g. a word table
	 * of contents; Aloha is not supporting anchors at the moment).
	 * NOTE: currently not invoked from handleContent().
	 *
	 * @param {jQuery.<HTMLElement>} content
	 */
	transformLinks: function (content) {
		content.find('a').each(function () {
			if (typeof $(this).attr('href') === 'undefined') {
				$(this).contents().unwrap();
			}
		});
	},

	/**
	 * Removes all comment nodes, recursing into child elements.
	 *
	 * @param {jQuery.<HTMLElement>} content
	 */
	removeComments: function (content) {
		var that = this;
		content.contents().each(function () {
			// nodeType 8 is a comment node
			if (this.nodeType === 8) {
				$(this).remove();
			} else {
				// do recursion
				that.removeComments($(this));
			}
		});
	},

	/**
	 * Unwraps span, font and div elements, keeping their contents.
	 * Elements with the class aloha-wai-lang are preserved because they
	 * are maintained by Aloha itself.
	 *
	 * @param {jQuery.<HTMLElement>} content
	 */
	unwrapTags: function (content) {
		// Note: we exclude all elements (they will be spans) here, that have the class aloha-wai-lang
		// TODO find a better solution for this (e.g. invent a more generic aloha class for all elements, that are
		// somehow maintained by aloha, and are therefore allowed)
		content.find('span,font,div').not('.aloha-wai-lang').each(function () {
			if (this.nodeName === 'DIV') {
				// safari and chrome cleanup for plain text paste with working linebreaks
				if (this.innerHTML === '<br>') {
					$(this).contents().unwrap();
				} else {
					$(Aloha.Markup.transformDomObject($(this), 'p').append('<br>')).contents().unwrap();
				}
			} else {
				$(this).contents().unwrap();
			}
		});
	},

	/**
	 * Strips <style> tags and removes style attributes and css classes
	 * from all editable children of the given element, recursively.
	 *
	 * @param {jQuery.<HTMLElement>} content
	 */
	removeStyles: function (content) {
		var that = this;
		// completely remove style tags
		content.children('style').filter(function () {
			return this.contentEditable !== 'false';
		}).remove();
		// remove style attributes and classes
		content.children().filter(function () {
			return this.contentEditable !== 'false';
		}).each(function () {
			$(this).removeAttr('style').removeClass();
			that.removeStyles($(this));
		});
	},

	/**
	 * Removes all elements which are in a non-HTML namespace: elements
	 * with contents are unwrapped, empty ones are removed entirely.
	 *
	 * @param {jQuery.<HTMLElement>} $content
	 */
	removeNamespacedElements: function ($content) {
		// get all elements
		$content.find('*').each(function () {
			// try to determine the namespace prefix ('prefix' works for W3C
			// compliant browsers, 'scopeName' for IE)
			var nsPrefix = this.prefix ? this.prefix
					: (this.scopeName ? this.scopeName : undefined);
			// when the prefix is set (and different from 'HTML'), we remove the
			// element
			if ((nsPrefix && nsPrefix !== 'HTML') || this.nodeName.indexOf(':') >= 0) {
				var $this = $(this), $contents = $this.contents();
				if ($contents.length) {
					// the element has contents, so unwrap the contents
					$contents.unwrap();
				} else {
					// the element is empty, so remove it
					$this.remove();
				}
			}
		});
	}
});
return GenericContentHandler;
});
| agpl-3.0 |
wwitzel3/juju | state/multiwatcher_internal_test.go | 26255 | // Copyright 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package state
import (
"container/list"
"fmt"
"sync"
"time"
"github.com/juju/errors"
jc "github.com/juju/testing/checkers"
gc "gopkg.in/check.v1"
"gopkg.in/mgo.v2"
"github.com/juju/juju/state/multiwatcher"
"github.com/juju/juju/state/watcher"
"github.com/juju/juju/testing"
)
// Register the suite with gocheck.
var _ = gc.Suite(&storeSuite{})

// storeSuite exercises the multiwatcherStore data structure in isolation.
type storeSuite struct {
	testing.BaseSuite
}
// StoreChangeMethodTests is a table of scenarios exercising the
// multiwatcherStore mutation methods (Update, Remove, delete, decRef).
// Each entry applies `change` to a fresh store; TestStoreChangeMethods
// then checks the store's latest revno and full contents.
var StoreChangeMethodTests = []struct {
	about          string                       // scenario description for the log
	change         func(all *multiwatcherStore) // mutations to apply to a fresh store
	expectRevno    int64                        // expected latestRevno afterwards
	expectContents []entityEntry                // expected store entries, oldest first
}{{
	about:  "empty at first",
	change: func(*multiwatcherStore) {},
}, {
	about: "add single entry",
	change: func(all *multiwatcherStore) {
		all.Update(&multiwatcher.MachineInfo{
			Id:         "0",
			InstanceId: "i-0",
		})
	},
	expectRevno: 1,
	expectContents: []entityEntry{{
		creationRevno: 1,
		revno:         1,
		info: &multiwatcher.MachineInfo{
			Id:         "0",
			InstanceId: "i-0",
		},
	}},
}, {
	about: "add two entries",
	change: func(all *multiwatcherStore) {
		all.Update(&multiwatcher.MachineInfo{
			Id:         "0",
			InstanceId: "i-0",
		})
		all.Update(&multiwatcher.ServiceInfo{
			Name:    "wordpress",
			Exposed: true,
		})
	},
	expectRevno: 2,
	expectContents: []entityEntry{{
		creationRevno: 1,
		revno:         1,
		info: &multiwatcher.MachineInfo{
			Id:         "0",
			InstanceId: "i-0",
		},
	}, {
		creationRevno: 2,
		revno:         2,
		info: &multiwatcher.ServiceInfo{
			Name:    "wordpress",
			Exposed: true,
		},
	}},
}, {
	about: "update an entity that's not currently there",
	change: func(all *multiwatcherStore) {
		m := &multiwatcher.MachineInfo{Id: "1"}
		all.Update(m)
	},
	expectRevno: 1,
	expectContents: []entityEntry{{
		creationRevno: 1,
		revno:         1,
		info:          &multiwatcher.MachineInfo{Id: "1"},
	}},
}, {
	// A removed entry with a non-zero refCount must stay in the store,
	// marked removed, so watchers that saw it can be told about the removal.
	about: "mark removed on existing entry",
	change: func(all *multiwatcherStore) {
		all.Update(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"})
		all.Update(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "1"})
		StoreIncRef(all, multiwatcher.EntityId{"machine", "uuid", "0"})
		all.Remove(multiwatcher.EntityId{"machine", "uuid", "0"})
	},
	expectRevno: 3,
	expectContents: []entityEntry{{
		creationRevno: 2,
		revno:         2,
		info:          &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "1"},
	}, {
		creationRevno: 1,
		revno:         3,
		refCount:      1,
		removed:       true,
		info:          &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"},
	}},
}, {
	about: "mark removed on nonexistent entry",
	change: func(all *multiwatcherStore) {
		all.Remove(multiwatcher.EntityId{"machine", "uuid", "0"})
	},
}, {
	about: "mark removed on already marked entry",
	change: func(all *multiwatcherStore) {
		all.Update(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"})
		all.Update(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "1"})
		StoreIncRef(all, multiwatcher.EntityId{"machine", "uuid", "0"})
		all.Remove(multiwatcher.EntityId{"machine", "uuid", "0"})
		all.Update(&multiwatcher.MachineInfo{
			EnvUUID:    "uuid",
			Id:         "1",
			InstanceId: "i-1",
		})
		all.Remove(multiwatcher.EntityId{"machine", "uuid", "0"})
	},
	expectRevno: 4,
	expectContents: []entityEntry{{
		creationRevno: 1,
		revno:         3,
		refCount:      1,
		removed:       true,
		info:          &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"},
	}, {
		creationRevno: 2,
		revno:         4,
		info: &multiwatcher.MachineInfo{
			EnvUUID:    "uuid",
			Id:         "1",
			InstanceId: "i-1",
		},
	}},
}, {
	// With no references, removal drops the entry entirely.
	about: "mark removed on entry with zero ref count",
	change: func(all *multiwatcherStore) {
		all.Update(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"})
		all.Remove(multiwatcher.EntityId{"machine", "uuid", "0"})
	},
	expectRevno: 2,
}, {
	about: "delete entry",
	change: func(all *multiwatcherStore) {
		all.Update(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"})
		all.delete(multiwatcher.EntityId{"machine", "uuid", "0"})
	},
	expectRevno: 1,
}, {
	about: "decref of non-removed entity",
	change: func(all *multiwatcherStore) {
		m := &multiwatcher.MachineInfo{Id: "0"}
		all.Update(m)
		id := m.EntityId()
		StoreIncRef(all, id)
		entry := all.entities[id].Value.(*entityEntry)
		all.decRef(entry)
	},
	expectRevno: 1,
	expectContents: []entityEntry{{
		creationRevno: 1,
		revno:         1,
		refCount:      0,
		info:          &multiwatcher.MachineInfo{Id: "0"},
	}},
}, {
	// Dropping the last reference of a removed entity deletes it.
	about: "decref of removed entity",
	change: func(all *multiwatcherStore) {
		m := &multiwatcher.MachineInfo{Id: "0"}
		all.Update(m)
		id := m.EntityId()
		entry := all.entities[id].Value.(*entityEntry)
		entry.refCount++
		all.Remove(id)
		all.decRef(entry)
	},
	expectRevno: 2,
},
}
// TestStoreChangeMethods runs every scenario in StoreChangeMethodTests
// against a fresh store and verifies the resulting revno and contents.
func (s *storeSuite) TestStoreChangeMethods(c *gc.C) {
	for testNum, test := range StoreChangeMethodTests {
		c.Logf("test %d. %s", testNum, test.about)
		store := newStore()
		test.change(store)
		assertStoreContents(c, store, test.expectRevno, test.expectContents)
	}
}
// TestChangesSince verifies that ChangesSince reports exactly the deltas
// accumulated after a given revno, including removal deltas for entries
// that are still referenced.
func (s *storeSuite) TestChangesSince(c *gc.C) {
	a := newStore()
	// Add three entries.
	var deltas []multiwatcher.Delta
	for i := 0; i < 3; i++ {
		m := &multiwatcher.MachineInfo{
			EnvUUID: "uuid",
			Id:      fmt.Sprint(i),
		}
		a.Update(m)
		deltas = append(deltas, multiwatcher.Delta{Entity: m})
	}
	// Check that the deltas from each revno are as expected.
	for i := 0; i < 3; i++ {
		c.Logf("test %d", i)
		c.Assert(a.ChangesSince(int64(i)), gc.DeepEquals, deltas[i:])
	}
	// Check boundary cases.
	c.Assert(a.ChangesSince(-1), gc.DeepEquals, deltas)
	c.Assert(a.ChangesSince(99), gc.HasLen, 0)
	// Update one machine and check we see the changes.
	rev := a.latestRevno
	m1 := &multiwatcher.MachineInfo{
		EnvUUID:    "uuid",
		Id:         "1",
		InstanceId: "foo",
	}
	a.Update(m1)
	c.Assert(a.ChangesSince(rev), gc.DeepEquals, []multiwatcher.Delta{{Entity: m1}})
	// Make sure the machine isn't simply removed from
	// the list when it's marked as removed.
	StoreIncRef(a, multiwatcher.EntityId{"machine", "uuid", "0"})
	// Remove another machine and check we see it's removed.
	m0 := &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"}
	a.Remove(m0.EntityId())
	// Check that something that never saw m0 does not get
	// informed of its removal (even those the removed entity
	// is still in the list.
	c.Assert(a.ChangesSince(0), gc.DeepEquals, []multiwatcher.Delta{{
		Entity: &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "2"},
	}, {
		Entity: m1,
	}})
	// A watcher that saw m0 gets both the update and the removal.
	c.Assert(a.ChangesSince(rev), gc.DeepEquals, []multiwatcher.Delta{{
		Entity: m1,
	}, {
		Removed: true,
		Entity:  m0,
	}})
	// A watcher that is current up to the m1 update only sees the removal.
	c.Assert(a.ChangesSince(rev+1), gc.DeepEquals, []multiwatcher.Delta{{
		Removed: true,
		Entity:  m0,
	}})
}
// TestGet checks that Get returns the stored info pointer itself for a
// known entity id, and nil for an unknown one.
func (s *storeSuite) TestGet(c *gc.C) {
	a := newStore()
	m := &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"}
	a.Update(m)
	c.Assert(a.Get(m.EntityId()), gc.Equals, m)
	c.Assert(a.Get(multiwatcher.EntityId{"machine", "uuid", "1"}), gc.IsNil)
}

// storeManagerSuite exercises the storeManager/Multiwatcher machinery.
type storeManagerSuite struct {
	testing.BaseSuite
}

var _ = gc.Suite(&storeManagerSuite{})
// TestHandle verifies the request bookkeeping of storeManager.handle:
// requests queue up per watcher (newest first), and a request with a nil
// reply channel stops the watcher, replying false to its pending requests.
func (*storeManagerSuite) TestHandle(c *gc.C) {
	sm := newStoreManagerNoRun(newTestBacking(nil))
	// Add request from first watcher.
	w0 := &Multiwatcher{all: sm}
	req0 := &request{
		w:     w0,
		reply: make(chan bool, 1),
	}
	sm.handle(req0)
	assertWaitingRequests(c, sm, map[*Multiwatcher][]*request{
		w0: {req0},
	})
	// Add second request from first watcher.
	req1 := &request{
		w:     w0,
		reply: make(chan bool, 1),
	}
	sm.handle(req1)
	// Note: the newer request is queued ahead of the older one.
	assertWaitingRequests(c, sm, map[*Multiwatcher][]*request{
		w0: {req1, req0},
	})
	// Add request from second watcher.
	w1 := &Multiwatcher{all: sm}
	req2 := &request{
		w:     w1,
		reply: make(chan bool, 1),
	}
	sm.handle(req2)
	assertWaitingRequests(c, sm, map[*Multiwatcher][]*request{
		w0: {req1, req0},
		w1: {req2},
	})
	// Stop first watcher.
	sm.handle(&request{
		w: w0,
	})
	assertWaitingRequests(c, sm, map[*Multiwatcher][]*request{
		w1: {req2},
	})
	assertReplied(c, false, req0)
	assertReplied(c, false, req1)
	// Stop second watcher.
	sm.handle(&request{
		w: w1,
	})
	assertWaitingRequests(c, sm, nil)
	assertReplied(c, false, req2)
}
func (s *storeManagerSuite) TestHandleStopNoDecRefIfMoreRecentlyCreated(c *gc.C) {
	// If the Multiwatcher hasn't seen the item, then we shouldn't
	// decrement its ref count when it is stopped.
	sm := newStoreManager(newTestBacking(nil))
	mi := &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"}
	sm.all.Update(mi)
	StoreIncRef(sm.all, multiwatcher.EntityId{"machine", "uuid", "0"})
	w := &Multiwatcher{all: sm}
	// Stop the watcher.
	sm.handle(&request{w: w})
	// refCount stays at 1: the stopped watcher never saw the entity.
	assertStoreContents(c, sm.all, 1, []entityEntry{{
		creationRevno: 1,
		revno:         1,
		refCount:      1,
		info:          mi,
	}})
}

func (s *storeManagerSuite) TestHandleStopNoDecRefIfAlreadySeenRemoved(c *gc.C) {
	// If the Multiwatcher has already seen the item removed, then
	// we shouldn't decrement its ref count when it is stopped.
	sm := newStoreManager(newTestBacking(nil))
	mi := &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"}
	sm.all.Update(mi)
	id := multiwatcher.EntityId{"machine", "uuid", "0"}
	StoreIncRef(sm.all, id)
	sm.all.Remove(id)
	w := &Multiwatcher{all: sm}
	// Stop the watcher.
	sm.handle(&request{w: w})
	// The removed entry keeps its reference.
	assertStoreContents(c, sm.all, 2, []entityEntry{{
		creationRevno: 1,
		revno:         2,
		refCount:      1,
		removed:       true,
		info:          mi,
	}})
}

func (s *storeManagerSuite) TestHandleStopDecRefIfAlreadySeenAndNotRemoved(c *gc.C) {
	// If the Multiwatcher has already seen the item (and it has not been
	// removed), then we should decrement its ref count when it is stopped.
	sm := newStoreManager(newTestBacking(nil))
	mi := &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"}
	sm.all.Update(mi)
	StoreIncRef(sm.all, multiwatcher.EntityId{"machine", "uuid", "0"})
	w := &Multiwatcher{all: sm}
	// Mark the watcher as up to date so it counts as having seen the entity.
	w.revno = sm.all.latestRevno
	// Stop the watcher.
	sm.handle(&request{w: w})
	// refCount drops back to zero (field omitted below).
	assertStoreContents(c, sm.all, 1, []entityEntry{{
		creationRevno: 1,
		revno:         1,
		info:          mi,
	}})
}

func (s *storeManagerSuite) TestHandleStopNoDecRefIfNotSeen(c *gc.C) {
	// If the Multiwatcher hasn't seen the item at all, it should
	// leave the ref count untouched.
	sm := newStoreManager(newTestBacking(nil))
	mi := &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"}
	sm.all.Update(mi)
	StoreIncRef(sm.all, multiwatcher.EntityId{"machine", "uuid", "0"})
	w := &Multiwatcher{all: sm}
	// Stop the watcher.
	sm.handle(&request{w: w})
	assertStoreContents(c, sm.all, 1, []entityEntry{{
		creationRevno: 1,
		revno:         1,
		refCount:      1,
		info:          mi,
	}})
}
// respondTestChanges is the sequence of store mutations applied, one per
// step, by TestRespondResults.
var respondTestChanges = [...]func(all *multiwatcherStore){
	func(all *multiwatcherStore) {
		all.Update(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"})
	},
	func(all *multiwatcherStore) {
		all.Update(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "1"})
	},
	func(all *multiwatcherStore) {
		all.Update(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "2"})
	},
	func(all *multiwatcherStore) {
		all.Remove(multiwatcher.EntityId{"machine", "uuid", "0"})
	},
	func(all *multiwatcherStore) {
		all.Update(&multiwatcher.MachineInfo{
			EnvUUID:    "uuid",
			Id:         "1",
			InstanceId: "i-1",
		})
	},
	func(all *multiwatcherStore) {
		all.Remove(multiwatcher.EntityId{"machine", "uuid", "1"})
	},
}

var (
	// respondTestFinalState is the store contents expected after all of
	// respondTestChanges have been applied: only machine "2" survives.
	respondTestFinalState = []entityEntry{{
		creationRevno: 3,
		revno:         3,
		info: &multiwatcher.MachineInfo{
			EnvUUID: "uuid",
			Id:      "2",
		},
	}}
	// respondTestFinalRevno is the revno reached after all changes.
	respondTestFinalRevno = int64(len(respondTestChanges))
)
func (s *storeManagerSuite) TestRespondResults(c *gc.C) {
	// We test the response results for a pair of watchers by
	// interleaving notional Next requests in all possible
	// combinations after each change in respondTestChanges and
	// checking that the view of the world as seen by the watchers
	// matches the actual current state.
	// We decide whether if we make a request for a given
	// watcher by inspecting a number n - bit i of n determines whether
	// a request will be responded to after running respondTestChanges[i].
	numCombinations := 1 << uint(len(respondTestChanges))
	const wcount = 2
	ns := make([]int, wcount)
	for ns[0] = 0; ns[0] < numCombinations; ns[0]++ {
		for ns[1] = 0; ns[1] < numCombinations; ns[1]++ {
			sm := newStoreManagerNoRun(&storeManagerTestBacking{})
			c.Logf("test %0*b", len(respondTestChanges), ns)
			var (
				ws      []*Multiwatcher
				wstates []watcherState
				reqs    []*request
			)
			for i := 0; i < wcount; i++ {
				ws = append(ws, &Multiwatcher{})
				wstates = append(wstates, make(watcherState))
				reqs = append(reqs, nil)
			}
			// Make each change in turn, and make a request for each
			// watcher if n and respond
			for i, change := range respondTestChanges {
				c.Logf("change %d", i)
				change(sm.all)
				needRespond := false
				for wi, n := range ns {
					if n&(1<<uint(i)) != 0 {
						needRespond = true
						// Issue a request only if the watcher doesn't
						// already have one pending.
						if reqs[wi] == nil {
							reqs[wi] = &request{
								w:     ws[wi],
								reply: make(chan bool, 1),
							}
							sm.handle(reqs[wi])
						}
					}
				}
				if !needRespond {
					continue
				}
				// Check that the expected requests are pending.
				expectWaiting := make(map[*Multiwatcher][]*request)
				for wi, w := range ws {
					if reqs[wi] != nil {
						expectWaiting[w] = []*request{reqs[wi]}
					}
				}
				assertWaitingRequests(c, sm, expectWaiting)
				// Actually respond; then check that each watcher with
				// an outstanding request now has an up to date view
				// of the world.
				sm.respond()
				for wi, req := range reqs {
					if req == nil {
						continue
					}
					select {
					case ok := <-req.reply:
						c.Assert(ok, jc.IsTrue)
						c.Assert(len(req.changes) > 0, jc.IsTrue)
						wstates[wi].update(req.changes)
						reqs[wi] = nil
					default:
						// No reply yet: the watcher was already up to date.
					}
					c.Logf("check %d", wi)
					wstates[wi].check(c, sm.all)
				}
			}
			// Stop the watcher and check that all ref counts end up at zero
			// and removed objects are deleted.
			for wi, w := range ws {
				sm.handle(&request{w: w})
				if reqs[wi] != nil {
					assertReplied(c, false, reqs[wi])
				}
			}
			assertStoreContents(c, sm.all, respondTestFinalRevno, respondTestFinalState)
		}
	}
}
// TestRespondMultiple verifies respond()'s per-watcher semantics: only
// the most recent request of each watcher is answered, a watcher gets no
// reply until there is something new for it, and a fresh change unblocks
// all waiting requests.
func (*storeManagerSuite) TestRespondMultiple(c *gc.C) {
	sm := newStoreManager(newTestBacking(nil))
	sm.all.Update(&multiwatcher.MachineInfo{Id: "0"})
	// Add one request and respond.
	// It should see the above change.
	w0 := &Multiwatcher{all: sm}
	req0 := &request{
		w:     w0,
		reply: make(chan bool, 1),
	}
	sm.handle(req0)
	sm.respond()
	assertReplied(c, true, req0)
	c.Assert(req0.changes, gc.DeepEquals, []multiwatcher.Delta{{Entity: &multiwatcher.MachineInfo{Id: "0"}}})
	assertWaitingRequests(c, sm, nil)
	// Add another request from the same watcher and respond.
	// It should have no reply because nothing has changed.
	req0 = &request{
		w:     w0,
		reply: make(chan bool, 1),
	}
	sm.handle(req0)
	sm.respond()
	assertNotReplied(c, req0)
	// Add two requests from another watcher and respond.
	// The request from the first watcher should still not
	// be replied to, but the later of the two requests from
	// the second watcher should get a reply.
	w1 := &Multiwatcher{all: sm}
	req1 := &request{
		w:     w1,
		reply: make(chan bool, 1),
	}
	sm.handle(req1)
	req2 := &request{
		w:     w1,
		reply: make(chan bool, 1),
	}
	sm.handle(req2)
	assertWaitingRequests(c, sm, map[*Multiwatcher][]*request{
		w0: {req0},
		w1: {req2, req1},
	})
	sm.respond()
	assertNotReplied(c, req0)
	assertNotReplied(c, req1)
	assertReplied(c, true, req2)
	c.Assert(req2.changes, gc.DeepEquals, []multiwatcher.Delta{{Entity: &multiwatcher.MachineInfo{Id: "0"}}})
	assertWaitingRequests(c, sm, map[*Multiwatcher][]*request{
		w0: {req0},
		w1: {req1},
	})
	// Check that nothing more gets responded to if we call respond again.
	sm.respond()
	assertNotReplied(c, req0)
	assertNotReplied(c, req1)
	// Now make a change and check that both waiting requests
	// get serviced.
	sm.all.Update(&multiwatcher.MachineInfo{Id: "1"})
	sm.respond()
	assertReplied(c, true, req0)
	assertReplied(c, true, req1)
	assertWaitingRequests(c, sm, nil)
	deltas := []multiwatcher.Delta{{Entity: &multiwatcher.MachineInfo{Id: "1"}}}
	c.Assert(req0.changes, gc.DeepEquals, deltas)
	c.Assert(req1.changes, gc.DeepEquals, deltas)
}
// TestRunStop verifies that stopping the store manager causes a
// subsequent Multiwatcher.Next call to fail with the "stopped" error and
// return no deltas.
func (*storeManagerSuite) TestRunStop(c *gc.C) {
	manager := newStoreManager(newTestBacking(nil))
	mw := &Multiwatcher{all: manager}
	stopErr := manager.Stop()
	c.Assert(stopErr, jc.ErrorIsNil)
	deltas, err := mw.Next()
	c.Assert(err, gc.ErrorMatches, "shared state watcher was stopped")
	c.Assert(deltas, gc.HasLen, 0)
}
// TestRun drives a running storeManager through the test backing and
// checks that a Multiwatcher observes the initial state, an update and a
// removal, in order.
func (*storeManagerSuite) TestRun(c *gc.C) {
	b := newTestBacking([]multiwatcher.EntityInfo{
		&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"},
		&multiwatcher.ServiceInfo{EnvUUID: "uuid", Name: "logging"},
		&multiwatcher.ServiceInfo{EnvUUID: "uuid", Name: "wordpress"},
	})
	sm := newStoreManager(b)
	defer func() {
		c.Check(sm.Stop(), gc.IsNil)
	}()
	w := &Multiwatcher{all: sm}
	// First Next delivers the complete initial state.
	checkNext(c, w, []multiwatcher.Delta{
		{Entity: &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"}},
		{Entity: &multiwatcher.ServiceInfo{EnvUUID: "uuid", Name: "logging"}},
		{Entity: &multiwatcher.ServiceInfo{EnvUUID: "uuid", Name: "wordpress"}},
	}, "")
	b.updateEntity(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0", InstanceId: "i-0"})
	checkNext(c, w, []multiwatcher.Delta{
		{Entity: &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0", InstanceId: "i-0"}},
	}, "")
	b.deleteEntity(multiwatcher.EntityId{"machine", "uuid", "0"})
	checkNext(c, w, []multiwatcher.Delta{
		{Removed: true, Entity: &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"}},
	}, "")
}

// TestMultipleEnvironments is like TestRun but with entities spread over
// three environment UUIDs, checking that deltas stay per-environment.
func (*storeManagerSuite) TestMultipleEnvironments(c *gc.C) {
	b := newTestBacking([]multiwatcher.EntityInfo{
		&multiwatcher.MachineInfo{EnvUUID: "uuid0", Id: "0"},
		&multiwatcher.ServiceInfo{EnvUUID: "uuid0", Name: "logging"},
		&multiwatcher.ServiceInfo{EnvUUID: "uuid0", Name: "wordpress"},
		&multiwatcher.MachineInfo{EnvUUID: "uuid1", Id: "0"},
		&multiwatcher.ServiceInfo{EnvUUID: "uuid1", Name: "logging"},
		&multiwatcher.ServiceInfo{EnvUUID: "uuid1", Name: "wordpress"},
		&multiwatcher.MachineInfo{EnvUUID: "uuid2", Id: "0"},
	})
	sm := newStoreManager(b)
	defer func() {
		c.Check(sm.Stop(), gc.IsNil)
	}()
	w := &Multiwatcher{all: sm}
	checkNext(c, w, []multiwatcher.Delta{
		{Entity: &multiwatcher.MachineInfo{EnvUUID: "uuid0", Id: "0"}},
		{Entity: &multiwatcher.ServiceInfo{EnvUUID: "uuid0", Name: "logging"}},
		{Entity: &multiwatcher.ServiceInfo{EnvUUID: "uuid0", Name: "wordpress"}},
		{Entity: &multiwatcher.MachineInfo{EnvUUID: "uuid1", Id: "0"}},
		{Entity: &multiwatcher.ServiceInfo{EnvUUID: "uuid1", Name: "logging"}},
		{Entity: &multiwatcher.ServiceInfo{EnvUUID: "uuid1", Name: "wordpress"}},
		{Entity: &multiwatcher.MachineInfo{EnvUUID: "uuid2", Id: "0"}},
	}, "")
	b.updateEntity(&multiwatcher.MachineInfo{EnvUUID: "uuid1", Id: "0", InstanceId: "i-0"})
	checkNext(c, w, []multiwatcher.Delta{
		{Entity: &multiwatcher.MachineInfo{EnvUUID: "uuid1", Id: "0", InstanceId: "i-0"}},
	}, "")
	b.deleteEntity(multiwatcher.EntityId{"machine", "uuid2", "0"})
	checkNext(c, w, []multiwatcher.Delta{
		{Removed: true, Entity: &multiwatcher.MachineInfo{EnvUUID: "uuid2", Id: "0"}},
	}, "")
	b.updateEntity(&multiwatcher.ServiceInfo{EnvUUID: "uuid0", Name: "logging", Exposed: true})
	checkNext(c, w, []multiwatcher.Delta{
		{Entity: &multiwatcher.ServiceInfo{EnvUUID: "uuid0", Name: "logging", Exposed: true}},
	}, "")
}
// TestMultiwatcherStop checks that stopping a Multiwatcher causes a
// concurrent Next call to return ErrStopped.
func (*storeManagerSuite) TestMultiwatcherStop(c *gc.C) {
	sm := newStoreManager(newTestBacking(nil))
	defer func() {
		c.Check(sm.Stop(), gc.IsNil)
	}()
	w := &Multiwatcher{all: sm}
	done := make(chan struct{})
	go func() {
		checkNext(c, w, nil, ErrStopped.Error())
		done <- struct{}{}
	}()
	err := w.Stop()
	c.Assert(err, jc.ErrorIsNil)
	<-done
}

// TestMultiwatcherStopBecauseStoreManagerError checks that a backing
// fetch error propagates through the store manager to the watcher's
// Next call and to the manager's Stop result.
func (*storeManagerSuite) TestMultiwatcherStopBecauseStoreManagerError(c *gc.C) {
	b := newTestBacking([]multiwatcher.EntityInfo{&multiwatcher.MachineInfo{Id: "0"}})
	sm := newStoreManager(b)
	defer func() {
		c.Check(sm.Stop(), gc.ErrorMatches, "some error")
	}()
	w := &Multiwatcher{all: sm}
	// Receive one delta to make sure that the storeManager
	// has seen the initial state.
	checkNext(c, w, []multiwatcher.Delta{{Entity: &multiwatcher.MachineInfo{Id: "0"}}}, "")
	c.Logf("setting fetch error")
	b.setFetchError(errors.New("some error"))
	c.Logf("updating entity")
	b.updateEntity(&multiwatcher.MachineInfo{Id: "1"})
	checkNext(c, w, nil, "some error")
}
// StoreIncRef increments the reference count of the store entry with the
// given entity id. The entry must exist.
func StoreIncRef(a *multiwatcherStore, id interface{}) {
	entry := a.entities[id].Value.(*entityEntry)
	entry.refCount++
}

// assertStoreContents checks that the store holds exactly the given
// entries (oldest first), that the entity map is consistent with the
// list, and that latestRevno matches.
func assertStoreContents(c *gc.C, a *multiwatcherStore, latestRevno int64, entries []entityEntry) {
	var gotEntries []entityEntry
	var gotElems []*list.Element
	c.Check(a.list.Len(), gc.Equals, len(entries))
	// The list is newest-first, so walk backwards to get oldest-first.
	for e := a.list.Back(); e != nil; e = e.Prev() {
		gotEntries = append(gotEntries, *e.Value.(*entityEntry))
		gotElems = append(gotElems, e)
	}
	c.Assert(gotEntries, gc.DeepEquals, entries)
	for i, ent := range entries {
		c.Assert(a.entities[ent.info.EntityId()], gc.Equals, gotElems[i])
	}
	c.Assert(a.entities, gc.HasLen, len(entries))
	c.Assert(a.latestRevno, gc.Equals, latestRevno)
}
// watcherState represents a Multiwatcher client's
// current view of the state. It holds the last delta that a given
// state watcher has seen for each entity.
type watcherState map[interface{}]multiwatcher.Delta

// update applies a batch of deltas to the client view, deleting entries
// for removal deltas. It panics if a removal refers to an unseen entity.
func (s watcherState) update(changes []multiwatcher.Delta) {
	for _, d := range changes {
		id := d.Entity.EntityId()
		if d.Removed {
			if _, ok := s[id]; !ok {
				panic(fmt.Errorf("entity id %v removed when it wasn't there", id))
			}
			delete(s, id)
		} else {
			s[id] = d
		}
	}
}

// check checks that the watcher state matches that
// held in current.
func (s watcherState) check(c *gc.C, current *multiwatcherStore) {
	currentEntities := make(watcherState)
	for id, elem := range current.entities {
		entry := elem.Value.(*entityEntry)
		if !entry.removed {
			currentEntities[id] = multiwatcher.Delta{Entity: entry.info}
		}
	}
	c.Assert(s, gc.DeepEquals, currentEntities)
}
// assertNotReplied fails if the request's reply channel already holds a
// value (i.e. the request has been responded to).
func assertNotReplied(c *gc.C, req *request) {
	select {
	case v := <-req.reply:
		c.Fatalf("request was unexpectedly replied to (got %v)", v)
	default:
	}
}

// assertReplied fails unless the request has been replied to with val.
func assertReplied(c *gc.C, val bool, req *request) {
	select {
	case v := <-req.reply:
		c.Assert(v, gc.Equals, val)
	default:
		c.Fatalf("request was not replied to")
	}
}

// assertWaitingRequests checks that the store manager's per-watcher
// queues of pending requests (linked via request.next) match `waiting`
// exactly, and that none of those requests has been replied to.
func assertWaitingRequests(c *gc.C, sm *storeManager, waiting map[*Multiwatcher][]*request) {
	c.Assert(sm.waiting, gc.HasLen, len(waiting))
	for w, reqs := range waiting {
		i := 0
		for req := sm.waiting[w]; ; req = req.next {
			if i >= len(reqs) {
				c.Assert(req, gc.IsNil)
				break
			}
			c.Assert(req, gc.Equals, reqs[i])
			assertNotReplied(c, req)
			i++
		}
	}
}
// storeManagerTestBacking is an in-memory Backing implementation used by
// the store manager tests. All fields are guarded by mu.
type storeManagerTestBacking struct {
	mu       sync.Mutex
	fetchErr error // error to return from fetch, when set
	entities map[multiwatcher.EntityId]multiwatcher.EntityInfo
	watchc   chan<- watcher.Change // channel registered via Watch, if any
	txnRevno int64
}

// newTestBacking returns a backing pre-populated with the given entities.
func newTestBacking(initial []multiwatcher.EntityInfo) *storeManagerTestBacking {
	b := &storeManagerTestBacking{
		entities: make(map[multiwatcher.EntityId]multiwatcher.EntityInfo),
	}
	for _, info := range initial {
		b.entities[info.EntityId()] = info
	}
	return b
}
func (b *storeManagerTestBacking) Changed(all *multiwatcherStore, change watcher.Change) error {
envUUID, changeId, ok := splitDocID(change.Id.(string))
if !ok {
return errors.Errorf("unexpected id format: %v", change.Id)
}
id := multiwatcher.EntityId{
Kind: change.C,
EnvUUID: envUUID,
Id: changeId,
}
info, err := b.fetch(id)
if err == mgo.ErrNotFound {
all.Remove(id)
return nil
}
if err != nil {
return err
}
all.Update(info)
return nil
}
// fetch returns the entity with the given id, b.fetchErr if one has been
// injected, or mgo.ErrNotFound when the entity does not exist.
func (b *storeManagerTestBacking) fetch(id multiwatcher.EntityId) (multiwatcher.EntityInfo, error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.fetchErr != nil {
		return nil, b.fetchErr
	}
	if info, ok := b.entities[id]; ok {
		return info, nil
	}
	return nil, mgo.ErrNotFound
}

// Watch registers the channel to receive change notifications.
// Only a single watcher at a time is supported.
func (b *storeManagerTestBacking) Watch(c chan<- watcher.Change) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.watchc != nil {
		panic("test backing can only watch once")
	}
	b.watchc = c
}

// Unwatch deregisters the previously watched channel.
func (b *storeManagerTestBacking) Unwatch(c chan<- watcher.Change) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if c != b.watchc {
		panic("unwatching wrong channel")
	}
	b.watchc = nil
}

// GetAll loads every entity in the backing into the store.
func (b *storeManagerTestBacking) GetAll(all *multiwatcherStore) error {
	b.mu.Lock()
	defer b.mu.Unlock()
	for _, info := range b.entities {
		all.Update(info)
	}
	return nil
}

// Release is a no-op for the test backing.
func (b *storeManagerTestBacking) Release() error {
	return nil
}
// updateEntity stores (or replaces) the entity and, when a watcher is
// registered, emits a corresponding change notification.
func (b *storeManagerTestBacking) updateEntity(info multiwatcher.EntityInfo) {
	b.mu.Lock()
	defer b.mu.Unlock()
	id := info.EntityId()
	b.entities[id] = info
	b.txnRevno++
	if b.watchc != nil {
		b.watchc <- watcher.Change{
			C:     id.Kind,
			Id:    ensureEnvUUID(id.EnvUUID, id.Id),
			Revno: b.txnRevno, // This is actually ignored, but fill it in anyway.
		}
	}
}

// setFetchError injects an error to be returned by subsequent fetches.
func (b *storeManagerTestBacking) setFetchError(err error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.fetchErr = err
}

// deleteEntity removes the entity and, when a watcher is registered,
// emits a removal notification (Revno: -1).
func (b *storeManagerTestBacking) deleteEntity(id multiwatcher.EntityId) {
	b.mu.Lock()
	defer b.mu.Unlock()
	delete(b.entities, id)
	b.txnRevno++
	if b.watchc != nil {
		b.watchc <- watcher.Change{
			C:     id.Kind,
			Id:    ensureEnvUUID(id.EnvUUID, id.Id),
			Revno: -1,
		}
	}
}
var errTimeout = errors.New("no change received in sufficient time")

// getNext calls w.Next in a goroutine and waits up to timeout for the
// result, returning errTimeout if nothing arrives in time.
// NOTE(review): on timeout the goroutine blocked in Next is leaked for
// the remainder of the test; acceptable in test code.
func getNext(c *gc.C, w *Multiwatcher, timeout time.Duration) ([]multiwatcher.Delta, error) {
	var deltas []multiwatcher.Delta
	var err error
	ch := make(chan struct{}, 1)
	go func() {
		deltas, err = w.Next()
		ch <- struct{}{}
	}()
	select {
	case <-ch:
		return deltas, err
	case <-time.After(timeout):
	}
	return nil, errTimeout
}

// checkNext asserts that the next batch of deltas matches `deltas`, or,
// when expectErr is non-empty, that Next fails with a matching error.
func checkNext(c *gc.C, w *Multiwatcher, deltas []multiwatcher.Delta, expectErr string) {
	d, err := getNext(c, w, 1*time.Second)
	if expectErr != "" {
		c.Check(err, gc.ErrorMatches, expectErr)
		return
	}
	c.Assert(err, jc.ErrorIsNil)
	checkDeltasEqual(c, d, deltas)
}
| agpl-3.0 |
Lektorium-LLC/edx-ora2 | openassessment/xblock/test/test_lms.py | 776 | """
Tests for the LMS compatibility mixin for the OpenAssessment block.
"""
from ddt import ddt
from .base import scenario, XBlockHandlerTestCase
@ddt
class LmsMixinTest(XBlockHandlerTestCase):
    """Test the simple LMS-specific attributes used during grading."""

    @scenario('data/basic_scenario.xml')
    def test_simple_methods(self, xblock):
        """The block is scorable, has no dynamic children and has a weight."""
        self.assertTrue(xblock.has_score)
        self.assertFalse(xblock.has_dynamic_children())
        self.assertTrue(hasattr(xblock, 'weight'))

    @scenario('data/basic_scenario.xml')
    def test_max_score(self, xblock):
        """max_score reflects the rubric defined in the scenario (20 points)."""
        self.assertEqual(xblock.max_score(), 20)

    @scenario('data/zero_points.xml')
    def test_max_score_zero_option_criteria(self, xblock):
        """A rubric whose criteria award zero points yields a zero max score."""
        self.assertEqual(xblock.max_score(), 0)
jorgevbo/server | plugins/metadata/lib/model/data/kCompareMetadataCondition.php | 3479 | <?php
/**
* @package plugins.metadata
* @subpackage model.data
*/
class kCompareMetadataCondition extends kCompareCondition
{
/**
* May contain the full xpath to the field in two formats
* 1. Slashed xPath, e.g. /metadata/myElementName
* 2. Using local-name function, e.g. /*[local-name()='metadata']/*[local-name()='myElementName']
* 3. Using only the field name, e.g. myElementName, it will be searched as //myElementName
*
* @var string
*/
private $xPath;
/**
* @var int
*/
private $profileId;
/**
* @var string
*/
private $profileSystemName;
/* (non-PHPdoc)
* @see kCondition::__construct()
*/
public function __construct($not = false)
{
$this->setType(MetadataPlugin::getConditionTypeCoreValue(MetadataConditionType::METADATA_FIELD_COMPARE));
parent::__construct($not);
}
/* (non-PHPdoc)
* @see kCondition::applyDynamicValues()
*/
protected function applyDynamicValues(kScope $scope)
{
parent::applyDynamicValues($scope);
$dynamicValues = $scope->getDynamicValues('{', '}');
if(is_array($dynamicValues) && count($dynamicValues))
{
$this->xPath = str_replace(array_keys($dynamicValues), $dynamicValues, $this->xPath);
if($this->profileSystemName)
$this->profileSystemName = str_replace(array_keys($dynamicValues), $dynamicValues, $this->profileSystemName);
}
}
/* (non-PHPdoc)
* @see kCondition::getFieldValue()
*/
public function getFieldValue(kScope $scope)
{
$profileId = $this->profileId;
if(!$profileId)
{
if(!$this->profileSystemName)
return null;
$profile = MetadataProfilePeer::retrieveBySystemName($this->profileSystemName, kCurrentContext::getCurrentPartnerId());
if(!$profile)
return null;
$profileId = $profile->getId();
}
$metadata = null;
if($scope instanceof accessControlScope || $scope instanceof kStorageProfileScope)
{
$metadata = MetadataPeer::retrieveByObject($profileId, MetadataObjectType::ENTRY, $scope->getEntryId());
}
elseif($scope instanceof kEventScope && $scope->getEvent() instanceof kApplicativeEvent)
{
$object = $scope->getEvent()->getObject();
if(kMetadataManager::isMetadataObject($object))
{
$objectType = kMetadataManager::getTypeNameFromObject($object);
$metadata = MetadataPeer::retrieveByObject($profileId, $objectType, $object->getId());
}
else if ($object instanceof Metadata)
{
$metadata = $object;
}
}
if(!$metadata)
return null;
$values = kMetadataManager::parseMetadataValues($metadata, $this->xPath);
if(is_null($values))
return null;
return array_map('intval', $values);
}
/**
* @return string $xPath
*/
public function getXPath()
{
return $this->xPath;
}
/**
* @return int $profileId
*/
public function getProfileId()
{
return $this->profileId;
}
/**
* @param string $xPath
*/
public function setXPath($xPath)
{
$this->xPath = $xPath;
}
/**
* @param int $profileId
*/
public function setProfileId($profileId)
{
$this->profileId = $profileId;
}
/**
* @return string
*/
public function getProfileSystemName()
{
return $this->profileSystemName;
}
/**
* @param string $profileSystemName
*/
public function setProfileSystemName($profileSystemName)
{
$this->profileSystemName = $profileSystemName;
}
/* (non-PHPdoc)
* @see kCompareCondition::shouldFieldDisableCache()
*/
public function shouldFieldDisableCache($scope)
{
return false;
}
}
| agpl-3.0 |
pratik136/ContinuousTests | AutoTest.Net/src/AutoTest.UI/RunMessageType.cs | 122 | namespace AutoTest.UI
{
/// <summary>
/// Classifies a run message so the UI can present its outcome.
/// </summary>
public enum RunMessageType
{
    /// <summary>Informational message with no pass/fail outcome.</summary>
    Normal,
    /// <summary>The run failed.</summary>
    Failed,
    /// <summary>The run completed successfully.</summary>
    Succeeded
}
}
| lgpl-2.1 |
MjAbuz/exist | test/src/xquery/indexing/IndexingTests.java | 205 | package xquery.indexing;
import xquery.TestRunner;
/**
 * Runs the XQuery test suite located under the indexing test directory.
 */
public class IndexingTests extends TestRunner {

	@Override
	protected String getDirectory() {
		// Directory containing the XQuery indexing tests to execute.
		return "test/src/xquery/indexing";
	}
}
| lgpl-2.1 |
lnu/nhibernate-core | src/NHibernate/Async/Event/Default/DefaultSaveEventListener.cs | 1369 | //------------------------------------------------------------------------------
// <auto-generated>
// This code was generated by AsyncGenerator.
//
// Changes to this file may cause incorrect behavior and will be lost if
// the code is regenerated.
// </auto-generated>
//------------------------------------------------------------------------------
using System;
using NHibernate.Engine;
namespace NHibernate.Event.Default
{
using System.Threading.Tasks;
using System.Threading;
public partial class DefaultSaveEventListener : DefaultSaveOrUpdateEventListener
{
    /// <summary>
    /// Async counterpart of the save-or-update dispatch. Decides whether the
    /// event's entity is already persistent (returns it as-is) or transient
    /// (saves it). NOTE: this file is generated by AsyncGenerator - edits here
    /// will be lost on regeneration.
    /// </summary>
    protected override Task<object> PerformSaveOrUpdateAsync(SaveOrUpdateEvent @event, CancellationToken cancellationToken)
    {
        // Honour cancellation before doing any work.
        if (cancellationToken.IsCancellationRequested)
        {
            return Task.FromCanceled<object>(cancellationToken);
        }
        try
        {
            // this implementation is supposed to tolerate incorrect unsaved-value
            // mappings, for the purpose of backward-compatibility
            EntityEntry entry = @event.Session.PersistenceContext.GetEntry(@event.Entity);
            if (entry != null && entry.Status != Status.Deleted)
            {
                // Entity is already tracked (and not deleted): treat as persistent.
                return Task.FromResult<object>(EntityIsPersistent(@event));
            }
            else
            {
                // Entity is unknown to the session: save it as transient.
                return EntityIsTransientAsync(@event, cancellationToken);
            }
        }
        catch (Exception ex)
        {
            // Generated pattern: surface synchronous failures via the returned task.
            return Task.FromException<object>(ex);
        }
    }
}
}
| lgpl-2.1 |
pjwilliams/mosesdecoder | contrib/promix/main.py | 3803 | #!/usr/bin/env python
#
# Implementation of PRO training and extensions to train phrase weights
#
import gzip
import logging
from numpy import array
import optparse
import os.path
import sys
from nbest import *
from sampler import *
from train import *
# Module-wide logger: timestamped messages, DEBUG level.
logging.basicConfig(format = "%(asctime)-15s %(message)s")
log = logging.getLogger('main')
log.setLevel(logging.DEBUG)
class Config:
    """Command-line configuration for PRO / mixture-model training.

    After parse() the option values are available as attributes:
    nbest_files, score_files, ttables, input_file, trainer, moses_bin_dir.
    """

    def __init__(self):
        parser = optparse.OptionParser(usage="%prog [options] ")
        parser.add_option("-t", "--trainer",
            dest="trainer", action="store", metavar="TYPE",
            type="choice", choices=("pro", "mix"), default="pro",
            help="type of trainer to run (pro,mix)")
        parser.add_option("-n", "--nbest",
            dest="nbest", action="append", metavar="NBEST-FILE",
            help="nbest output file(s) from decoder")
        parser.add_option("-S", "--scfile",
            dest="score", action="append", metavar="SCORE-FILE",
            help="score file(s) from extractor (in same order as nbests)")
        parser.add_option("-p", "--phrase-table",
            dest="ttable", action="append", metavar="TTABLE",
            help="ttable to be used in mixture model training")
        parser.add_option("-i", "--input-file",
            dest="input_file", action="store", metavar="INPUT-FILE",
            help="source text file")
        parser.add_option("-m", "--moses-bin-dir",
            dest="moses_bin_dir", action="store", metavar="DIR",
            default=os.path.expanduser("~/moses/bin"),
            help="directory containing Moses binaries")
        self.parser = parser
        self.nbest_files = []
        self.score_files = []
        self.ttables = []

    def parse(self, args=sys.argv[1:]):
        """Parse args, apply fallback defaults and validate combinations.

        Calls parser.error() (which exits) on inconsistent options.
        """
        options, _ = self.parser.parse_args(args)
        self.nbest_files = options.nbest
        self.score_files = options.score
        self.ttables = options.ttable
        self.input_file = options.input_file
        self.trainer = options.trainer
        self.moses_bin_dir = options.moses_bin_dir
        # Fall back to the bundled sample data when nothing was supplied.
        if not self.nbest_files:
            self.nbest_files = ["data/esen.nc.nbest.segment"]
        if not self.score_files:
            self.score_files = ["data/esen.nc.scores"]
        if len(self.nbest_files) != len(self.score_files):
            self.parser.error("Must have equal numbers of score files and nbest files")
        if self.trainer == "mix" and (not self.input_file or not self.ttables):
            self.parser.error("Need to specify input file and ttables for mix training")
def main():
    """Entry point: sample hypothesis pairs from n-best lists, train the
    selected trainer (PRO or mixture-model) and print the learned weights.
    """
    config = Config()
    config.parse()
    samples = []
    sampler = HopkinsMaySampler()
    nbests = 0  # counts processed n-best lists (currently only informational)
    for nbest_file,score_data_file in zip(config.nbest_files,config.score_files):
        log.debug("nbest: " + nbest_file + "; score:" + score_data_file)
        # Mixture training needs the phrase segmentation of each hypothesis.
        segments = False
        if config.trainer == "mix": segments = True
        for nbest in get_scored_nbests(nbest_file, score_data_file, config.input_file, segments=segments):
            samples += sampler.sample(nbest)
            nbests += 1
    log.debug("Samples loaded")
    trainer = None
    if config.trainer == "mix":
        # Add the phrase table scores from each ttable to both hypotheses
        # of every sampled pair before training the mixture weights.
        scorer = MosesPhraseScorer(config.ttables)
        log.debug("Scoring samples...")
        for sample in samples:
            scorer.add_scores(sample.hyp1)
            scorer.add_scores(sample.hyp2)
        log.debug("...samples scored")
        trainer = MixtureModelTrainer(samples)
    elif config.trainer == "pro":
        trainer = ProTrainer(samples)
    else: assert(0)  # unreachable: optparse restricts choices to pro/mix
    log.debug("Starting training...")
    weights,mix_weights = trainer.train(debug=False)
    log.debug("...training complete")
    # Feature weights, one "F<i> <weight>" line per feature.
    for i,w in enumerate(weights):
        print "F%d %10.8f" % (i,w)
    # Mixture weights, one "M<feature>_<table> <weight>" line per entry.
    for i,f in enumerate(mix_weights):
        for j,w in enumerate(f):
            print "M%d_%d %10.8f" % (i,j,w)
# Run training only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| lgpl-2.1 |
Sanne/hibernate-ogm | mongodb/src/test/java/org/hibernate/ogm/datastore/mongodb/test/associations/ListGlobalTest.java | 864 | /*
* Hibernate OGM, Domain model persistence for NoSQL datastores
*
* License: GNU Lesser General Public License (LGPL), version 2.1 or later
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
*/
package org.hibernate.ogm.datastore.mongodb.test.associations;
import java.util.Map;
import org.hibernate.ogm.backendtck.associations.collection.types.ListTest;
import org.hibernate.ogm.datastore.document.cfg.DocumentStoreProperties;
import org.hibernate.ogm.datastore.document.options.AssociationStorageType;
/**
 * Runs the generic {@link ListTest} suite against the MongoDB datastore with
 * associations stored in dedicated association documents.
 *
 * @author Emmanuel Bernard &lt;emmanuel@hibernate.org&gt;
 */
public class ListGlobalTest extends ListTest {

	@Override
	protected void configure(Map<String, Object> settings) {
		// Store associations in separate association documents instead of
		// embedding them in the owning entity's document.
		settings.put(
				DocumentStoreProperties.ASSOCIATIONS_STORE,
				AssociationStorageType.ASSOCIATION_DOCUMENT
		);
	}
}
| lgpl-2.1 |
fdlk/molgenis | molgenis-ontology/src/main/java/org/molgenis/ontology/sorta/controller/EntityCollectionResponse.java | 1818 | package org.molgenis.ontology.sorta.controller;
import java.util.List;
import java.util.Map;
import org.molgenis.data.DataService;
import org.molgenis.data.meta.model.EntityType;
import org.molgenis.security.core.UserPermissionEvaluator;
/**
 * REST response wrapper for one page of entities, including paging metadata
 * and optional previous/next page links.
 */
public class EntityCollectionResponse {
  private final String href;
  private final EntityTypeResponse meta;
  private final int start;
  private final int num;
  private final long total;
  private final String prevHref;
  private final String nextHref;
  private final List<Map<String, Object>> items;

  /**
   * @param entityPager paging state (start offset, page size, total, prev/next offsets)
   * @param items the entities of the current page, already serialized to maps
   * @param href base URI of this collection resource
   * @param meta entity metadata, or null to omit the meta section
   * @param permissionService used to build the metadata response
   * @param dataService used to build the metadata response
   */
  public EntityCollectionResponse(
      EntityPager entityPager,
      List<Map<String, Object>> items,
      String href,
      EntityType meta,
      UserPermissionEvaluator permissionService,
      DataService dataService) {
    this.href = href;
    if (meta == null) {
      this.meta = null;
    } else {
      this.meta = new EntityTypeResponse(meta, permissionService, dataService);
    }
    this.start = entityPager.getStart();
    this.num = entityPager.getNum();
    this.total = entityPager.getTotal();
    this.prevHref = pageHref(entityPager.getPrevStart());
    this.nextHref = pageHref(entityPager.getNextStart());
    this.items = items;
  }

  /** Builds the URI of a sibling page, or returns null when there is none. */
  private String pageHref(Integer pageStart) {
    if (pageStart == null) {
      return null;
    }
    return this.href + "?start=" + pageStart + "&num=" + this.num;
  }

  /** @return base URI of this collection resource */
  public String getHref() {
    return href;
  }

  /** @return entity metadata section, or null when omitted */
  public EntityTypeResponse getMeta() {
    return meta;
  }

  /** @return zero-based offset of the first item of this page */
  public int getStart() {
    return start;
  }

  /** @return requested page size */
  public int getNum() {
    return num;
  }

  /** @return total number of entities in the collection */
  public long getTotal() {
    return total;
  }

  /** @return URI of the previous page, or null on the first page */
  public String getPrevHref() {
    return prevHref;
  }

  /** @return URI of the next page, or null on the last page */
  public String getNextHref() {
    return nextHref;
  }

  /** @return the serialized entities of this page */
  public List<Map<String, Object>> getItems() {
    return items;
  }
}
| lgpl-3.0 |
fdlk/molgenis | molgenis-data/src/main/java/org/molgenis/data/transaction/TransactionConstants.java | 198 | package org.molgenis.data.transaction;
/** Constants shared by the transaction infrastructure. */
public final class TransactionConstants {
  /** Key under which the current transaction id is registered as a resource. */
  public static final String TRANSACTION_ID_RESOURCE_NAME = "transactionId";

  // Utility holder: never instantiated.
  private TransactionConstants() {}
}
| lgpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.