Merge branch 'master' into ccr
* master:
  Generalize remote license checker (#32971)
  Trim translog when safe commit advanced (#32967)
  Fix an inaccuracy in the dynamic templates documentation. (#32890)
  Logging: Use settings when building daemon threads (#32751)
  All Translog inner closes should happen after tragedy exception is set (#32674)
  HLREST: AwaitsFix ML Test
  Pass DiscoveryNode to initiateChannel (#32958)
  Add mzn and dz to unsupported locales (#32957)
  Use settings from the context in BootstrapChecks (#32908)
  Update docs for node specifications (#30468)
  HLRC: Forbid all Elasticsearch logging infra (#32784)
  Only configure publishing if it's applied externally (#32351)
  Fixes libs:dissect when in eclipse
  Protect ScriptedMetricIT test cases against failures on 0-doc shards (#32959) (#32968)
  [Kerberos] Add documentation for Kerberos realm (#32662)
  Watcher: Properly find next valid date in cron expressions (#32734)
  Fix some small issues in the getting started docs (#30346)
  Set forbidden APIs target compatibility to compiler java version (#32935)
  Move connection listener to ConnectionManager (#32956)
jasontedor committed Aug 20, 2018
2 parents ac75968 + 9050c7e commit 853eb1c
Showing 80 changed files with 1,673 additions and 983 deletions.
@@ -528,11 +528,12 @@ class BuildPlugin implements Plugin<Project> {
project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask ->
// The GenerateMavenPom task is aggressive about setting the destination; instead of fighting it,
// just make a copy.
generatePOMTask.ext.pomFileName = "${project.archivesBaseName}-${project.version}.pom"
doLast {
project.copy {
from generatePOMTask.destination
into "${project.buildDir}/distributions"
rename { "${project.archivesBaseName}-${project.version}.pom" }
rename { generatePOMTask.ext.pomFileName }
}
}
// build poms with assemble (if the assemble task exists)
@@ -19,23 +19,19 @@
package org.elasticsearch.gradle.plugin

import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin
import nebula.plugin.info.scm.ScmInfoPlugin
import nebula.plugin.publishing.maven.MavenScmPlugin
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.NoticeTask
import org.elasticsearch.gradle.test.RestIntegTestTask
import org.elasticsearch.gradle.test.RunTask
import org.gradle.api.InvalidUserDataException
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.XmlProvider
import org.gradle.api.publish.maven.MavenPublication
import org.gradle.api.publish.maven.plugins.MavenPublishPlugin
import org.gradle.api.publish.maven.tasks.GenerateMavenPom
import org.gradle.api.tasks.SourceSet
import org.gradle.api.tasks.bundling.Zip
import org.gradle.jvm.tasks.Jar

import java.nio.file.Files
import java.nio.file.Path
import java.nio.file.StandardCopyOption
import java.util.regex.Matcher
import java.util.regex.Pattern
/**
@@ -55,16 +51,10 @@ public class PluginBuildPlugin extends BuildPlugin {
String name = project.pluginProperties.extension.name
project.archivesBaseName = name

if (project.pluginProperties.extension.hasClientJar) {
// for plugins which work with the transport client, we copy the jar
// file to a new name, copy the nebula generated pom to the same name,
// and generate a different pom for the zip
addClientJarPomGeneration(project)
addClientJarTask(project)
}
// while the jar isn't normally published, we still at least build a pom of deps
// in case it is published, for instance when other plugins extend this plugin
configureJarPom(project)
// set the project description so it will be picked up by publishing
project.description = project.pluginProperties.extension.description

configurePublishing(project)

project.integTestCluster.dependsOn(project.bundlePlugin)
project.tasks.run.dependsOn(project.bundlePlugin)
@@ -94,6 +84,32 @@ public class PluginBuildPlugin extends BuildPlugin {
project.tasks.create('run', RunTask) // allow running ES with this plugin in the foreground of a build
}

private void configurePublishing(Project project) {
// Only configure publishing if applied externally
if (project.pluginProperties.extension.hasClientJar) {
project.plugins.apply(MavenScmPlugin.class)
// Only change Jar tasks; we don't want a -client zip, so we can't change archivesBaseName
project.tasks.withType(Jar) {
baseName = baseName + "-client"
}
// always configure publishing for client jars
project.plugins.apply(MavenScmPlugin.class)
project.publishing.publications.nebula(MavenPublication).artifactId(
project.pluginProperties.extension.name + "-client"
)
project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask ->
generatePOMTask.ext.pomFileName = "${project.archivesBaseName}-client-${project.version}.pom"
}
} else {
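// no client jar: only configure the default artifactId if a Maven publish plugin is applied externally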
project.plugins.withType(MavenPublishPlugin).whenPluginAdded {
project.publishing.publications.nebula(MavenPublication).artifactId(
project.pluginProperties.extension.name
)
}

}
}

private static void configureDependencies(Project project) {
project.dependencies {
compileOnly "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}"
@@ -161,33 +177,6 @@ public class PluginBuildPlugin extends BuildPlugin {
}

/** Adds a task to move jar and associated files to a "-client" name. */
protected static void addClientJarTask(Project project) {
Task clientJar = project.tasks.create('clientJar')
clientJar.dependsOn(project.jar, project.tasks.generatePomFileForClientJarPublication, project.javadocJar, project.sourcesJar)
clientJar.doFirst {
Path jarFile = project.jar.outputs.files.singleFile.toPath()
String clientFileName = jarFile.fileName.toString().replace(project.version, "client-${project.version}")
Files.copy(jarFile, jarFile.resolveSibling(clientFileName), StandardCopyOption.REPLACE_EXISTING)

String clientPomFileName = clientFileName.replace('.jar', '.pom')
Files.copy(
project.tasks.generatePomFileForClientJarPublication.outputs.files.singleFile.toPath(),
jarFile.resolveSibling(clientPomFileName),
StandardCopyOption.REPLACE_EXISTING
)

String sourcesFileName = jarFile.fileName.toString().replace('.jar', '-sources.jar')
String clientSourcesFileName = clientFileName.replace('.jar', '-sources.jar')
Files.copy(jarFile.resolveSibling(sourcesFileName), jarFile.resolveSibling(clientSourcesFileName),
StandardCopyOption.REPLACE_EXISTING)

String javadocFileName = jarFile.fileName.toString().replace('.jar', '-javadoc.jar')
String clientJavadocFileName = clientFileName.replace('.jar', '-javadoc.jar')
Files.copy(jarFile.resolveSibling(javadocFileName), jarFile.resolveSibling(clientJavadocFileName),
StandardCopyOption.REPLACE_EXISTING)
}
project.assemble.dependsOn(clientJar)
}

static final Pattern GIT_PATTERN = Pattern.compile(/git@([^:]+):([^\.]+)\.git/)

@@ -209,39 +198,11 @@ public class PluginBuildPlugin extends BuildPlugin {

/** Adds nebula publishing task to generate a pom file for the plugin. */
protected static void addClientJarPomGeneration(Project project) {
project.plugins.apply(MavenPublishPlugin.class)

project.publishing {
publications {
clientJar(MavenPublication) {
from project.components.java
artifactId = project.pluginProperties.extension.name + '-client'
pom.withXml { XmlProvider xml ->
Node root = xml.asNode()
root.appendNode('name', project.pluginProperties.extension.name)
root.appendNode('description', project.pluginProperties.extension.description)
root.appendNode('url', urlFromOrigin(project.scminfo.origin))
Node scmNode = root.appendNode('scm')
scmNode.appendNode('url', project.scminfo.origin)
}
}
}
}
project.plugins.apply(MavenScmPlugin.class)
project.description = project.pluginProperties.extension.description
}

/** Configure the pom for the main jar of this plugin */
protected static void configureJarPom(Project project) {
project.plugins.apply(ScmInfoPlugin.class)
project.plugins.apply(MavenPublishPlugin.class)

project.publishing {
publications {
nebula(MavenPublication) {
artifactId project.pluginProperties.extension.name
}
}
}
}

protected void addNoticeGeneration(Project project) {
File licenseFile = project.pluginProperties.extension.licenseFile
@@ -21,6 +21,7 @@ package org.elasticsearch.gradle.precommit
import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis
import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin
import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask
import org.gradle.api.JavaVersion
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.file.FileCollection
@@ -101,6 +102,11 @@ class PrecommitTasks {
signaturesURLs = project.forbiddenApis.signaturesURLs +
[ getClass().getResource('/forbidden/es-server-signatures.txt') ]
}
// forbidden apis doesn't support Java 11, so stop at 10
String targetMajorVersion = (project.compilerJavaVersion.compareTo(JavaVersion.VERSION_1_10) > 0 ?
JavaVersion.VERSION_1_10 :
project.compilerJavaVersion).getMajorVersion()
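// Java 9 and later render as plain major versions ("9", "10"); earlier versions use the legacy "1.x" form (e.g. "1.8")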
targetCompatibility = Integer.parseInt(targetMajorVersion) >= 9 ? targetMajorVersion : "1.${targetMajorVersion}"
}
Task forbiddenApis = project.tasks.findByName('forbiddenApis')
forbiddenApis.group = "" // clear group, so this does not show up under verification tasks
@@ -20,5 +20,14 @@ org.apache.http.entity.ContentType#create(java.lang.String,java.lang.String)
org.apache.http.entity.ContentType#create(java.lang.String,java.nio.charset.Charset)
org.apache.http.entity.ContentType#create(java.lang.String,org.apache.http.NameValuePair[])

@defaultMessage ES's logging infrastructure uses log4j2 which we don't want to force on high level rest client users
org.elasticsearch.common.logging.DeprecationLogger
org.elasticsearch.common.logging.ESLoggerFactory
org.elasticsearch.common.logging.LogConfigurator
org.elasticsearch.common.logging.LoggerMessageFormat
org.elasticsearch.common.logging.Loggers
org.elasticsearch.common.logging.NodeNamePatternConverter
org.elasticsearch.common.logging.PrefixLogger

@defaultMessage We can't rely on log4j2 being on the classpath so don't log deprecations!
org.elasticsearch.common.xcontent.LoggingDeprecationHandler
@@ -19,6 +19,7 @@

package org.elasticsearch.client;

import org.apache.http.util.EntityUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
@@ -34,6 +35,7 @@
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.client.Request;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.rest.RestStatus;

@@ -174,6 +176,8 @@ public void testClusterHealthYellowClusterLevel() throws IOException {
request.timeout("5s");
ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync);

logger.info("Shard stats\n{}", EntityUtils.toString(
client().performRequest(new Request("GET", "/_cat/shards")).getEntity()));
assertYellowShards(response);
assertThat(response.getIndices().size(), equalTo(0));
}
@@ -186,6 +190,8 @@ public void testClusterHealthYellowIndicesLevel() throws IOException {
request.level(ClusterHealthRequest.Level.INDICES);
ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync);

logger.info("Shard stats\n{}", EntityUtils.toString(
client().performRequest(new Request("GET", "/_cat/shards")).getEntity()));
assertYellowShards(response);
assertThat(response.getIndices().size(), equalTo(2));
for (Map.Entry<String, ClusterIndexHealth> entry : response.getIndices().entrySet()) {
@@ -19,6 +19,7 @@
package org.elasticsearch.client;

import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest;
import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse;
@@ -36,6 +37,7 @@

import static org.hamcrest.Matchers.is;

@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32993")
public class MachineLearningIT extends ESRestHighLevelClientTestCase {

public void testPutJob() throws Exception {
69 changes: 58 additions & 11 deletions docs/reference/cluster.asciidoc
@@ -6,23 +6,70 @@
["float",id="cluster-nodes"]
== Node specification

Some cluster-level APIs may operate on a subset of the nodes which can be
specified with _node filters_. For example, the <<tasks,Task Management>>,
<<cluster-nodes-stats,Nodes Stats>>, and <<cluster-nodes-info,Nodes Info>> APIs
can all report results from a filtered set of nodes rather than from all nodes.

_Node filters_ are written as a comma-separated list of individual filters,
each of which adds or removes nodes from the chosen subset. Each filter can be
one of the following:

* `_all`, to add all nodes to the subset.
* `_local`, to add the local node to the subset.
* `_master`, to add the currently-elected master node to the subset.
* a node id or name, to add this node to the subset.
* an IP address or hostname, to add all matching nodes to the subset.
* a pattern, using `*` wildcards, which adds to the subset all nodes whose
name, address or hostname matches the pattern.
* `master:true`, `data:true`, `ingest:true` or `coordinating_only:true`, which
respectively add to the subset all master-eligible nodes, all data nodes,
all ingest nodes, and all coordinating-only nodes.
* `master:false`, `data:false`, `ingest:false` or `coordinating_only:false`,
which respectively remove from the subset all master-eligible nodes, all data
nodes, all ingest nodes, and all coordinating-only nodes.
* a pair of patterns, using `*` wildcards, of the form `attrname:attrvalue`,
which adds to the subset all nodes with a custom node attribute whose name
and value match the respective patterns. Custom node attributes are
configured by setting properties in the configuration file of the form
`node.attr.attrname: attrvalue`, as in the example below.
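
For example, a node that should match the `rack:2` filters shown below would
have the following (illustrative) setting in its configuration file:

[source,yaml]
--------------------------------------------------
node.attr.rack: 2
--------------------------------------------------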

NOTE: node filters run in the order in which they are given, which is important
if using filters that remove nodes from the set. For example
`_all,master:false` means all the nodes except the master-eligible ones, but
`master:false,_all` means the same as `_all` because the `_all` filter runs
after the `master:false` filter.
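
For instance, the following two requests differ only in filter order, yet
select different nodes: the first selects every node except the
master-eligible ones, while the second selects all nodes because `_all` runs
last:

[source,js]
--------------------------------------------------
GET /_nodes/_all,master:false
GET /_nodes/master:false,_all
--------------------------------------------------
// CONSOLE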

NOTE: if no filters are given, the default is to select all nodes. However, if
any filters are given then they run starting with an empty chosen subset. This
means that filters such as `master:false` which remove nodes from the chosen
subset are only useful if they come after some other filters. When used on its
own, `master:false` selects no nodes.

Here are some examples of the use of node filters with the
<<cluster-nodes-info,Nodes Info>> APIs.

[source,js]
--------------------------------------------------
# If no filters are given, the default is to select all nodes
GET /_nodes
# Explicitly select all nodes
GET /_nodes/_all
# Select just the local node
GET /_nodes/_local
# Select the elected master node
GET /_nodes/_master
# Select nodes by name, which can include wildcards
GET /_nodes/node_name_goes_here
GET /_nodes/node_name_goes_*
# Select nodes by address, which can include wildcards
GET /_nodes/10.0.0.3,10.0.0.4
GET /_nodes/10.0.0.*
# Select nodes by role
GET /_nodes/_all,master:false
GET /_nodes/data:true,ingest:true
GET /_nodes/coordinating_only:true
# Select nodes by custom attribute (e.g. with something like `node.attr.rack: 2` in the configuration file)
GET /_nodes/rack:2
GET /_nodes/ra*:2
GET /_nodes/ra*:2*
--------------------------------------------------
// CONSOLE
21 changes: 16 additions & 5 deletions docs/reference/cluster/nodes-hot-threads.asciidoc
@@ -1,12 +1,23 @@
[[cluster-nodes-hot-threads]]
== Nodes hot_threads

This API yields a breakdown of the hot threads on each selected node in the
cluster. Its endpoints are `/_nodes/hot_threads` and
`/_nodes/{nodes}/hot_threads`:

[source,js]
--------------------------------------------------
GET /_nodes/hot_threads
GET /_nodes/nodeId1,nodeId2/hot_threads
--------------------------------------------------
// CONSOLE

The first command gets the hot threads of all the nodes in the cluster. The
second command gets the hot threads of only `nodeId1` and `nodeId2`. Nodes can
be selected using <<cluster-nodes,node filters>>.

The output is plain text with a breakdown of each node's top hot threads. The
allowed parameters are:

[horizontal]
`threads`:: number of hot threads to provide, defaults to 3.
9 changes: 9 additions & 0 deletions docs/reference/cluster/stats.asciidoc
@@ -213,3 +213,12 @@ Will return, for example:
// 3. All of the numbers and strings on the right hand side of *every* field in
// the response are ignored. So we're really only asserting things about the
// shape of this response, not the values in it.

This API can be restricted to a subset of the nodes using the `?nodeId`
parameter, which accepts <<cluster-nodes,node filters>>:

[source,js]
--------------------------------------------------
GET /_cluster/stats?nodeId=node1,node*,master:false
--------------------------------------------------
// CONSOLE