Digging into the Apache Flink source code (to be continued)

By yyz940922 (original work)

Project modules (excluding .git, .github, .idea, docs, etc.):

  • flink-annotations: Flink annotations

    • org.apache.flink.annotation annotation classes

    • Experimental.java (annotation for experimental use)

    package org.apache.flink.annotation;
    
    import java.lang.annotation.Documented;
    import java.lang.annotation.ElementType;
    import java.lang.annotation.Target;
    
    /**
     * Annotation to mark classes for experimental use.
     *
     * <p>Classes with this annotation are neither battle-tested nor stable, and may be changed or removed in future versions.
     * 
     * <p>This annotation also excludes classes with evolving interfaces / signatures
     * annotated with {@link Public} and {@link PublicEvolving}.
     */
    @Documented
    @Target({ElementType.TYPE, ElementType.METHOD, ElementType.FIELD, ElementType.CONSTRUCTOR })
    @Public
    public @interface Experimental {
    }
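
    A quick usage sketch (my own example, not from the Flink code base; the class is hypothetical):

    import org.apache.flink.annotation.Experimental;

    /** A hypothetical operator whose API may still change or be removed. */
    @Experimental
    public class UnstableWindowOperator {
        // safe to try out, but not guaranteed to survive the next release
    }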
    
    • Internal.java
    package org.apache.flink.annotation;
    
    import java.lang.annotation.Documented;
    import java.lang.annotation.ElementType;
    import java.lang.annotation.Target;
    
    /**
     * Interface to mark methods within stable, public APIs as an internal developer API.
     * <p>Developer APIs are stable but internal to Flink and might change across releases.
     */
    @Documented
    @Target({ ElementType.TYPE, ElementType.METHOD, ElementType.CONSTRUCTOR })
    @Public
    public @interface Internal {
    }
    
    • Public.java
    package org.apache.flink.annotation;
    
    import java.lang.annotation.Documented;
    import java.lang.annotation.ElementType;
    import java.lang.annotation.Target;
    
    /**
     * Annotation for marking classes as public, stable interfaces.
     *
     * <p>Classes, methods and fields with this annotation are stable across minor releases (1.0, 1.1, 1.2). In other words, applications using @Public annotated classes will compile against newer versions of the same major release.
     * <p>Only major releases (1.0, 2.0, 3.0) can break interfaces with this annotation.
     */
    @Documented
    @Target(ElementType.TYPE)
    @Public
    public @interface Public {}
    
    • PublicEvolving.java
    package org.apache.flink.annotation;
    
    import java.lang.annotation.Documented;
    import java.lang.annotation.ElementType;
    import java.lang.annotation.Target;
    
    /**
     * Annotation to mark classes and methods for public use, but with evolving interfaces.
     *
     * <p>Classes and methods with this annotation are intended for public use and have 
     * stable behavior.
     *
     * However, their interfaces and signatures are not considered to be stable and might be changed across versions.
     *
     * <p>This annotation also excludes methods and classes with evolving interfaces /  
     * signatures within classes annotated with {@link Public}.
     */
    @Documented
    @Target({ ElementType.TYPE, ElementType.METHOD, ElementType.FIELD, ElementType.CONSTRUCTOR })
    @Public
    public @interface PublicEvolving {
    }
    
    • VisibleForTesting.java
    package org.apache.flink.annotation;
    
    import java.lang.annotation.Documented;
    import java.lang.annotation.ElementType;
    import java.lang.annotation.Target;
    
    /**
     * This annotation declares that a function, field, constructor, or entire type is only visible for testing purposes.
     *
     * <p>This annotation is typically attached when for example a method should be
     * {@code private} (because it is not intended to be called externally), but cannot be
     * declared private, because some tests need to have access to it.
     */
    @Documented
    @Target({ ElementType.TYPE, ElementType.METHOD, ElementType.FIELD, ElementType.CONSTRUCTOR })
    @Internal
    public @interface VisibleForTesting {}
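
    The typical pattern looks like this (a hypothetical class, my own sketch):

    import org.apache.flink.annotation.VisibleForTesting;

    public class BufferPool {

      /** Would be private, but unit tests need to call it. */
      @VisibleForTesting
      int currentSize() {
        return 0; // hypothetical implementation
      }
    }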
    
    • org.apache.flink.annotation.docs annotation classes for documentation generation

      • ConfigGroup.java
      package org.apache.flink.annotation.docs;
      
      import org.apache.flink.annotation.Internal;
      
      import java.lang.annotation.Target;
      
      /**
       * A class that specifies a group of config options. The name of the group will be
       * used as the basis for the filename of the generated html file, as defined in 
       * {@link ConfigOptionsDocGenerator}.
       *
       * @see ConfigGroups
       */
      @Target({})
      @Internal
      public @interface ConfigGroup {
        String name();
        String keyPrefix();
      }
      
      • ConfigGroups.java
      package org.apache.flink.annotation.docs;
      
      import org.apache.flink.annotation.Internal;
      
      import java.lang.annotation.ElementType;
      import java.lang.annotation.Retention;
      import java.lang.annotation.RetentionPolicy;
      import java.lang.annotation.Target;
      
       /**
        * Annotation used on classes containing config options that enables the separation
        * of options into different tables based on key prefixes.
        *
        * <p>A config option is assigned to a {@link ConfigGroup} if the option key matches
        * the group prefix.
        *
        * <p>If a key matches multiple prefixes the longest matching prefix takes priority.
        * An option is never assigned to multiple groups.
        *
        * <p>Options that don't match any group are implicitly added to a default group.
        */
      @Target(ElementType.TYPE)
      @Retention(RetentionPolicy.RUNTIME)
      @Internal
      public @interface ConfigGroups {
        ConfigGroup[] groups() default {};
      }
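
      A usage sketch (a made-up options class; the key prefixes are illustrative): options whose keys start with "first." land in their own generated table, everything else falls into the default group.

      import org.apache.flink.annotation.docs.ConfigGroup;
      import org.apache.flink.annotation.docs.ConfigGroups;

      @ConfigGroups(groups = {
        @ConfigGroup(name = "FirstGroup", keyPrefix = "first"),
        @ConfigGroup(name = "SecondGroup", keyPrefix = "second")
      })
      public class MyOptions {
        // config option fields would be declared here
      }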
      
      • Documentation.java
      package org.apache.flink.annotation.docs;
      
      import org.apache.flink.annotation.Internal;
      
      import java.lang.annotation.ElementType;
      import java.lang.annotation.Retention;
      import java.lang.annotation.RetentionPolicy;
      import java.lang.annotation.Target;
      
      /**
       * Collection of annotations to modify the behavior of the documentation generators.
       */
      public final class Documentation {
      
        /**
         * Annotation used on config option fields to override the documented default.
         */
        @Target(ElementType.FIELD)
        @Retention(RetentionPolicy.RUNTIME)
        @Internal
        public @interface OverrideDefault {
          String value();
        }
      
        /**
         * Annotation used on config option fields to include them in the "Common   
         * Options" section.
         *
         * <p>The {@link CommonOption#position()} argument controls the position in the 
         * generated table, with lower values being placed at the top. Fields with the 
         * same position are sorted alphabetically by key.
         */
        @Target(ElementType.FIELD)
        @Retention(RetentionPolicy.RUNTIME)
        @Internal
        public @interface CommonOption {
              // position of the memory-related options
          int POSITION_MEMORY = 10;
              // position of the parallelism/slot options (related to the topology)
          int POSITION_PARALLELISM_SLOTS = 20;
              // position of the fault-tolerance options
          int POSITION_FAULT_TOLERANCE = 30;
              // position of the high-availability options
          int POSITION_HIGH_AVAILABILITY = 40;
              // position of the security options
          int POSITION_SECURITY = 50;
      
          int position() default Integer.MAX_VALUE;
        }
      
        /**
         * Annotation used on table config options for adding meta data labels.
         *
         * <p>The {@link TableOption#execMode()} argument indicates the execution mode 
         * the config works for (batch, streaming or both).
         */
        @Target(ElementType.FIELD)
        @Retention(RetentionPolicy.RUNTIME)
        @Internal
        public @interface TableOption {
          ExecMode execMode();
        }
      
        /**
         * The execution mode the config works for.
          * (An enum, so each mode constant is a singleton.)
         */
        public enum ExecMode {
      
          BATCH("Batch"), STREAMING("Streaming"), BATCH_STREAMING("Batch and Streaming");
      
          private final String name;
      
          ExecMode(String name) {
            this.name = name;
          }
      
          @Override
          public String toString() {
            return name;
          }
        }
      
        /**
         * Annotation used on config option fields to exclude the config option from 
         * documentation.
         */
        @Target(ElementType.FIELD)
        @Retention(RetentionPolicy.RUNTIME)
        @Internal
        public @interface ExcludeFromDocumentation {
          /**
           * The optional reason why the config option is excluded from documentation.
           */
          String value() default "";
        }
      
        private Documentation(){
        }
      }
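
      A sketch of how these annotations are combined on config option fields (the options themselves are made up; ConfigOption/ConfigOptions live in flink-core):

      import org.apache.flink.annotation.docs.Documentation;
      import org.apache.flink.configuration.ConfigOption;
      import org.apache.flink.configuration.ConfigOptions;

      public class MyOptions {

        /** Shown in the "Common Options" table, sorted into the memory block. */
        @Documentation.CommonOption(position = Documentation.CommonOption.POSITION_MEMORY)
        public static final ConfigOption<Integer> BUFFER_SIZE = ConfigOptions
          .key("my.buffer.size")
          .defaultValue(4096);

        /** Kept out of the generated documentation entirely. */
        @Documentation.ExcludeFromDocumentation("internal tuning knob")
        public static final ConfigOption<Integer> INTERNAL_KNOB = ConfigOptions
          .key("my.internal.knob")
          .defaultValue(1);
      }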
      
      
  • flink-clients

    • org.apache.flink.client

      • ClientUtils.java client utility class
      package org.apache.flink.client;
      
      import org.apache.flink.runtime.execution.librarycache.FlinkUserCodeClassLoaders;
      
      import java.io.File;
      import java.io.IOException;
      import java.net.URISyntaxException;
      import java.net.URL;
      import java.util.List;
      import java.util.jar.JarFile;
      
      /**
       * Utility functions for Flink client.
       */
      public enum ClientUtils {
        ;
        // check the given JAR file
        public static void checkJarFile(URL jar) throws IOException {
          File jarFile;
          try {
            jarFile = new File(jar.toURI());
          } catch (URISyntaxException e) {
            throw new IOException("JAR file path is invalid '" + jar + '\'');
          }
            // the JAR file does not exist
          if (!jarFile.exists()) {
            throw new IOException("JAR file does not exist '" + jarFile.getAbsolutePath() + '\'');
          }
            // the JAR file cannot be read
          if (!jarFile.canRead()) {
            throw new IOException("JAR file can't be read '" + jarFile.getAbsolutePath() + '\'');
          }
      
          try (JarFile ignored = new JarFile(jarFile)) {
          // verify that we can open the Jar file
          } catch (IOException e) {
            throw new IOException("Error while opening jar file '" + jarFile.getAbsolutePath() + '\'', e);
          }
        }
        // build the user-code class loader
        public static ClassLoader buildUserCodeClassLoader(List<URL> jars, List<URL> classpaths, ClassLoader parent) {
          URL[] urls = new URL[jars.size() + classpaths.size()];
          for (int i = 0; i < jars.size(); i++) {
            urls[i] = jars.get(i);
          }
          for (int i = 0; i < classpaths.size(); i++) {
            urls[i + jars.size()] = classpaths.get(i);
          }
          return FlinkUserCodeClassLoaders.parentFirst(urls, parent);
        }
      }
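
      A minimal usage sketch (the JAR path is a placeholder):

      import org.apache.flink.client.ClientUtils;

      import java.io.File;
      import java.net.URL;
      import java.util.Collections;
      import java.util.List;

      public class ClientUtilsExample {
        public static void main(String[] args) throws Exception {
          URL jar = new File("/path/to/job.jar").toURI().toURL();
          // throws IOException if the file is missing, unreadable, or not a valid JAR
          ClientUtils.checkJarFile(jar);

          List<URL> jars = Collections.singletonList(jar);
          List<URL> classpaths = Collections.emptyList();
          // parent-first class loader over the job JARs plus extra classpath entries
          ClassLoader loader = ClientUtils.buildUserCodeClassLoader(
            jars, classpaths, ClientUtilsExample.class.getClassLoader());
        }
      }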
      
      
      • LocalExecutor.java local executor
      package org.apache.flink.client;
      
      import org.apache.flink.api.common.JobExecutionResult;
      import org.apache.flink.api.common.Plan;
      import org.apache.flink.api.common.PlanExecutor;
      import org.apache.flink.configuration.ConfigConstants;
      import org.apache.flink.configuration.Configuration;
      import org.apache.flink.configuration.RestOptions;
      import org.apache.flink.configuration.TaskManagerOptions;
      import org.apache.flink.optimizer.DataStatistics;
      import org.apache.flink.optimizer.Optimizer;
      import org.apache.flink.optimizer.plan.OptimizedPlan;
      import org.apache.flink.optimizer.plantranslate.JobGraphGenerator;
      import org.apache.flink.runtime.jobgraph.JobGraph;
      import org.apache.flink.runtime.minicluster.JobExecutorService;
      import org.apache.flink.runtime.minicluster.MiniCluster;
      import org.apache.flink.runtime.minicluster.MiniClusterConfiguration;
      import org.apache.flink.runtime.minicluster.RpcServiceSharing;
      
      import static org.apache.flink.util.Preconditions.checkNotNull;
      
       /**
        * A PlanExecutor that runs Flink programs on a local embedded Flink runtime instance.
        *
        * <p>By simply calling the {@link #executePlan(org.apache.flink.api.common.Plan)}
        * method, this executor starts up and shuts down again immediately after the program
        * finishes.</p>
        *
        * <p>To use this executor to execute many dataflow programs that constitute one job
        * together, this executor needs to be explicitly started, to keep running across
        * several executions.</p>
        */
      public class LocalExecutor extends PlanExecutor {
      
        /** Custom user configuration for the execution. */
        private final Configuration baseConfiguration;
      
        public LocalExecutor() {
          this(new Configuration());
        }
      
        public LocalExecutor(Configuration conf) {
          this.baseConfiguration = checkNotNull(conf);
        }
        // create the job executor service from the given configuration
        private JobExecutorService createJobExecutorService(Configuration configuration) throws Exception {
              // if the configuration contains no REST bind port, set it to 0 (pick a random port)
          if (!configuration.contains(RestOptions.BIND_PORT)) {
            configuration.setString(RestOptions.BIND_PORT, "0");
          }
          // builder pattern: produces an immutable MiniClusterConfiguration
          final MiniClusterConfiguration miniClusterConfiguration = new MiniClusterConfiguration.Builder()
            .setConfiguration(configuration)
            .setNumTaskManagers( // number of TaskManagers
              configuration.getInteger(
                ConfigConstants.LOCAL_NUMBER_TASK_MANAGER,
                ConfigConstants.DEFAULT_LOCAL_NUMBER_TASK_MANAGER))
            .setRpcServiceSharing(RpcServiceSharing.SHARED) // share the RPC service
            .setNumSlotsPerTaskManager( // number of slots per TaskManager
              configuration.getInteger(
                TaskManagerOptions.NUM_TASK_SLOTS, 1))
            .build();
          // create and start the MiniCluster
          final MiniCluster miniCluster = new MiniCluster(miniClusterConfiguration);
          miniCluster.start();
      
          configuration.setInteger(RestOptions.PORT, miniCluster.getRestAddress().get().getPort());
      
          return miniCluster;
        }
      
         /**
          * Executes the given program on a local runtime and waits for the job to finish.
          *
          * <p>If the executor has not been started before, this starts the executor and shuts
          * it down after the job finished. If the job runs in session mode, the executor is
          * kept alive until no more references to the executor exist.</p>
          *
          * @param plan The plan of the program to execute.
          * @return The net runtime of the program, in milliseconds.
          *
          * @throws Exception Thrown, if either the startup of the local execution context, or
          * the execution caused an exception.
          */
        @Override
        public JobExecutionResult executePlan(Plan plan) throws Exception {
              // the plan must not be null
          checkNotNull(plan);
      
              // build the (immutable) job executor service configuration
          final Configuration jobExecutorServiceConfiguration = configureExecution(plan);
      
          try (final JobExecutorService executorService = createJobExecutorService(jobExecutorServiceConfiguration)) {
      
                  // create the Optimizer
            Optimizer pc = new Optimizer(new DataStatistics(), jobExecutorServiceConfiguration);
            OptimizedPlan op = pc.compile(plan);
      
                  // create the JobGraph generator
            JobGraphGenerator jgg = new JobGraphGenerator(jobExecutorServiceConfiguration);

                  // compile the JobGraph from the optimized plan
            JobGraph jobGraph = jgg.compileJobGraph(op, plan.getJobId());
      
                  // execute the job blocking, return the job execution result
            return executorService.executeJobBlocking(jobGraph);
          }
        }
        // configure the execution
        private Configuration configureExecution(final Plan plan) {
              // create the executor service configuration
          final Configuration executorConfiguration = createExecutorServiceConfig(plan);
              // set the plan's parallelism
          setPlanParallelism(plan, executorConfiguration);
          return executorConfiguration;
        }
        // create the executor service configuration
        private Configuration createExecutorServiceConfig(final Plan plan) {
          final Configuration newConfiguration = new Configuration();
              // store the plan's maximum parallelism under the NUM_TASK_SLOTS option of the new configuration
          newConfiguration.setInteger(TaskManagerOptions.NUM_TASK_SLOTS, plan.getMaximumParallelism());
              // copy all base configuration entries into the new configuration
          newConfiguration.addAll(baseConfiguration);
          return newConfiguration;
        }
        // set the parallelism
        private void setPlanParallelism(final Plan plan, final Configuration executorServiceConfig) {
          // TODO: Set job's default parallelism to max number of slots
              // set the job's default parallelism to the maximum number of slots (slots per TaskManager * number of TaskManagers)
          final int slotsPerTaskManager = executorServiceConfig.getInteger(
              TaskManagerOptions.NUM_TASK_SLOTS, plan.getMaximumParallelism());
          final int numTaskManagers = executorServiceConfig.getInteger(
              ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 1);
      
          plan.setDefaultParallelism(slotsPerTaskManager * numTaskManagers);
        }
      }
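
      End to end, driving the executor by hand looks roughly like this (my own sketch; normally ExecutionEnvironment#execute does this for you):

      import org.apache.flink.api.common.JobExecutionResult;
      import org.apache.flink.api.common.Plan;
      import org.apache.flink.api.java.ExecutionEnvironment;
      import org.apache.flink.api.java.io.DiscardingOutputFormat;

      public class LocalExecutorExample {
        public static void main(String[] args) throws Exception {
          ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
          env.fromElements(1, 2, 3)
            .map(i -> i * 2)
            .output(new DiscardingOutputFormat<>());

          // turn the program into a Plan and hand it to the executor
          Plan plan = env.createProgramPlan();
          JobExecutionResult result = new LocalExecutor().executePlan(plan);
          System.out.println("Net runtime: " + result.getNetRuntime() + " ms");
        }
      }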
      
      
      • RemoteExecutor.java remote executor
      package org.apache.flink.client;
      
      import org.apache.flink.api.common.JobExecutionResult;
      import org.apache.flink.api.common.Plan;
      import org.apache.flink.api.common.PlanExecutor;
      import org.apache.flink.client.program.ClusterClient;
      import org.apache.flink.client.program.rest.RestClusterClient;
      import org.apache.flink.configuration.Configuration;
      import org.apache.flink.configuration.JobManagerOptions;
      import org.apache.flink.configuration.RestOptions;
      import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;
      
      import java.net.InetSocketAddress;
      import java.net.URL;
      import java.util.Collections;
      import java.util.List;
      
      import static org.apache.flink.util.Preconditions.checkNotNull;
      
       /**
        * The RemoteExecutor is a {@link org.apache.flink.api.common.PlanExecutor} that takes the program and ships it to a remote Flink cluster for execution.
        *
        * <p>The RemoteExecutor is pointed at the JobManager and gets the program and (if
        * necessary) the set of libraries that need to be shipped together with the program.</p>
        *
        * <p>The RemoteExecutor is used in the {@link
        * org.apache.flink.api.java.RemoteEnvironment} to remotely execute program parts.</p>
        */
      public class RemoteExecutor extends PlanExecutor {
      
          // list of JAR files
        private final List<URL> jarFiles;
        // list of global classpath URLs
        private final List<URL> globalClasspaths;
        // client configuration
        private final Configuration clientConfiguration;
      
        private int defaultParallelism = 1;
      
        public RemoteExecutor(String hostname, int port) {
          this(hostname, port, new Configuration(), Collections.emptyList(), Collections.emptyList());
        }
      
        public RemoteExecutor(
          String hostname,
          int port,
          Configuration clientConfiguration,
          List<URL> jarFiles,
          List<URL> globalClasspaths) {
          this(new InetSocketAddress(hostname, port), clientConfiguration, jarFiles, globalClasspaths);
        }
      
        public RemoteExecutor(
          InetSocketAddress inet,
          Configuration clientConfiguration,
          List<URL> jarFiles,
          List<URL> globalClasspaths) {
          this.clientConfiguration = clientConfiguration;
          this.jarFiles = jarFiles;
          this.globalClasspaths = globalClasspaths;
      
          clientConfiguration.setString(JobManagerOptions.ADDRESS, inet.getHostName());
          clientConfiguration.setInteger(JobManagerOptions.PORT, inet.getPort());
          clientConfiguration.setInteger(RestOptions.PORT, inet.getPort());
        }
      
        // ------------------------------------------------------------------------
        //  Properties
        // ------------------------------------------------------------------------
      
        /**
          * Sets the parallelism that will be used when the program does not define
          * any parallelism at all.
         *
         * @param defaultParallelism The default parallelism for the executor.
         */
        public void setDefaultParallelism(int defaultParallelism) {
          if (defaultParallelism < 1) {
            throw new IllegalArgumentException("The default parallelism must be at least one");
          }
          this.defaultParallelism = defaultParallelism;
        }
      
        /**
          * Gets the parallelism that will be used when the program does not define
          * any parallelism at all.
         *
         * @return The default parallelism for the executor.
         */
        public int getDefaultParallelism() {
          return defaultParallelism;
        }
      
        // ------------------------------------------------------------------------
        //  Executing programs
        // ------------------------------------------------------------------------
      
        @Override
        public JobExecutionResult executePlan(Plan plan) throws Exception {
              // the plan must not be null
          checkNotNull(plan);
      
          try (ClusterClient<?> client = new RestClusterClient<>(clientConfiguration, "RemoteExecutor")) {
                  // build the user-code class loader (parent-first delegation)
            ClassLoader classLoader = ClientUtils.buildUserCodeClassLoader(jarFiles, globalClasspaths, getClass().getClassLoader());
      
                  // submit the job and return the job execution result
            return client.run(
              plan,
              jarFiles,
              globalClasspaths,
              classLoader,
              defaultParallelism,
              SavepointRestoreSettings.none()).getJobExecutionResult();
          }
        }
      }
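
      A hedged usage sketch (host, port and the plan-building step are placeholders):

      import org.apache.flink.api.common.JobExecutionResult;
      import org.apache.flink.api.common.Plan;

      public class RemoteExecutorExample {
        public static void main(String[] args) throws Exception {
          // point the executor at a JobManager / REST endpoint
          RemoteExecutor executor = new RemoteExecutor("jobmanager-host", 8081);
          executor.setDefaultParallelism(4);

          Plan plan = buildPlan(); // e.g. via ExecutionEnvironment#createProgramPlan()
          JobExecutionResult result = executor.executePlan(plan);
          System.out.println("Net runtime: " + result.getNetRuntime() + " ms");
        }

        private static Plan buildPlan() {
          throw new UnsupportedOperationException("placeholder");
        }
      }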
      
      
      • org.apache.flink.client.cli

        • CustomCommandLine custom command-line interface

          package org.apache.flink.client.cli;
          
          import org.apache.flink.client.deployment.ClusterDescriptor;
          import org.apache.flink.client.deployment.ClusterSpecification;
          import org.apache.flink.util.FlinkException;
          
          import org.apache.commons.cli.CommandLine;
          import org.apache.commons.cli.Options;
          
          import javax.annotation.Nullable;
          
          /**
           * Custom command-line interface to load hooks for the command-line interface.
           */
          public interface CustomCommandLine<T> {
          
            /**
             * Signals whether the custom command-line wants to execute or not.
             * @param commandLine The command-line options
             * @return True if the command-line wants to run, False otherwise
             */
            boolean isActive(CommandLine commandLine);
          
            /**
             * Gets the unique identifier of this CustomCommandLine.
             * @return A unique identifier
             */
            String getId();
          
            /**
             * Adds custom options to the existing run options.
             * @param baseOptions The existing options.
             */
            void addRunOptions(Options baseOptions);
          
            /**
             * Adds custom options to the existing general options.
             * @param baseOptions The existing options.
             */
            void addGeneralOptions(Options baseOptions);
          
             /**
              * Create a {@link ClusterDescriptor} from the given configuration,
              * configuration directory and the command line.
              *
              * @param commandLine containing command line options relevant for the
              * ClusterDescriptor
              *
              * @return ClusterDescriptor
              * @throws FlinkException if the ClusterDescriptor could not be created
              */
            ClusterDescriptor<T> createClusterDescriptor(CommandLine commandLine) throws FlinkException;
          
             /**
              * Returns the cluster id if a cluster id was specified on the command line,
              * otherwise it returns null.
              *
              * <p>A cluster id identifies a running cluster, e.g. the Yarn application id
              * for a Flink cluster running on Yarn.
              *
              * @param commandLine containing command line options relevant for the cluster
              * id retrieval
              * @return Cluster id identifying the cluster to deploy jobs to or null
              */
            @Nullable
            T getClusterId(CommandLine commandLine);
          
             /**
              * Returns the {@link ClusterSpecification} specified by the configuration and
              * the command line options. This specification can be used to deploy a new
              * Flink cluster.
              *
              * @param commandLine containing command line options relevant for the
              * ClusterSpecification
              *
              * @return ClusterSpecification for a new Flink cluster
              * @throws FlinkException if the ClusterSpecification could not be created
              */
            ClusterSpecification getClusterSpecification(CommandLine commandLine) throws FlinkException;
          
               // default implementation: parse the command line options
               // (stopAtNonOptions: whether parsing stops at the first unrecognized argument)
             default CommandLine parseCommandLineOptions(String[] args, boolean stopAtNonOptions) throws CliArgsException {
               final Options options = new Options();
                   // add the general options
               addGeneralOptions(options);
                   // add the run options
               addRunOptions(options);
                   // let the CliFrontendParser parse the options and arguments into a CommandLine
              return CliFrontendParser.parse(options, args, stopAtNonOptions);
            }
          }
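
          A minimal sketch of an implementation (the option name and id are invented; the three factory methods are left abstract):

          import org.apache.flink.client.cli.CustomCommandLine;

          import org.apache.commons.cli.CommandLine;
          import org.apache.commons.cli.Option;
          import org.apache.commons.cli.Options;

          public abstract class MyCommandLine<T> implements CustomCommandLine<T> {

            private final Option activateOption = new Option("myCluster", false, "Use my cluster");

            @Override
            public boolean isActive(CommandLine commandLine) {
              // this command line takes over when -myCluster is passed
              return commandLine.hasOption(activateOption.getOpt());
            }

            @Override
            public String getId() {
              return "my-cluster";
            }

            @Override
            public void addGeneralOptions(Options baseOptions) {
              baseOptions.addOption(activateOption);
            }

            @Override
            public void addRunOptions(Options baseOptions) {
              // no extra run options
            }
            // createClusterDescriptor / getClusterId / getClusterSpecification omitted
          }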
          
          
        • AbstractCustomCommandLine abstract custom command line (abstract class)

        
        package org.apache.flink.client.cli;
        
        import org.apache.flink.configuration.Configuration;
        import org.apache.flink.configuration.HighAvailabilityOptions;
        import org.apache.flink.configuration.UnmodifiableConfiguration;
        import org.apache.flink.util.FlinkException;
        import org.apache.flink.util.NetUtils;
        import org.apache.flink.util.Preconditions;
        
        import org.apache.commons.cli.CommandLine;
        import org.apache.commons.cli.Option;
        import org.apache.commons.cli.Options;
        
        import java.net.InetSocketAddress;
        
        import static org.apache.flink.client.cli.CliFrontend.setJobManagerAddressInConfig;
        
        /**
         * Base class for {@link CustomCommandLine} implementations which specify a JobManager 
         * address and a ZooKeeper namespace.
         */
        public abstract class AbstractCustomCommandLine<T> implements CustomCommandLine<T> {
        
            // Option constructor: short option name, long option name, hasArg, description
          protected final Option zookeeperNamespaceOption = new Option("z", "zookeeperNamespace", true,
            "Namespace to create the Zookeeper sub-paths for high availability mode");
        
        
          protected final Option addressOption = new Option("m", "jobmanager", true,
            "Address of the JobManager (master) to which to connect. " +
              "Use this flag to connect to a different JobManager than the one specified in the configuration.");
        
          protected final Configuration configuration;
          
          protected AbstractCustomCommandLine(Configuration configuration) {
                // check the configuration with Preconditions.checkNotNull, then wrap it so it can no longer be modified
            this.configuration = new UnmodifiableConfiguration(Preconditions.checkNotNull(configuration));
          }
        
          public Configuration getConfiguration() {
            return configuration;
          }
        
          @Override
          public void addRunOptions(Options baseOptions) {
            // nothing to add here
          }
        
          @Override
          public void addGeneralOptions(Options baseOptions) {
            baseOptions.addOption(addressOption);
            baseOptions.addOption(zookeeperNamespaceOption);
          }
        
           /**
            * Override configuration settings by specified command line options.
            *
            * @param commandLine containing the overriding values
            * @return Effective configuration with the overridden configuration settings
            */
            // applyCommandLineOptionsToConfiguration: applies the command line options on top of the base configuration
          protected Configuration applyCommandLineOptionsToConfiguration(CommandLine commandLine) throws FlinkException {
            final Configuration resultingConfiguration = new Configuration(configuration);
        
                // if the command line carries the address option (-m)
            if (commandLine.hasOption(addressOption.getOpt())) {
                    // read the host:port string
              String addressWithPort = commandLine.getOptionValue(addressOption.getOpt());
                    // parse it into an InetSocketAddress
              InetSocketAddress jobManagerAddress = NetUtils.parseHostPortAddress(addressWithPort);
                    // set the JobManager address in the resulting configuration
              setJobManagerAddressInConfig(resultingConfiguration, jobManagerAddress);
            }
            }
            //  if the command line carries the ZooKeeper namespace option (-z)
            if (commandLine.hasOption(zookeeperNamespaceOption.getOpt())) {
                    // read the ZooKeeper namespace
              String zkNamespace = commandLine.getOptionValue(zookeeperNamespaceOption.getOpt());
                    // store the namespace under the high-availability cluster-id key
                    // (note: setString internally delegates to setValueInternal(key.key(), value), which is easy to miss)
              resultingConfiguration.setString(HighAvailabilityOptions.HA_CLUSTER_ID, zkNamespace);
            }
            }
        
            return resultingConfiguration;
          }
        }
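
        To make the overriding concrete, a standalone commons-cli sketch of how the -m and -z options are parsed (my own example, not Flink code):

        import org.apache.commons.cli.CommandLine;
        import org.apache.commons.cli.DefaultParser;
        import org.apache.commons.cli.Option;
        import org.apache.commons.cli.Options;

        public class OptionParsingExample {
          public static void main(String[] args) throws Exception {
            Options options = new Options();
            options.addOption(new Option("m", "jobmanager", true, "Address of the JobManager"));
            options.addOption(new Option("z", "zookeeperNamespace", true, "ZooKeeper namespace"));

            CommandLine cmd = new DefaultParser().parse(
              options, new String[] {"-m", "host:6123", "-z", "my-namespace"});

            System.out.println(cmd.getOptionValue("m")); // host:6123
            System.out.println(cmd.getOptionValue("z")); // my-namespace
          }
        }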
        
        
      
      • CliArgsException client arguments exception

        package org.apache.flink.client.cli;
        
        /**
         * Special exception that is thrown when the command line parsing fails.
         */
        public class CliArgsException extends Exception {
        
          private static final long serialVersionUID = 1L;
        
          public CliArgsException(String message) {
            super(message);
          }
        
          public CliArgsException(String message, Throwable cause) {
            super(message, cause);
          }
        }
      
      • CliFrontend

        package org.apache.flink.client.cli;
        
        import org.apache.flink.api.common.ExecutionConfig;
        import org.apache.flink.api.common.InvalidProgramException;
        import org.apache.flink.api.common.JobExecutionResult;
        import org.apache.flink.api.common.JobID;
        import org.apache.flink.api.common.JobSubmissionResult;
        import org.apache.flink.api.common.accumulators.AccumulatorHelper;
        import org.apache.flink.client.deployment.ClusterDescriptor;
        import org.apache.flink.client.deployment.ClusterSpecification;
        import org.apache.flink.client.program.ClusterClient;
        import org.apache.flink.client.program.PackagedProgram;
        import org.apache.flink.client.program.PackagedProgramUtils;
        import org.apache.flink.client.program.ProgramInvocationException;
        import org.apache.flink.client.program.ProgramMissingJobException;
        import org.apache.flink.client.program.ProgramParametrizationException;
        import org.apache.flink.configuration.ConfigConstants;
        import org.apache.flink.configuration.Configuration;
        import org.apache.flink.configuration.CoreOptions;
        import org.apache.flink.configuration.GlobalConfiguration;
        import org.apache.flink.configuration.JobManagerOptions;
        import org.apache.flink.configuration.RestOptions;
        import org.apache.flink.core.fs.FileSystem;
        import org.apache.flink.core.plugin.PluginUtils;
        import org.apache.flink.optimizer.DataStatistics;
        import org.apache.flink.optimizer.Optimizer;
        import org.apache.flink.optimizer.costs.DefaultCostEstimator;
        import org.apache.flink.optimizer.plan.FlinkPlan;
        import org.apache.flink.optimizer.plan.OptimizedPlan;
        import org.apache.flink.optimizer.plan.StreamingPlan;
        import org.apache.flink.optimizer.plandump.PlanJSONDumpGenerator;
        import org.apache.flink.runtime.akka.AkkaUtils;
        import org.apache.flink.runtime.client.JobStatusMessage;
        import org.apache.flink.runtime.jobgraph.JobGraph;
        import org.apache.flink.runtime.jobgraph.JobStatus;
        import org.apache.flink.runtime.messages.Acknowledge;
        import org.apache.flink.runtime.security.SecurityConfiguration;
        import org.apache.flink.runtime.security.SecurityUtils;
        import org.apache.flink.runtime.util.EnvironmentInformation;
        import org.apache.flink.util.ExceptionUtils;
        import org.apache.flink.util.FlinkException;
        import org.apache.flink.util.Preconditions;
        import org.apache.flink.util.ShutdownHookUtil;
        
        import org.apache.commons.cli.CommandLine;
        import org.apache.commons.cli.Options;
        import org.slf4j.Logger;
        import org.slf4j.LoggerFactory;
        
        import java.io.File;
        import java.io.FileNotFoundException;
        import java.lang.reflect.Constructor;
        import java.lang.reflect.InvocationTargetException;
        import java.lang.reflect.UndeclaredThrowableException;
        import java.net.InetSocketAddress;
        import java.net.URL;
        import java.text.SimpleDateFormat;
        import java.time.Duration;
        import java.util.ArrayList;
        import java.util.Arrays;
        import java.util.Collection;
        import java.util.Comparator;
        import java.util.Date;
        import java.util.List;
        import java.util.Map;
        import java.util.concurrent.CompletableFuture;
        import java.util.concurrent.TimeUnit;
        import java.util.stream.Collectors;
        
        /**
         * Implementation of a simple command line frontend for executing 
         * programs.
         */
        public class CliFrontend {
        
            // logger
          private static final Logger LOG = LoggerFactory.getLogger(CliFrontend.class);
        
          // actions: run, info, list, cancel, stop, savepoint
          private static final String ACTION_RUN = "run";
          private static final String ACTION_INFO = "info";
          private static final String ACTION_LIST = "list";
          private static final String ACTION_CANCEL = "cancel";
          private static final String ACTION_STOP = "stop";
          private static final String ACTION_SAVEPOINT = "savepoint";
        
          // configuration dir parameters (fallback locations)
          private static final String CONFIG_DIRECTORY_FALLBACK_1 = "../conf";
          private static final String CONFIG_DIRECTORY_FALLBACK_2 = "conf";
        
          // --------------------------------------------------------------------------------------------
        
          private final Configuration configuration;
        
            // list of custom command lines (with generics)
          private final List<CustomCommandLine<?>> customCommandLines;
        
            // custom command line options
          private final Options customCommandLineOptions;
          
            // client timeout duration
          private final Duration clientTimeout;
        
            // default parallelism
          private final int defaultParallelism;
        
          public CliFrontend(
              Configuration configuration,
              List<CustomCommandLine<?>> customCommandLines) {
            this.configuration = Preconditions.checkNotNull(configuration);
            this.customCommandLines = Preconditions.checkNotNull(customCommandLines);
        
                // initialize the FileSystem with the configuration and a plugin manager created from the root folder
            FileSystem.initialize(configuration, PluginUtils.createPluginManagerFromRootFolder(configuration));
        
            this.customCommandLineOptions = new Options();
        
                // iterate over the custom command lines
            for (CustomCommandLine<?> customCommandLine : customCommandLines) {
              customCommandLine.addGeneralOptions(customCommandLineOptions);
              customCommandLine.addRunOptions(customCommandLineOptions);
            }
        
                // obtain the client timeout via AkkaUtils (Akka: a Scala toolkit that simplifies writing fault-tolerant, highly scalable actor-model applications for Java and Scala)
            this.clientTimeout = AkkaUtils.getClientTimeout(this.configuration);
            this.defaultParallelism = configuration.getInteger(CoreOptions.DEFAULT_PARALLELISM);
          }
        
          // --------------------------------------------------------------------------------------------
          //  Getter & Setter
          // --------------------------------------------------------------------------------------------
        
          /**
           * Getter which returns a copy of the associated configuration.
           * @return Copy of the associated configuration
           */
          public Configuration getConfiguration() {
            Configuration copiedConfiguration = new Configuration();
        
            copiedConfiguration.addAll(configuration);
        
            return copiedConfiguration;
          }
        
          public Options getCustomCommandLineOptions() {
            return customCommandLineOptions;
          }
        
          // --------------------------------------------------------------------------------------------
          //  Execute Actions
          // --------------------------------------------------------------------------------------------
        
          /**
           * Executes the run action.
           *
           * @param args Command line arguments for the run action.
           */
          protected void run(String[] args) throws Exception {
            LOG.info("Running 'run' command.");
        
                // get the run command options from the CliFrontendParser
            final Options commandOptions = CliFrontendParser.getRunCommandOptions();
        
                // merge the command options with the custom command line options
            final Options commandLineOptions = CliFrontendParser.mergeOptions(commandOptions, customCommandLineOptions);
        
                // parse the command line options; true = stop at the first unrecognized argument
            final CommandLine commandLine = CliFrontendParser.parse(commandLineOptions, args, true);
        
            final RunOptions runOptions = new RunOptions(commandLine);
        
            // evaluate help flag
                  // if help was requested, print the run help and return
            if (runOptions.isPrintHelp()) {
              CliFrontendParser.printHelpForRun(customCommandLines);
              return;
            }
             // if the program is not a Python program
            if (!runOptions.isPython()) {
              // Java program should be specified a JAR file
                         // if the JAR file path is missing, raise a CliArgsException
              if (runOptions.getJarFilePath() == null) {
                throw new CliArgsException("Java program should be specified a JAR file.");
              }
            }
        
                  // the packaged program
            final PackagedProgram program;
            try {
                    // log that we are building the program
              LOG.info("Building program from JAR file");
                    // build the program from the run options
              program = buildProgram(runOptions);
            }
            catch (FileNotFoundException e) {
              throw new CliArgsException("Could not build the program from JAR file.", e);
            }
            // get the active custom command line
            final CustomCommandLine<?> customCommandLine = getActiveCustomCommandLine(commandLine);
        
            try {
              runProgram(customCommandLine, commandLine, runOptions, program);
            } finally {
                    // finally, delete the extracted libraries
              program.deleteExtractedLibraries();
            }
          }
        
            // runs the program; parameters: custom command line, parsed command line, run options, packaged program
            // may throw ProgramInvocationException / FlinkException
          private <T> void runProgram(
              CustomCommandLine<T> customCommandLine,
              CommandLine commandLine,
              RunOptions runOptions,
              PackagedProgram program) throws ProgramInvocationException, FlinkException {
                // create the cluster descriptor from the command line
            final ClusterDescriptor<T> clusterDescriptor = customCommandLine.createClusterDescriptor(commandLine);
        
            try {
                    // get the cluster id from the command line
              final T clusterId = customCommandLine.getClusterId(commandLine);
        
                    // the cluster client
              final ClusterClient<T> client;
        
              // directly deploy the job if the cluster is started in job mode and detached
                       // i.e. the cluster id is null and detached mode was requested
              if (clusterId == null && runOptions.getDetachedMode()) {
                        // if the run options carry no parallelism (-1), use the default parallelism
                int parallelism = runOptions.getParallelism() == -1 ? defaultParallelism : runOptions.getParallelism();
        
                        // create the JobGraph from the packaged program, the configuration and the parallelism
                final JobGraph jobGraph = PackagedProgramUtils.createJobGraph(program, configuration, parallelism);
        
                        // get the final cluster specification from the command line
                final ClusterSpecification clusterSpecification = customCommandLine.getClusterSpecification(commandLine);
                        // deploy the job cluster via the cluster descriptor
                client = clusterDescriptor.deployJobCluster(
                  clusterSpecification,
                  jobGraph,
                  runOptions.getDetachedMode()); // detached mode
        
                        // log and print that the job has been submitted
                logAndSysout("Job has been submitted with JobID " + jobGraph.getJobID());
        
                try {
                            // close the client
                  client.close();
                } catch (Exception e) {
                  LOG.info("Could not properly shut down the client.", e);
                }
              } else {
                        // shutdown hook: on program exit, run final checks so that an in-flight
                        // operation X stays atomic and the program shuts down gracefully:
                        // if an operation X is still running, wait for it to finish (accepting no
                        // new X meanwhile; if it runs too long, abort and roll back all state);
                        // if nothing is running, exit immediately.
        
                final Thread shutdownHook;
                if (clusterId != null) {
                  client = clusterDescriptor.retrieve(clusterId);
                  shutdownHook = null;
                } else {
                  // also in job mode we have to deploy a session cluster because the job might consist of multiple parts (e.g. when using collect)
                  // cluster specification
                  final ClusterSpecification clusterSpecification = customCommandLine.getClusterSpecification(commandLine);
                              // deploy the session cluster
                  client = clusterDescriptor.deploySessionCluster(clusterSpecification);
                  // if not running in detached mode, add a shutdown hook to shut down cluster if client exits
                  // there's a race-condition here if cli is killed before shutdown hook is installed
        
                              // if not in detached mode and shutdown-on-attached-exit is enabled
                  if (!runOptions.getDetachedMode() && runOptions.isShutdownOnAttachedExit()) {
                                  // install the shutdown hook with the cluster-shutdown action, the client class name and the logger
                    shutdownHook = ShutdownHookUtil.addShutdownHook(client::shutDownCluster, client.getClass().getSimpleName(), LOG);
                  } else {
                                // otherwise no shutdown hook is needed
                    shutdownHook = null;
                  }
                }
        
                try {
                              // set the detached mode on the client
                  client.setDetached(runOptions.getDetachedMode());
        
                              // log the savepoint restore settings at debug level
                  LOG.debug("{}", runOptions.getSavepointRestoreSettings());
        
                              // user parallelism
                  int userParallelism = runOptions.getParallelism();
                  LOG.debug("User parallelism is set to {}", userParallelism);
                              // if the user parallelism equals the ExecutionConfig default
                  if (ExecutionConfig.PARALLELISM_DEFAULT == userParallelism) {
                                // fall back to the configured default parallelism
                    userParallelism = defaultParallelism;
                  }
                  // execute the program
                  executeProgram(program, client, userParallelism);
                } finally {
                              // if there is no cluster id and the client is not detached
                  if (clusterId == null && !client.isDetached()) {
                    // terminate the cluster only if we have started it before and if it's not detached
                    try {
                      client.shutDownCluster();
                    } catch (final Exception e) {
                      LOG.info("Could not properly terminate the Flink cluster.", e);
                    }
                    if (shutdownHook != null) {
                      // we do not need the hook anymore as we have just tried to shutdown the cluster.
                      ShutdownHookUtil.removeShutdownHook(shutdownHook, client.getClass().getSimpleName(), LOG);
                    }
                  }
                  try {
                    client.close();
                  } catch (Exception e) {
                    LOG.info("Could not properly shut down the client.", e);
                  }
                }
              }
            } finally {
              try {
                clusterDescriptor.close();
              } catch (Exception e) {
                LOG.info("Could not properly close the cluster descriptor.", e);
              }
            }
          }
        
          /**
           * Executes the info action.
           *
           * @param args Command line arguments for the info action.
           */
          protected void info(String[] args) throws CliArgsException, FileNotFoundException, ProgramInvocationException {
            LOG.info("Running 'info' command.");
        
                // get the info command options from the CliFrontendParser
            final Options commandOptions = CliFrontendParser.getInfoCommandOptions();
        
                // parse the command line; true = stop at the first unrecognized argument
            final CommandLine commandLine = CliFrontendParser.parse(commandOptions, args, true);
        
            InfoOptions infoOptions = new InfoOptions(commandLine);
        
            // evaluate help flag
            if (infoOptions.isPrintHelp()) {
              CliFrontendParser.printHelpForInfo();
              return;
            }
        
                // if the info options carry no JAR file path
            if (infoOptions.getJarFilePath() == null) {
              throw new CliArgsException("The program JAR file was not specified.");
            }
        
            // -------- build the packaged program -------------
        
            LOG.info("Building program from JAR file");
            final PackagedProgram program = buildProgram(infoOptions);
        
            try {
                    // get the parallelism
              int parallelism = infoOptions.getParallelism();
                    // if it equals the ExecutionConfig default parallelism, use the configured default
              if (ExecutionConfig.PARALLELISM_DEFAULT == parallelism) {
                parallelism = defaultParallelism;
              }
        
              LOG.info("Creating program plan dump");
              // create a dump of the program's plan
                     // create the Optimizer from data statistics, the default cost estimator and the configuration
              Optimizer compiler = new Optimizer(new DataStatistics(), new DefaultCostEstimator(), configuration);
                     // compile the FlinkPlan from the optimizer, the packaged program and the parallelism
              FlinkPlan flinkPlan = ClusterClient.getOptimizedPlan(compiler, program, parallelism);
        
                     // the plan as JSON
              String jsonPlan = null;
                      // if the FlinkPlan is an optimized (batch) plan
              if (flinkPlan instanceof OptimizedPlan) {
                        // dump the optimizer plan as JSON via the JSON plan dump generator
                jsonPlan = new PlanJSONDumpGenerator().getOptimizerPlanAsJSON((OptimizedPlan) flinkPlan);
                        // if the FlinkPlan is a streaming plan
              } else if (flinkPlan instanceof StreamingPlan) {
                        // dump the streaming plan as JSON
                jsonPlan = ((StreamingPlan) flinkPlan).getStreamingPlanAsJSON();
              }
        
                // if a JSON plan could be generated, print the execution plan
              if (jsonPlan != null) {
                System.out.println("----------------------- Execution Plan -----------------------");
                System.out.println(jsonPlan);
                System.out.println("--------------------------------------------------------------");
              }
              else {
                System.out.println("JSON plan could not be generated.");
              }
        
                // get the program description
              String description = program.getDescription();
                // if a description is present
              if (description != null) {
                System.out.println();
                System.out.println(description);
              }
                // if there is no description
              else {
                System.out.println();
                System.out.println("No description provided.");
              }
            }
            finally {
                // delete the extracted libraries
              program.deleteExtractedLibraries();
            }
          }
        
          /**
           * Executes the list action.
           *
           * @param args Command line arguments for the list action.
           */
          protected void list(String[] args) throws Exception {
            LOG.info("Running 'list' command.");
        
                // get the list command options from the CliFrontendParser
            final Options commandOptions = CliFrontendParser.getListCommandOptions();
        
                // merge the command options with the custom command line options
            final Options commandLineOptions = CliFrontendParser.mergeOptions(commandOptions, customCommandLineOptions);
        
                // parse the merged options; false = do not stop at the first unrecognized argument
            final CommandLine commandLine = CliFrontendParser.parse(commandLineOptions, args, false);
        
                // create the list options
            ListOptions listOptions = new ListOptions(commandLine);
        
            // evaluate help flag
                   // if help was requested, print the list help and return
            if (listOptions.isPrintHelp()) {
              CliFrontendParser.printHelpForList(customCommandLines);
              return;
            }
        
                // whether to show running jobs
            final boolean showRunning;
                // whether to show scheduled jobs
            final boolean showScheduled;
                // whether to show all jobs
            final boolean showAll;
        
            // print running and scheduled jobs if not option supplied
                  // if no option was supplied, print running and scheduled jobs by default; otherwise follow the supplied flags
            if (!listOptions.showRunning() && !listOptions.showScheduled() && !listOptions.showAll()) {
              showRunning = true;
              showScheduled = true;
              showAll = false;
            } else {
              showRunning = listOptions.showRunning();
              showScheduled = listOptions.showScheduled();
              showAll = listOptions.showAll();
            }
                // get the active custom command line
            final CustomCommandLine<?> activeCommandLine = getActiveCustomCommandLine(commandLine);
        
                 // run the cluster action
            runClusterAction(
              activeCommandLine,
              commandLine,
              clusterClient -> listJobs(clusterClient, showRunning, showScheduled, showAll));
        
          }
             // list the jobs
          private <T> void listJobs(
                   // the cluster client
              ClusterClient<T> clusterClient,
              boolean showRunning,
              boolean showScheduled,
              boolean showAll) throws FlinkException {
                // a collection of job status messages: the job details
            Collection<JobStatusMessage> jobDetails;
            try {
                    /**
                    * Future: 异步计算的未来结果
                    * CompletableFuture可以用于创建异步调用集群的列表任务
                    * JDK5新增了Future接口,用于描述一个异步计算的结果。虽然 Future 以及相关使用方法提供了异步执行任务的能力,但是对于结果的获取却是很不方便,只能通过阻塞或者轮询的方式得到任务的结果。阻塞的方式显然和我们的异步编程的初衷相违背,轮询的方式又会耗费无谓的 CPU 资源,而且也不能及时地得到计算结果。
                    * 在Java8中,CompletableFuture提供了非常强大的Future的扩展功能,可以帮助我们简化异步编程的复杂性,并且提供了函数式编程的能力,可以通过回调的方式处理计算结果,也提供了转换和组合 CompletableFuture 的方法
                    */
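              // For illustration (my own sketch, not part of CliFrontend): the same call could be
              // consumed without blocking, e.g.
              //   clusterClient.listJobs().thenAccept(jobs -> jobs.forEach(System.out::println));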
              CompletableFuture<Collection<JobStatusMessage>> jobDetailsFuture = clusterClient.listJobs();
        
                     // log and print to stdout
              logAndSysout("Waiting for response...");
        
                     // block on the future to obtain the job details
              jobDetails = jobDetailsFuture.get();
        
            } catch (Exception e) {
                    // strip the ExecutionException wrapper to obtain the actual cause
              Throwable cause = ExceptionUtils.stripExecutionException(e);
              throw new FlinkException("Failed to retrieve job list.", cause);
            }
        
            LOG.info("Successfully retrieved list of jobs");
    // running jobs
    final List<JobStatusMessage> runningJobs = new ArrayList<>();
         // scheduled jobs
    final List<JobStatusMessage> scheduledJobs = new ArrayList<>();
         // terminated jobs
    final List<JobStatusMessage> terminatedJobs = new ArrayList<>();
         // bucket every job by its state
    jobDetails.forEach(details -> {
             // jobs still in CREATED state count as scheduled
              if (details.getJobState() == JobStatus.CREATED) {
                  // add to the scheduled list
                scheduledJobs.add(details);
                // a job not in a globally terminal state is running (or restarting)
              } else if (!details.getJobState().isGloballyTerminalState()) {
                 // add to the running list
                runningJobs.add(details);
              } else {
                  // everything else is terminated
                terminatedJobs.add(details);
              }
            });
    // print the running jobs if requested
            if (showRunning || showAll) {
             // nothing running
              if (runningJobs.size() == 0) {
                System.out.println("No running jobs.");
              }
              else {
                System.out.println("------------------ Running/Restarting Jobs -------------------");
                // print the status of each running job
                printJobStatusMessages(runningJobs);
                System.out.println("--------------------------------------------------------------");
              }
            }
         // print the scheduled jobs if requested
            if (showScheduled || showAll) {
             // nothing scheduled
              if (scheduledJobs.size() == 0) {
                System.out.println("No scheduled jobs.");
              }
              else {
                System.out.println("----------------------- Scheduled Jobs -----------------------");
                // print the status of each scheduled job
                printJobStatusMessages(scheduledJobs);
                System.out.println("--------------------------------------------------------------");
              }
            }
        // terminated jobs are only shown with the "all" flag
            if (showAll) {
            // only print the block if any job has terminated
              if (terminatedJobs.size() != 0) {
                System.out.println("---------------------- Terminated Jobs -----------------------");
                 // print the status of each terminated job
                printJobStatusMessages(terminatedJobs);
                System.out.println("--------------------------------------------------------------");
              }
            }
          }
        
    // prints job status messages, grouped by state and ordered by start time
          private static void printJobStatusMessages(List<JobStatusMessage> jobs) {
        // formatter for the start-time column
            SimpleDateFormat dateFormat = new SimpleDateFormat("dd.MM.yyyy HH:mm:ss");
        // compares two jobs by start time (note the int cast; see the note below)
            Comparator<JobStatusMessage> startTimeComparator = (o1, o2) -> (int) (o1.getStartTime() - o2.getStartTime());
        // comparator for map entries keyed by JobStatus (with the list of matching job status messages as value)
        // String.CASE_INSENSITIVE_ORDER is the case-insensitive comparator defined in java.lang.String
        // its CaseInsensitiveComparator walks both strings character by character up to the length of the shorter one, comparing the case-folded characters; only if all of them match does length decide, with the shorter string sorting first
            Comparator<Map.Entry<JobStatus, List<JobStatusMessage>>> statusComparator =
              (o1, o2) -> String.CASE_INSENSITIVE_ORDER.compare(o1.getKey().toString(), o2.getKey().toString());
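        // Editor's note: the int cast in startTimeComparator above can overflow once two
        // start times lie more than Integer.MAX_VALUE milliseconds (~24.8 days) apart,
        // flipping the comparison. An overflow-safe equivalent (a sketch, not the Flink
        // code) would be:
        //
        //   Comparator<JobStatusMessage> safeStartTimeComparator =
        //       Comparator.comparingLong(JobStatusMessage::getStartTime);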
        
        // group the messages into a Map from job state to the jobs in that state, using Collectors.groupingBy from the java.util.stream API (new in JDK 8): collect() gathers the grouped stream elements into the map described by the collector
            Map<JobStatus, List<JobStatusMessage>> jobsByState = jobs.stream().collect(Collectors.groupingBy(JobStatusMessage::getJobState));
        // sort the map entries by status name using statusComparator,
        // take the per-status lists and flatten them into a single stream,
        // then sort by start time and print each job in encounter order
            jobsByState.entrySet().stream()
              .sorted(statusComparator)
              .map(Map.Entry::getValue).flatMap(List::stream).sorted(startTimeComparator)
              .forEachOrdered(job ->
                System.out.println(dateFormat.format(new Date(job.getStartTime()))
                  + " : " + job.getJobId() + " : " + job.getJobName()
                  + " (" + job.getJobState() + ")"));
          }
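
  // A minimal, self-contained sketch (editor's illustration, not Flink code) of the
  // same stream pipeline as above: group, sort the groups by key, flatten, then sort
  // again. Note that the final sorted() re-sorts the whole flattened stream; since
  // Stream.sorted is stable, elements that compare equal keep the earlier group order.
  private static void demoGroupSortFlatten() {
    List<String> words = Arrays.asList("bb", "a", "ccc", "dd", "e");
    // group by length, mirroring Collectors.groupingBy(JobStatusMessage::getJobState)
    Map<Integer, List<String>> byLength =
        words.stream().collect(Collectors.groupingBy(String::length));
    byLength.entrySet().stream()
        .sorted(Map.Entry.comparingByKey())    // like statusComparator
        .map(Map.Entry::getValue)
        .flatMap(List::stream)
        .sorted()                              // like startTimeComparator
        .forEachOrdered(System.out::println);  // prints a, bb, ccc, dd, e
  }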
        
          /**
           * Executes the STOP action.
           * @param args Command line arguments for the stop action.
           */
          protected void stop(String[] args) throws Exception {
            LOG.info("Running 'stop-with-savepoint' command.");
        
        // get the stop command options from CliFrontendParser
            final Options commandOptions = CliFrontendParser.getStopCommandOptions();
                
        // merge them with the custom command line options
            final Options commandLineOptions = CliFrontendParser.mergeOptions(commandOptions, customCommandLineOptions);
                
         // parse; 'false' again means parsing does not stop at non-option arguments
            final CommandLine commandLine = CliFrontendParser.parse(commandLineOptions, args, false);
        
        // build the StopOptions
            final StopOptions stopOptions = new StopOptions(commandLine);
        // print help and return if requested
            if (stopOptions.isPrintHelp()) {
              CliFrontendParser.printHelpForStop(customCommandLines);
              return;
            }
        
        // the remaining (cleaned) arguments
            final String[] cleanedArgs = stopOptions.getArgs();
    // if the savepoint flag was given and arguments remain, take the explicit
         // target directory; otherwise leave it null
            final String targetDirectory = stopOptions.hasSavepointFlag() && cleanedArgs.length > 0
                ? stopOptions.getTargetDirectory()
                : null; // the default savepoint location is going to be used in this case.
                          // null means the configured default savepoint location is used
        
        // the job id comes from the first remaining argument if there is one, otherwise from whatever was passed as the target directory
            final JobID jobId = cleanedArgs.length != 0
                ? parseJobId(cleanedArgs[0])
                : parseJobId(stopOptions.getTargetDirectory());
        
        // whether to advance the watermark to the end of event time (drain) before stopping
            final boolean advanceToEndOfEventTime = stopOptions.shouldAdvanceToEndOfEventTime();
        
            logAndSysout((advanceToEndOfEventTime ? "Draining job " : "Suspending job ") + "\"" + jobId + "\" with a savepoint.");
        
        // get the active custom command line
            final CustomCommandLine<?> activeCommandLine = getActiveCustomCommandLine(commandLine);
        // run the stop-with-savepoint action against the cluster
            runClusterAction(
              activeCommandLine,
              commandLine,
              clusterClient -> {
                final String savepointPath;
                try {
                      // stop the job with a savepoint, given the job id, the drain flag and the target directory; returns the savepoint path
                  savepointPath = clusterClient.stopWithSavepoint(jobId, advanceToEndOfEventTime, targetDirectory);
                } catch (Exception e) {
                  throw new FlinkException("Could not stop with a savepoint job \"" + jobId + "\".", e);
                }
                logAndSysout("Savepoint completed. Path: " + savepointPath);
              });
          }
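
  // Hedged usage sketch for the stop action above; option names are taken from the
  // CliFrontendParser definitions quoted later in these notes, ids/paths are made up:
  //
  //   ./bin/flink stop <jobID>                              # savepoint to the configured default directory
  //   ./bin/flink stop -p hdfs:///flink/savepoints <jobID>  # explicit savepoint path
  //   ./bin/flink stop -d <jobID>                           # drain: advance to end of event time first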
        
          /**
           * Executes the CANCEL action.
           *
           * @param args Command line arguments for the cancel action.
           */
          protected void cancel(String[] args) throws Exception {
            LOG.info("Running 'cancel' command.");
        
        // get the cancel command options from CliFrontendParser
            final Options commandOptions = CliFrontendParser.getCancelCommandOptions();
        
        // merge them with the custom command line options
            final Options commandLineOptions = CliFrontendParser.mergeOptions(commandOptions, customCommandLineOptions);
        
        // parse; again without stopping at non-option arguments
            final CommandLine commandLine = CliFrontendParser.parse(commandLineOptions, args, false);
        
        // build the CancelOptions
            CancelOptions cancelOptions = new CancelOptions(commandLine);
        
    // evaluate help flag
            if (cancelOptions.isPrintHelp()) {
              CliFrontendParser.printHelpForCancel(customCommandLines);
              return;
            }
        
        // get the active custom command line
            final CustomCommandLine<?> activeCommandLine = getActiveCustomCommandLine(commandLine);
        
        // the remaining (cleaned) arguments
            final String[] cleanedArgs = cancelOptions.getArgs();
        
        // cancel with a savepoint
            if (cancelOptions.isWithSavepoint()) {
        
            // the deprecation warning is all bark: the command still executes fine......
              logAndSysout("DEPRECATION WARNING: Cancelling a job with savepoint is deprecated. Use \"stop\" instead.");
        
             // job ID
              final JobID jobId;
             // savepoint target directory
              final String targetDirectory;
        
            // if arguments remain, the first one is the job id
      if (cleanedArgs.length > 0) {
        jobId = parseJobId(cleanedArgs[0]);
                 // and the target directory comes from the options
        targetDirectory = cancelOptions.getSavepointTargetDirectory();
      } else {
                  // otherwise what was passed as the "target directory" must be the job id
        jobId = parseJobId(cancelOptions.getSavepointTargetDirectory());
                 // and no explicit target directory was given
        targetDirectory = null;
              }
        
            // report whether the default or an explicit savepoint directory will be used
              if (targetDirectory == null) {
                logAndSysout("Cancelling job " + jobId + " with savepoint to default savepoint directory.");
              } else {
                logAndSysout("Cancelling job " + jobId + " with savepoint to " + targetDirectory + '.');
              }
        
            // run the cancel-with-savepoint action against the cluster
              runClusterAction(
                activeCommandLine,
                commandLine,
                clusterClient -> {
                  final String savepointPath;
                  try {
                    savepointPath = clusterClient.cancelWithSavepoint(jobId, targetDirectory);
                  } catch (Exception e) {
                    throw new FlinkException("Could not cancel job " + jobId + '.', e);
                  }
                  logAndSysout("Cancelled job " + jobId + ". Savepoint stored in " + savepointPath + '.');
                });
            } else {
              final JobID jobId;
        
            // a plain cancel requires an explicit job id
              if (cleanedArgs.length > 0) {
                jobId = parseJobId(cleanedArgs[0]);
              } else {
                throw new CliArgsException("Missing JobID. Specify a JobID to cancel a job.");
              }
        
              logAndSysout("Cancelling job " + jobId + '.');
        
            // run the plain cancel action against the cluster
              runClusterAction(
                activeCommandLine,
                commandLine,
                clusterClient -> {
                  try {
                    clusterClient.cancel(jobId);
                  } catch (Exception e) {
                    throw new FlinkException("Could not cancel job " + jobId + '.', e);
                  }
                });
        
              logAndSysout("Cancelled job " + jobId + '.');
            }
          }
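
  // Hedged usage sketch for the cancel action (ids/paths made up):
  //
  //   ./bin/flink cancel <jobID>                              # plain cancel
  //   ./bin/flink cancel -s <jobID>                           # deprecated: cancel with savepoint, default directory
  //   ./bin/flink cancel -s hdfs:///flink/savepoints <jobID>  # deprecated: explicit target directory
  //
  // As the warning above says, prefer "stop" when a savepoint is wanted.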
        
          /**
           * Executes the SAVEPOINT action.
           * @param args Command line arguments for the savepoint action.
           */
          protected void savepoint(String[] args) throws Exception {
            LOG.info("Running 'savepoint' command.");
        
        // get the savepoint command options from CliFrontendParser
            final Options commandOptions = CliFrontendParser.getSavepointCommandOptions();
        
        // merge them with the custom command line options
            final Options commandLineOptions = CliFrontendParser.mergeOptions(commandOptions, customCommandLineOptions);
        
        // parse; again without stopping at non-option arguments
            final CommandLine commandLine = CliFrontendParser.parse(commandLineOptions, args, false);
        
        // build the SavepointOptions
            final SavepointOptions savepointOptions = new SavepointOptions(commandLine);
        
    // evaluate help flag
            if (savepointOptions.isPrintHelp()) {
              CliFrontendParser.printHelpForSavepoint(customCommandLines);
              return;
            }
    // get the active custom command line
            final CustomCommandLine<?> activeCommandLine = getActiveCustomCommandLine(commandLine);
        
        // -d/--dispose: dispose of an existing savepoint instead of triggering a new one
            if (savepointOptions.isDispose()) {
              runClusterAction(
                activeCommandLine,
                commandLine,
                 // send a savepoint disposal request to the JobManager
                clusterClient -> disposeSavepoint(clusterClient, savepointOptions.getSavepointPath()));
            } else {
            // the remaining (cleaned) arguments
              String[] cleanedArgs = savepointOptions.getArgs();
        
              final JobID jobId;
        
             // the first remaining argument is the job id
      if (cleanedArgs.length >= 1) {
        String jobIdString = cleanedArgs[0];

                  // parseJobId internally calls JobID.fromHexString, which turns the
                  // hex string into a byte array and passes it to the JobID constructor
        jobId = parseJobId(jobIdString);
              } else {
                throw new CliArgsException("Missing JobID. " +
                  "Specify a Job ID to trigger a savepoint.");
              }
        
             // optional savepoint target directory
              final String savepointDirectory;
                    
             // the second remaining argument, if present, is the savepoint target directory
      if (cleanedArgs.length >= 2) {
                savepointDirectory = cleanedArgs[1];
              } else {
                savepointDirectory = null;
              }
        
      // Print superfluous arguments
              if (cleanedArgs.length >= 3) {
                logAndSysout("Provided more arguments than required. Ignoring not needed arguments.");
              }
        
             // trigger the savepoint against the cluster
              runClusterAction(
                activeCommandLine,
                commandLine,
                clusterClient -> triggerSavepoint(clusterClient, jobId, savepointDirectory));
            }
        
          }
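
  // Hedged usage sketch for the savepoint action (ids/paths made up):
  //
  //   ./bin/flink savepoint <jobID>                                     # trigger, default directory
  //   ./bin/flink savepoint <jobID> hdfs:///flink/savepoints            # trigger, explicit directory
  //   ./bin/flink savepoint -d hdfs:///flink/savepoints/savepoint-1537  # dispose an existing savepoint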
        
          /**
           * Sends a SavepointTriggerMessage to the job manager.
           */
          private String triggerSavepoint(ClusterClient<?> clusterClient, JobID jobId, String savepointDirectory) throws FlinkException {
            logAndSysout("Triggering savepoint for job " + jobId + '.');
        // again a CompletableFuture: the savepoint is triggered asynchronously
            CompletableFuture<String> savepointPathFuture = clusterClient.triggerSavepoint(jobId, savepointDirectory);
        
            logAndSysout("Waiting for response...");
        
            final String savepointPath;
        
            try {
             // block until the savepoint path comes back
              savepointPath = savepointPathFuture.get();
            }
            catch (Exception e) {
              Throwable cause = ExceptionUtils.stripExecutionException(e);
              throw new FlinkException("Triggering a savepoint for the job " + jobId + " failed.", cause);
            }
        
            logAndSysout("Savepoint completed. Path: " + savepointPath);
            logAndSysout("You can resume your program from this savepoint with the run command.");
        
            return savepointPath;
          }
        
          /**
           * Sends a SavepointDisposalRequest to the job manager.
           */
          private void disposeSavepoint(ClusterClient<?> clusterClient, String savepointPath) throws FlinkException {
            Preconditions.checkNotNull(savepointPath, "Missing required argument: savepoint path. " +
              "Usage: bin/flink savepoint -d <savepoint-path>");
        
            logAndSysout("Disposing savepoint '" + savepointPath + "'.");
        
            final CompletableFuture<Acknowledge> disposeFuture = clusterClient.disposeSavepoint(savepointPath);
        
            logAndSysout("Waiting for response...");
        
            try {
              disposeFuture.get(clientTimeout.toMillis(), TimeUnit.MILLISECONDS);
            } catch (Exception e) {
              throw new FlinkException("Disposing the savepoint '" + savepointPath + "' failed.", e);
            }
        
            logAndSysout("Savepoint '" + savepointPath + "' disposed.");
          }
        
          // --------------------------------------------------------------------------------------------
          //  Interaction with programs and JobManager
          // --------------------------------------------------------------------------------------------
        
          protected void executeProgram(PackagedProgram program, ClusterClient<?> client, int parallelism) throws ProgramMissingJobException, ProgramInvocationException {
            logAndSysout("Starting execution of program");
        
            final JobSubmissionResult result = client.run(program, parallelism);
        
            if (null == result) {
              throw new ProgramMissingJobException("No JobSubmissionResult returned, please make sure you called " +
                "ExecutionEnvironment.execute()");
            }
        
            if (result.isJobExecutionResult()) {
              logAndSysout("Program execution finished");
              JobExecutionResult execResult = result.getJobExecutionResult();
              System.out.println("Job with JobID " + execResult.getJobID() + " has finished.");
              System.out.println("Job Runtime: " + execResult.getNetRuntime() + " ms");
              Map<String, Object> accumulatorsResult = execResult.getAllAccumulatorResults();
              if (accumulatorsResult.size() > 0) {
                System.out.println("Accumulator Results: ");
                System.out.println(AccumulatorHelper.getResultsFormatted(accumulatorsResult));
              }
            } else {
              logAndSysout("Job has been submitted with JobID " + result.getJobID());
            }
          }
        
          /**
           * Creates a Packaged program from the given command line options.
           * @return A PackagedProgram (upon success)
           */
          PackagedProgram buildProgram(ProgramOptions options) throws FileNotFoundException, ProgramInvocationException {
            String[] programArgs = options.getProgramArgs();
            String jarFilePath = options.getJarFilePath();
            List<URL> classpaths = options.getClasspaths();
        
            // Get assembler class
            String entryPointClass = options.getEntryPointClassName();
            File jarFile = null;
            if (options.isPython()) {
              // If the job is specified a jar file
              if (jarFilePath != null) {
                jarFile = getJarFile(jarFilePath);
              }
        
              // If the job is Python Shell job, the entry point class name is PythonGateWayServer.
              // Otherwise, the entry point class of python job is PythonDriver
              if (entryPointClass == null) {
                entryPointClass = "org.apache.flink.client.python.PythonDriver";
              }
            } else {
              if (jarFilePath == null) {
                throw new IllegalArgumentException("Java program should be specified a JAR file.");
              }
              jarFile = getJarFile(jarFilePath);
            }
        
            PackagedProgram program = entryPointClass == null ?
                new PackagedProgram(jarFile, classpaths, programArgs) :
                new PackagedProgram(jarFile, classpaths, entryPointClass, programArgs);
        
            program.setSavepointRestoreSettings(options.getSavepointRestoreSettings());
        
            return program;
          }
        
          /**
           * Gets the JAR file from the path.
           *
           * @param jarFilePath The path of JAR file
           * @return The JAR file
           * @throws FileNotFoundException The JAR file does not exist.
           */
          private File getJarFile(String jarFilePath) throws FileNotFoundException {
            File jarFile = new File(jarFilePath);
    // Check that the JAR file exists and is a regular file
            if (!jarFile.exists()) {
              throw new FileNotFoundException("JAR file does not exist: " + jarFile);
            }
            else if (!jarFile.isFile()) {
              throw new FileNotFoundException("JAR file is not a file: " + jarFile);
            }
            return jarFile;
          }
        
          // --------------------------------------------------------------------------------------------
          //  Logging and Exception Handling
          // --------------------------------------------------------------------------------------------
        
          /**
           * Displays an exception message for incorrect command line arguments.
           *
           * @param e The exception to display.
           * @return The return code for the process.
           */
          private static int handleArgException(CliArgsException e) {
            LOG.error("Invalid command line arguments.", e);
        
            System.out.println(e.getMessage());
            System.out.println();
            System.out.println("Use the help option (-h or --help) to get help on the command.");
            return 1;
          }
        
          /**
           * Displays an optional exception message for incorrect program parametrization.
           * @param e The exception to display.
           * @return The return code for the process.
           */
          private static int handleParametrizationException(ProgramParametrizationException e) {
            LOG.error("Program has not been parametrized properly.", e);
            System.err.println(e.getMessage());
            return 1;
          }
        
          /**
           * Displays a message for a program without a job to execute.
           * @return The return code for the process.
           */
          private static int handleMissingJobException() {
            System.err.println();
            System.err.println("The program didn't contain a Flink job. " +
              "Perhaps you forgot to call execute() on the execution environment.");
            return 1;
          }
        
          /**
           * Displays an exception message.
           *
           * @param t The exception to display.
           * @return The return code for the process.
           */
          private static int handleError(Throwable t) {
            LOG.error("Error while running the command.", t);
        
            System.err.println();
            System.err.println("------------------------------------------------------------");
            System.err.println(" The program finished with the following exception:");
            System.err.println();
        
         // if the cause is an InvalidProgramException, print only its message and
         // the stack trace up to (and including) the user's main method
    if (t.getCause() instanceof InvalidProgramException) {
      System.err.println(t.getCause().getMessage());
      StackTraceElement[] trace = t.getCause().getStackTrace();
      for (StackTraceElement ele: trace) {
        System.err.println("\t" + ele);
        if (ele.getMethodName().equals("main")) {
          break;
        }
      }
            } else {
              t.printStackTrace();
            }
            return 1;
          }
    // log the message and echo it to stdout
          private static void logAndSysout(String message) {
            LOG.info(message);
            System.out.println(message);
          }
        
          // --------------------------------------------------------------------------------------------
  //  Internal methods
          // --------------------------------------------------------------------------------------------
        
    // parse a JobID from its hex string representation
          private JobID parseJobId(String jobIdString) throws CliArgsException {
            if (jobIdString == null) {
              throw new CliArgsException("Missing JobId");
            }
        
            final JobID jobId;
            try {
             // JobID.fromHexString converts the hex string into a byte array and feeds it to the JobID constructor
              jobId = JobID.fromHexString(jobIdString);
            } catch (IllegalArgumentException e) {
              throw new CliArgsException(e.getMessage());
            }
            return jobId;
          }
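
  // For illustration (value made up): a JobID is a 16-byte id, so the hex string
  // must be exactly 32 hex characters, e.g.
  //
  //   JobID id = JobID.fromHexString("1b1e5b6173bdcb6b46db0b974f1e3b86");
  //
  // anything else makes fromHexString throw an IllegalArgumentException, which is
  // rewrapped above as a CliArgsException.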
        
  /**
   * Retrieves the {@link ClusterClient} from the given {@link CustomCommandLine} and runs the given {@link ClusterAction} against it.
   *
   * @param activeCommandLine to create the {@link ClusterDescriptor} from
   * @param commandLine containing the parsed command line options
   * @param clusterAction the cluster action to run against the retrieved {@link ClusterClient}.
   * @param <T> type of the cluster id
   * @throws FlinkException if something goes wrong
   */
          private <T> void runClusterAction(CustomCommandLine<T> activeCommandLine, CommandLine commandLine, ClusterAction<T> clusterAction) throws FlinkException {
            final ClusterDescriptor<T> clusterDescriptor = activeCommandLine.createClusterDescriptor(commandLine);
        
         // resolve the cluster id from the command line
            final T clusterId = activeCommandLine.getClusterId(commandLine);
        
         // without a cluster id there is nothing to connect to
            if (clusterId == null) {
              throw new FlinkException("No cluster id was specified. Please specify a cluster to which " +
                "you would like to connect.");
            } else {
              try {
                 // retrieve the cluster client via the cluster descriptor
                final ClusterClient<T> clusterClient = clusterDescriptor.retrieve(clusterId);
        
                try {
                  clusterAction.runAction(clusterClient);
                } finally {
                  try {
                    clusterClient.close();
                  } catch (Exception e) {
                    LOG.info("Could not properly shut down the cluster client.", e);
                  }
                }
              } finally {
                try {
                  clusterDescriptor.close();
                } catch (Exception e) {
                  LOG.info("Could not properly close the cluster descriptor.", e);
                }
              }
            }
          }
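
  // Editor's note: the nested try/finally above is the pre-try-with-resources way of
  // guaranteeing that both the client and the descriptor get closed, client first. A
  // sketch of the same ordering (assuming both types implement AutoCloseable in this
  // version) would be:
  //
  //   try (ClusterDescriptor<T> descriptor = activeCommandLine.createClusterDescriptor(commandLine);
  //        ClusterClient<T> client = descriptor.retrieve(clusterId)) {
  //     clusterAction.runAction(client);
  //   }
  //
  // with the difference that the hand-written version merely logs close() failures
  // instead of letting them propagate.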
        
          /**
           * Internal interface to encapsulate cluster actions which are executed via the {@link ClusterClient}.
           *
           * @param <T> type of the cluster id
           */
          @FunctionalInterface
          private interface ClusterAction<T> {
        
            /**
             * Run the cluster action with the given {@link ClusterClient}.
             * @param clusterClient to run the cluster action against
             * @throws FlinkException if something goes wrong
             */
            void runAction(ClusterClient<T> clusterClient) throws FlinkException;
          }
        
          // --------------------------------------------------------------------------------------------
  //  Entry point for executable
          // --------------------------------------------------------------------------------------------
        
          /**
           * Parses the command line arguments and starts the requested action.
           *
           * @param args command line arguments of the client.
           * @return The return code of the program
           */
          public int parseParameters(String[] args) {
        
    // check that an action was given
            if (args.length < 1) {
              CliFrontendParser.printHelp(customCommandLines);
              System.out.println("Please specify an action.");
              return 1;
            }
        
    // get action
            String action = args[0];
        
    // remove action from parameters
            final String[] params = Arrays.copyOfRange(args, 1, args.length);
        
            try {
      // dispatch the action: run, list, info, cancel, stop, savepoint,
             // -h/--help, -v/--version; anything else is invalid and prints the valid actions
              switch (action) {
                case ACTION_RUN:
                  run(params);
                  return 0;
                case ACTION_LIST:
                  list(params);
                  return 0;
                case ACTION_INFO:
                  info(params);
                  return 0;
                case ACTION_CANCEL:
                  cancel(params);
                  return 0;
                case ACTION_STOP:
                  stop(params);
                  return 0;
                case ACTION_SAVEPOINT:
                  savepoint(params);
                  return 0;
                case "-h":
                case "--help":
                  CliFrontendParser.printHelp(customCommandLines);
                  return 0;
                case "-v":
                case "--version":
                  String version = EnvironmentInformation.getVersion();
                  String commitID = EnvironmentInformation.getRevisionInformation().commitId;
                  System.out.print("Version: " + version);
                  System.out.println(commitID.equals(EnvironmentInformation.UNKNOWN) ? "" : ", Commit ID: " + commitID);
                  return 0;
                default:
                  System.out.printf("\"%s\" is not a valid action.\n", action);
                  System.out.println();
                  System.out.println("Valid actions are \"run\", \"list\", \"info\", \"savepoint\", \"stop\", or \"cancel\".");
                  System.out.println();
                  System.out.println("Specify the version option (-v or --version) to print Flink version.");
                  System.out.println();
                  System.out.println("Specify the help option (-h or --help) to get help on the command.");
                  return 1;
              }
            } catch (CliArgsException ce) {
              return handleArgException(ce);
            } catch (ProgramParametrizationException ppe) {
              return handleParametrizationException(ppe);
            } catch (ProgramMissingJobException pmje) {
              return handleMissingJobException();
    } catch (Exception e) {
      return handleError(e);
    }
          }
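
  // Hedged examples of how the dispatch above maps shell invocations to methods
  // (jar name made up):
  //
  //   ./bin/flink run -d -p 4 ./examples/WordCount.jar   ->  run(params), detached, parallelism 4
  //   ./bin/flink list -r                                ->  list(params), running jobs only
  //   ./bin/flink --version                              ->  print version, exit code 0
  //   ./bin/flink frobnicate                             ->  invalid action, exit code 1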
        
          /**
           * Submits the job based on the arguments.
           */
          public static void main(final String[] args) {
            EnvironmentInformation.logEnvironmentInfo(LOG, "Command Line Client", args);
        
            // 1. find the configuration directory
            final String configurationDirectory = getConfigurationDirectoryFromEnv();
        
            // 2. load the global configuration
            final Configuration configuration = GlobalConfiguration.loadConfiguration(configurationDirectory);
        
            // 3. load the custom command lines
            final List<CustomCommandLine<?>> customCommandLines = loadCustomCommandLines(
              configuration,
              configurationDirectory);
        
            try {
              final CliFrontend cli = new CliFrontend(
                configuration,
                customCommandLines);
      // install the security context (e.g. Hadoop / Kerberos security modules) described by the configuration
              SecurityUtils.install(new SecurityConfiguration(cli.configuration));
             // parse the arguments inside the installed security context and collect the return code; runSecured takes a Callable
              int retCode = SecurityUtils.getInstalledContext()
                  .runSecured(() -> cli.parseParameters(args));
              System.exit(retCode);
            }
            catch (Throwable t) {
              final Throwable strippedThrowable = ExceptionUtils.stripException(t, UndeclaredThrowableException.class);
             // log the fatal error and exit with return code 31
              LOG.error("Fatal error while running command line interface.", strippedThrowable);
              strippedThrowable.printStackTrace();
              System.exit(31);
            }
          }
        
          // --------------------------------------------------------------------------------------------
          //  Miscellaneous Utilities
          // --------------------------------------------------------------------------------------------
        
    // resolve the configuration directory from the environment
          public static String getConfigurationDirectoryFromEnv() {
         // read the FLINK_CONF_DIR environment variable
            String location = System.getenv(ConfigConstants.ENV_FLINK_CONF_DIR);
        
         // the variable is set
            if (location != null) {
             // return the directory if it exists, otherwise fail with a RuntimeException
              if (new File(location).exists()) {
                return location;
              }
              else {
                throw new RuntimeException("The configuration directory '" + location + "', specified in the '" +
                  ConfigConstants.ENV_FLINK_CONF_DIR + "' environment variable, does not exist.");
              }
            }
        // variable unset: try the first fallback directory
            else if (new File(CONFIG_DIRECTORY_FALLBACK_1).exists()) {
             // use fallback 1
              location = CONFIG_DIRECTORY_FALLBACK_1;
            }
        // then the second fallback directory
            else if (new File(CONFIG_DIRECTORY_FALLBACK_2).exists()) {
             // use fallback 2
              location = CONFIG_DIRECTORY_FALLBACK_2;
            }
        // no environment variable and neither fallback exists: give up with a RuntimeException
            else {
              throw new RuntimeException("The configuration directory was not specified. " +
                  "Please specify the directory containing the configuration file through the '" +
                ConfigConstants.ENV_FLINK_CONF_DIR + "' environment variable.");
            }
            return location;
          }
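
  // For illustration: ENV_FLINK_CONF_DIR is the FLINK_CONF_DIR environment variable,
  // so a typical setup looks like this (path made up):
  //
  //   export FLINK_CONF_DIR=/opt/flink/conf   # the directory GlobalConfiguration loads flink-conf.yaml from
  //   ./bin/flink list -r
  //
  // If the variable is unset, the two hard-coded fallback directories
  // (CONFIG_DIRECTORY_FALLBACK_1/2) are probed before giving up.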
        
          /**
           * Writes the given job manager address to the associated configuration object.
           *
           * @param address Address to write to the configuration
           * @param config The configuration to write to
           */
          static void setJobManagerAddressInConfig(Configuration config, InetSocketAddress address) {
        // I never quite understood why these are named setString/setInteger: internally they just store the JobManager address/port and the REST address/port as key-value entries in the configuration, so the naming is confusing; as the design-pattern literature says, use the Decorator pattern sparingly, it breeds confusion.
            config.setString(JobManagerOptions.ADDRESS, address.getHostString());
            config.setInteger(JobManagerOptions.PORT, address.getPort());
            config.setString(RestOptions.ADDRESS, address.getHostString());
            config.setInteger(RestOptions.PORT, address.getPort());
          }
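
  // For a call like setJobManagerAddressInConfig(config, new InetSocketAddress("jm-host", 6123))
  // (host/port made up), the four writes above amount to these configuration entries
  // (key names as I read them from JobManagerOptions/RestOptions in this version):
  //
  //   jobmanager.rpc.address: jm-host
  //   jobmanager.rpc.port:    6123
  //   rest.address:           jm-host
  //   rest.port:              6123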
        
    // load the custom command lines from the configuration and the config directory, returning them as a list
          public static List<CustomCommandLine<?>> loadCustomCommandLines(Configuration configuration, String configurationDirectory) {
            List<CustomCommandLine<?>> customCommandLines = new ArrayList<>(2);
        
            //  Command line interface of the YARN session, with a special initialization here to prefix all options with y/yarn.
         // the YARN (Yet Another Resource Negotiator) session CLI; a special initialization prefixes all of its options with y/yarn
            //  Tips: DefaultCLI must be added at last, because getActiveCustomCommandLine(..) will get the active CustomCommandLine in order and DefaultCLI isActive always return true.
        // DefaultCLI must be added last: getActiveCustomCommandLine(..) probes the custom command lines in list order, and DefaultCLI.isActive always returns true
            final String flinkYarnSessionCLI = "org.apache.flink.yarn.cli.FlinkYarnSessionCli";
            try {
              customCommandLines.add(
                loadCustomCommandLine(flinkYarnSessionCLI,
                  configuration,
                  configurationDirectory,
                  "y",
                  "yarn"));
            } catch (NoClassDefFoundError | Exception e) {
              LOG.warn("Could not load CLI class {}.", flinkYarnSessionCLI, e);
            }
        
            customCommandLines.add(new DefaultCLI(configuration));
        
            return customCommandLines;
          }
        
          // --------------------------------------------------------------------------------------------
  //  Custom command-line
          // --------------------------------------------------------------------------------------------
        
          /**
           * Gets the custom command-line for the arguments.
           * @param commandLine The input to the command-line.
           * @return custom command-line which is active (may only be one at a time)
           */
          public CustomCommandLine<?> getActiveCustomCommandLine(CommandLine commandLine) {
            for (CustomCommandLine<?> cli : customCommandLines) {
              if (cli.isActive(commandLine)) {
                return cli;
              }
            }
            throw new IllegalStateException("No command-line ran.");
          }
        
          /**
           * Loads a class from the classpath that implements the CustomCommandLine interface.
           * @param className The fully-qualified class name to load.
           * @param params The constructor parameters
           */
          private static CustomCommandLine<?> loadCustomCommandLine(String className, Object... params) throws IllegalAccessException, InvocationTargetException, InstantiationException, ClassNotFoundException, NoSuchMethodException {
        
            Class<? extends CustomCommandLine> customCliClass =
              Class.forName(className).asSubclass(CustomCommandLine.class);
        
    // construct class types from the parameters
    Class<?>[] types = new Class<?>[params.length];
    for (int i = 0; i < params.length; i++) {
            // fail fast on null parameters
      Preconditions.checkNotNull(params[i], "Parameters for custom command-lines may not be null.");
            // record the runtime type of each parameter
      types[i] = params[i].getClass();
    }
        
        // look up a matching constructor reflectively
    Constructor<? extends CustomCommandLine> constructor = customCliClass.getConstructor(types);

    return constructor.newInstance(params);
  }
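
  // A minimal, self-contained sketch (editor's illustration, not Flink code) of the
  // same reflective construction pattern: resolve the class by name, derive the
  // constructor signature from the runtime argument types, and instantiate. Note the
  // caveat: getConstructor matches exact parameter types, so the lookup fails if the
  // constructor declares a supertype or primitive parameter.
  private static CharSequence demoReflectiveConstruction() throws Exception {
    Class<? extends CharSequence> cls =
        Class.forName("java.lang.StringBuilder").asSubclass(CharSequence.class);
    Object arg = "hello";
    Constructor<? extends CharSequence> ctor = cls.getConstructor(arg.getClass());
    return ctor.newInstance(arg);   // effectively: new StringBuilder("hello")
  }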
        
        }
        
        
• CliFrontendParser (the command-line front-end parser)
        package org.apache.flink.client.cli;
        
        import org.apache.flink.configuration.CheckpointingOptions;
        import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;
        
        import org.apache.commons.cli.CommandLine;
        import org.apache.commons.cli.DefaultParser;
        import org.apache.commons.cli.HelpFormatter;
        import org.apache.commons.cli.Option;
        import org.apache.commons.cli.Options;
        import org.apache.commons.cli.ParseException;
        
        import javax.annotation.Nullable;
        
        import java.util.Collection;
        
        /**
         * A simple command line parser (based on Apache Commons CLI) that extracts command
         * line options.
         */
        public class CliFrontendParser {
        
    // help option; the trailing 'false' means the option takes no argument
          static final Option HELP_OPTION = new Option("h", "help", false,
              "Show the help message for the CLI Frontend or the action.");
        
    // jar file option (takes an argument)
          static final Option JAR_OPTION = new Option("j", "jarfile", true, "Flink program JAR file.");
        
    // entry-point class option (takes an argument)
          static final Option CLASS_OPTION = new Option("c", "class", true,
              "Class with the program entry point (\"main()\" method or \"getPlan()\" method). Only needed if the " +
              "JAR file does not specify the class in its manifest.");
    // classpath option (takes an argument)
          static final Option CLASSPATH_OPTION = new Option("C", "classpath", true, "Adds a URL to each user code " +
              "classloader  on all nodes in the cluster. The paths must specify a protocol (e.g. file://) and be " +
                  "accessible on all nodes (e.g. by means of a NFS share). You can use this option multiple " +
                  "times for specifying more than one URL. The protocol must be supported by the " +
                  "{@link java.net.URLClassLoader}.");
        
    // parallelism option
          public static final Option PARALLELISM_OPTION = new Option("p", "parallelism", true,
              "The parallelism with which to run the program. Optional flag to override the default value " +
              "specified in the configuration.");
    // sysout-logging option (suppresses logging to stdout)
          static final Option LOGGING_OPTION = new Option("q", "sysoutLogging", false, "If present, " +
              "suppress logging output to standard out.");
        
    // detached-mode option
          public static final Option DETACHED_OPTION = new Option("d", "detached", false, "If present, runs " +
              "the job in detached mode");
        
    // shutdown-if-attached option: best-effort cluster shutdown when an attached CLI is terminated abruptly, e.g. via Ctrl+C
          public static final Option SHUTDOWN_IF_ATTACHED_OPTION = new Option(
            "sae", "shutdownOnAttachedExit", false,
            "If the job is submitted in attached mode, perform a best-effort cluster shutdown " +
              "when the CLI is terminated abruptly, e.g., in response to a user interrupt, such as typing Ctrl + C.");
        
          /**
           * @deprecated use non-prefixed variant {@link #DETACHED_OPTION} for both YARN and non-YARN deployments
           */
          @Deprecated
          public static final Option YARN_DETACHED_OPTION = new Option("yd", "yarndetached", false, "If present, runs " +
            "the job in detached mode (deprecated; use non-YARN specific option instead)");
        
    // program arguments option
          static final Option ARGS_OPTION = new Option("a", "arguments", true,
              "Program arguments. Arguments can also be added without -a, simply as trailing parameters.");
        
    // JobManager address option
          public static final Option ADDRESS_OPTION = new Option("m", "jobmanager", true,
              "Address of the JobManager (master) to which to connect. " +
              "Use this flag to connect to a different JobManager than the one specified in the configuration.");
        
    // savepoint restore-path option
          public static final Option SAVEPOINT_PATH_OPTION = new Option("s", "fromSavepoint", true,
              "Path to a savepoint to restore the job from (for example hdfs:///flink/savepoint-1537).");
        
    // allow-non-restored-state option
          public static final Option SAVEPOINT_ALLOW_NON_RESTORED_OPTION = new Option("n", "allowNonRestoredState", false,
              "Allow to skip savepoint state that cannot be restored. " +
                  "You need to allow this if you removed an operator from your " +
                  "program that was part of the program when the savepoint was triggered.");
        
    // savepoint disposal option
          static final Option SAVEPOINT_DISPOSE_OPTION = new Option("d", "dispose", true,
              "Path of savepoint to dispose.");
        
  // list-specific options
      // show only running jobs
          static final Option RUNNING_OPTION = new Option("r", "running", false,
              "Show only running programs and their JobIDs");
        
      // show only scheduled jobs
          static final Option SCHEDULED_OPTION = new Option("s", "scheduled", false,
              "Show only scheduled programs and their JobIDs");
        
      // show all jobs
          static final Option ALL_OPTION = new Option("a", "all", false,
            "Show all programs and their JobIDs");
        
      // ZooKeeper namespace option
          static final Option ZOOKEEPER_NAMESPACE_OPTION = new Option("z", "zookeeperNamespace", true,
              "Namespace to create the Zookeeper sub-paths for high availability mode");
        
      // cancel-with-savepoint option (deprecated, yet it still works......)
          static final Option CANCEL_WITH_SAVEPOINT_OPTION = new Option(
              "s", "withSavepoint", true, "**DEPRECATION WARNING**: " +
              "Cancelling a job with savepoint is deprecated. Use \"stop\" instead. \n Trigger" +
              " savepoint and cancel job. The target directory is optional. If no directory is " +
              "specified, the configured default directory (" +
              CheckpointingOptions.SAVEPOINT_DIRECTORY.key() + ") is used.");
        
    // stop-with-savepoint path option
          public static final Option STOP_WITH_SAVEPOINT_PATH = new Option("p", "savepointPath", true,
              "Path to the savepoint (for example hdfs:///flink/savepoint-1537). " +
                  "If no directory is specified, the configured default will be used (\"" + CheckpointingOptions.SAVEPOINT_DIRECTORY.key() + "\").");
        
    // stop-and-drain option: emit MAX_WATERMARK through the pipeline before taking the savepoint and stopping
  public static final Option STOP_AND_DRAIN = new Option("d", "drain", false,
      "Send MAX_WATERMARK before taking the savepoint and stopping the pipeline.");
            
     // python script option
          static final Option PY_OPTION = new Option("py", "python", true,
            "Python script with the program entry point. " +
              "The dependent resources can be configured with the `--pyFiles` option.");
        
     // python files option
          static final Option PYFILES_OPTION = new Option("pyfs", "pyFiles", true,
            "Attach custom python files for job. " +
              "Comma can be used as the separator to specify multiple files. " +
              "The standard python resource file suffixes such as .py/.egg/.zip are all supported." +
              "(eg: --pyFiles file:///tmp/myresource.zip,hdfs:///$namenode_address/myresource2.zip)");
        
    // python module option
          static final Option PYMODULE_OPTION = new Option("pym", "pyModule", true,
            "Python module with the program entry point. " +
              "This option must be used in conjunction with `--pyFiles`.");
        
    // static initializer: none of these options is mandatory (setRequired(false));
    // argument names are set for the options that take a value
  static {
    HELP_OPTION.setRequired(false);
    JAR_OPTION.setRequired(false);
    JAR_OPTION.setArgName("jarfile");

    CLASS_OPTION.setRequired(false);
    CLASS_OPTION.setArgName("classname");

    CLASSPATH_OPTION.setRequired(false);
    CLASSPATH_OPTION.setArgName("url");

    ADDRESS_OPTION.setRequired(false);
    ADDRESS_OPTION.setArgName("host:port");

    PARALLELISM_OPTION.setRequired(false);
    PARALLELISM_OPTION.setArgName("parallelism");

    LOGGING_OPTION.setRequired(false);
    DETACHED_OPTION.setRequired(false);
    SHUTDOWN_IF_ATTACHED_OPTION.setRequired(false);
    YARN_DETACHED_OPTION.setRequired(false);

    ARGS_OPTION.setRequired(false);
    ARGS_OPTION.setArgName("programArgs");
        // accept an unlimited number of trailing values (Option.UNLIMITED_VALUES == -2)
    ARGS_OPTION.setArgs(Option.UNLIMITED_VALUES);

    RUNNING_OPTION.setRequired(false);
    SCHEDULED_OPTION.setRequired(false);

    SAVEPOINT_PATH_OPTION.setRequired(false);
    SAVEPOINT_PATH_OPTION.setArgName("savepointPath");

    SAVEPOINT_ALLOW_NON_RESTORED_OPTION.setRequired(false);

    ZOOKEEPER_NAMESPACE_OPTION.setRequired(false);
    ZOOKEEPER_NAMESPACE_OPTION.setArgName("zookeeperNamespace");

        // the savepoint target directory of -s/--withSavepoint is itself optional
    CANCEL_WITH_SAVEPOINT_OPTION.setRequired(false);
    CANCEL_WITH_SAVEPOINT_OPTION.setArgName("targetDirectory");
    CANCEL_WITH_SAVEPOINT_OPTION.setOptionalArg(true);

    STOP_WITH_SAVEPOINT_PATH.setRequired(false);
    STOP_WITH_SAVEPOINT_PATH.setArgName("savepointPath");
    STOP_WITH_SAVEPOINT_PATH.setOptionalArg(true);

    STOP_AND_DRAIN.setRequired(false);

    PY_OPTION.setRequired(false);
    PY_OPTION.setArgName("python");

    PYFILES_OPTION.setRequired(false);
    PYFILES_OPTION.setArgName("pyFiles");

    PYMODULE_OPTION.setRequired(false);
    PYMODULE_OPTION.setArgName("pyModule");
  }
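
    // A hedged sketch (editor's illustration, not part of this class) of how these
    // commons-cli Options definitions are consumed; DefaultParser is the parser the
    // parse methods of this class build on:
    //
    //   Options opts = getRunCommandOptions();
    //   CommandLine cmd = new DefaultParser().parse(opts, new String[] {"-p", "8", "job.jar"}, true);
    //   String parallelism = cmd.getOptionValue("p");   // "8"
    //   String[] leftovers = cmd.getArgs();             // ["job.jar"]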
    // the pre-built options for the run action
          private static final Options RUN_OPTIONS = getRunCommandOptions();
    // build the general options shared by all actions
          private static Options buildGeneralOptions(Options options) {
         // every action supports the help option
            options.addOption(HELP_OPTION);
            // backwards compatibility: ignore verbose flag (-v)
            options.addOption(new Option("v", "verbose", false, "This option is deprecated."));
            return options;
          }
        
     // options specific to executing a program
          private static Options getProgramSpecificOptions(Options options) {
            options.addOption(JAR_OPTION);
            options.addOption(CLASS_OPTION);
            options.addOption(CLASSPATH_OPTION);
            options.addOption(PARALLELISM_OPTION);
            options.addOption(ARGS_OPTION);
            options.addOption(LOGGING_OPTION);
            options.addOption(DETACHED_OPTION);
            options.addOption(SHUTDOWN_IF_ATTACHED_OPTION);
            options.addOption(YARN_DETACHED_OPTION);
            options.addOption(PY_OPTION);
            options.addOption(PYFILES_OPTION);
            options.addOption(PYMODULE_OPTION);
            return options;
          }
        
    // the same program-specific options minus the deprecated ones
          private static Options getProgramSpecificOptionsWithoutDeprecatedOptions(Options options) {
            options.addOption(CLASS_OPTION);
            options.addOption(CLASSPATH_OPTION);
            options.addOption(PARALLELISM_OPTION);
            options.addOption(LOGGING_OPTION);
            options.addOption(DETACHED_OPTION);
            options.addOption(SHUTDOWN_IF_ATTACHED_OPTION);
            options.addOption(PY_OPTION);
            options.addOption(PYFILES_OPTION);
            options.addOption(PYMODULE_OPTION);
            return options;
          }
        
     // options for the run action
          public static Options getRunCommandOptions() {
            Options options = buildGeneralOptions(new Options());
            options = getProgramSpecificOptions(options);
            options.addOption(SAVEPOINT_PATH_OPTION);
            return options.addOption(SAVEPOINT_ALLOW_NON_RESTORED_OPTION);
          }
     // options for the info action
          static Options getInfoCommandOptions() {
            Options options = buildGeneralOptions(new Options());
            return getProgramSpecificOptions(options);
          }
     // options for the list action
          static Options getListCommandOptions() {
            Options options = buildGeneralOptions(new Options());
            options.addOption(ALL_OPTION);
            options.addOption(RUNNING_OPTION);
            return options.addOption(SCHEDULED_OPTION);
          }
     // options for the cancel action
          static Options getCancelCommandOptions() {
            Options options = buildGeneralOptions(new Options());
            return options.addOption(CANCEL_WITH_SAVEPOINT_OPTION);
          }
     // options for the stop action
          static Options getStopCommandOptions() {
            return buildGeneralOptions(new Options())
                .addOption(STOP_WITH_SAVEPOINT_PATH)
                .addOption(STOP_AND_DRAIN);
          }
      // options for the savepoint action
          static Options getSavepointCommandOptions() {
            Options options = buildGeneralOptions(new Options());
            options.addOption(SAVEPOINT_DISPOSE_OPTION);
            return options.addOption(JAR_OPTION);
          }
        
          // --------------------------------------------------------------------------------------------
          //  Help 
          // --------------------------------------------------------------------------------------------
     // run options without the deprecated ones (used when printing help)
  private static Options getRunOptionsWithoutDeprecatedOptions(Options options) {
    Options o = getProgramSpecificOptionsWithoutDeprecatedOptions(options);
    o.addOption(SAVEPOINT_PATH_OPTION);
    return o.addOption(SAVEPOINT_ALLOW_NON_RESTORED_OPTION);
  }
        
    // info options without the deprecated ones
          private static Options getInfoOptionsWithoutDeprecatedOptions(Options options) {
            options.addOption(CLASS_OPTION);
            options.addOption(PARALLELISM_OPTION);
            return options;
          }
    // list options without the deprecated ones
          private static Options getListOptionsWithoutDeprecatedOptions(Options options) {
            options.addOption(RUNNING_OPTION);
            return options.addOption(SCHEDULED_OPTION);
          }
     // cancel options without the deprecated ones
          private static Options getCancelOptionsWithoutDeprecatedOptions(Options options) {
            return options.addOption(CANCEL_WITH_SAVEPOINT_OPTION);
          }
     // stop options without the deprecated ones
          private static Options getStopOptionsWithoutDeprecatedOptions(Options options) {
            return options
                .addOption(STOP_WITH_SAVEPOINT_PATH)
                .addOption(STOP_AND_DRAIN);
          }
     // savepoint options without the deprecated ones
          private static Options getSavepointOptionsWithoutDeprecatedOptions(Options options) {
            options.addOption(SAVEPOINT_DISPOSE_OPTION);
            options.addOption(JAR_OPTION);
            return options;
          }
        
          /**
           * Prints the help for the client.
           */
          public static void printHelp(Collection<CustomCommandLine<?>> customCommandLines) {
            System.out.println("./flink <ACTION> [OPTIONS] [ARGUMENTS]");
            System.out.println();
            System.out.println("The following actions are available:");
        
            printHelpForRun(customCommandLines);
            printHelpForInfo();
            printHelpForList(customCommandLines);
            printHelpForStop(customCommandLines);
            printHelpForCancel(customCommandLines);
            printHelpForSavepoint(customCommandLines);
        
            System.out.println();
          }
    // print help for the run action
          public static void printHelpForRun(Collection<CustomCommandLine<?>> customCommandLines) {
            HelpFormatter formatter = new HelpFormatter();
            formatter.setLeftPadding(5);
            formatter.setWidth(80);
        
            System.out.println("\nAction \"run\" compiles and runs a program.");
            System.out.println("\n  Syntax: run [OPTIONS] <jar-file> <arguments>");
            formatter.setSyntaxPrefix("  \"run\" action options:");
            formatter.printHelp(" ", getRunOptionsWithoutDeprecatedOptions(new Options()));
        
            printCustomCliOptions(customCommandLines, formatter, true);
        
            System.out.println();
          }
     // prints help for the "info" action
          public static void printHelpForInfo() {
            HelpFormatter formatter = new HelpFormatter();
            formatter.setLeftPadding(5);
            formatter.setWidth(80);
        
            System.out.println("\nAction \"info\" shows the optimized execution plan of the program (JSON).");
            System.out.println("\n  Syntax: info [OPTIONS] <jar-file> <arguments>");
            formatter.setSyntaxPrefix("  \"info\" action options:");
            formatter.printHelp(" ", getInfoOptionsWithoutDeprecatedOptions(new Options()));
        
            System.out.println();
          }
     // prints help for the "list" action
          public static void printHelpForList(Collection<CustomCommandLine<?>> customCommandLines) {
            HelpFormatter formatter = new HelpFormatter();
            formatter.setLeftPadding(5);
            formatter.setWidth(80);
        
            System.out.println("\nAction \"list\" lists running and scheduled programs.");
            System.out.println("\n  Syntax: list [OPTIONS]");
            formatter.setSyntaxPrefix("  \"list\" action options:");
            formatter.printHelp(" ", getListOptionsWithoutDeprecatedOptions(new Options()));
        
            printCustomCliOptions(customCommandLines, formatter, false);
        
            System.out.println();
          }
     // prints help for the "stop" action
          public static void printHelpForStop(Collection<CustomCommandLine<?>> customCommandLines) {
            HelpFormatter formatter = new HelpFormatter();
            formatter.setLeftPadding(5);
            formatter.setWidth(80);
        
            System.out.println("\nAction \"stop\" stops a running program with a savepoint (streaming jobs only).");
            System.out.println("\n  Syntax: stop [OPTIONS] <Job ID>");
            formatter.setSyntaxPrefix("  \"stop\" action options:");
            formatter.printHelp(" ", getStopOptionsWithoutDeprecatedOptions(new Options()));
        
            printCustomCliOptions(customCommandLines, formatter, false);
        
            System.out.println();
          }
     // prints help for the "cancel" action
          public static void printHelpForCancel(Collection<CustomCommandLine<?>> customCommandLines) {
            HelpFormatter formatter = new HelpFormatter();
            formatter.setLeftPadding(5);
            formatter.setWidth(80);
        
            System.out.println("\nAction \"cancel\" cancels a running program.");
            System.out.println("\n  Syntax: cancel [OPTIONS] <Job ID>");
            formatter.setSyntaxPrefix("  \"cancel\" action options:");
            formatter.printHelp(" ", getCancelOptionsWithoutDeprecatedOptions(new Options()));
        
            printCustomCliOptions(customCommandLines, formatter, false);
        
            System.out.println();
          }
        
    // prints help for the "savepoint" action
          public static void printHelpForSavepoint(Collection<CustomCommandLine<?>> customCommandLines) {
            HelpFormatter formatter = new HelpFormatter();
            formatter.setLeftPadding(5);
            formatter.setWidth(80);
        
            System.out.println("\nAction \"savepoint\" triggers savepoints for a running job or disposes existing ones.");
            System.out.println("\n  Syntax: savepoint [OPTIONS] <Job ID> [<target directory>]");
            formatter.setSyntaxPrefix("  \"savepoint\" action options:");
            formatter.printHelp(" ", getSavepointOptionsWithoutDeprecatedOptions(new Options()));
        
            printCustomCliOptions(customCommandLines, formatter, false);
        
            System.out.println();
          }
        
          /**
           * Prints custom cli options.
   * @param formatter The formatter to use for printing
   * @param runOptions True if the run options should be printed, False to print only general options
           */
          private static void printCustomCliOptions(
              Collection<CustomCommandLine<?>> customCommandLines,
              HelpFormatter formatter,
              boolean runOptions) {
    // prints options from all available command-line classes
            for (CustomCommandLine cli: customCommandLines) {
              formatter.setSyntaxPrefix("  Options for " + cli.getId() + " mode:");
              Options customOpts = new Options();
              cli.addGeneralOptions(customOpts);
              if (runOptions) {
                cli.addRunOptions(customOpts);
              }
              formatter.printHelp(" ", customOpts);
              System.out.println();
            }
          }
     // creates the savepoint restore settings from the command line
  public static SavepointRestoreSettings createSavepointRestoreSettings(CommandLine commandLine) {
         // if the savepoint path option is present
    if (commandLine.hasOption(SAVEPOINT_PATH_OPTION.getOpt())) {
             // read the savepoint path
      String savepointPath = commandLine.getOptionValue(SAVEPOINT_PATH_OPTION.getOpt());
             // whether savepoint state that cannot be restored into the new job may be skipped
      boolean allowNonRestoredState = commandLine.hasOption(SAVEPOINT_ALLOW_NON_RESTORED_OPTION.getOpt());
             // build the savepoint restore settings from the path and the allow-non-restored-state flag
      return SavepointRestoreSettings.forPath(savepointPath, allowNonRestoredState);
    } else {
             // otherwise, no savepoint restore settings
      return SavepointRestoreSettings.none();
            }
          }
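  // For example (assuming the usual Flink flags, -s/--fromSavepoint and
  // -n/--allowNonRestoredState, are the ones backing the two options used above),
  // a command like
  //   flink run -s hdfs:///savepoints/savepoint-xyz -n job.jar
  // would yield SavepointRestoreSettings.forPath("hdfs:///savepoints/savepoint-xyz", true).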
        
          // --------------------------------------------------------------------------------------------
  //  Line Parsing
          // --------------------------------------------------------------------------------------------
     // parses the "run" command line
  public static RunOptions parseRunCommand(String[] args) throws CliArgsException {
    try {
             // create the parser
      DefaultParser parser = new DefaultParser();
             // parse the run options, stopping at the first token that is not a known option
      CommandLine line = parser.parse(RUN_OPTIONS, args, true);
              return new RunOptions(line);
            }
            catch (ParseException e) {
              throw new CliArgsException(e.getMessage());
            }
          }
     // generic parse method; params: the options, the args, and the stopAtNonOptions flag (stop at the first non-option token)
          public static CommandLine parse(Options options, String[] args, boolean stopAtNonOptions) throws CliArgsException {
            final DefaultParser parser = new DefaultParser();
        
            try {
              return parser.parse(options, args, stopAtNonOptions);
            } catch (ParseException e) {
              throw new CliArgsException(e.getMessage());
            }
          }
        
          /**
           * Merges the given {@link Options} into a new Options object.
   * @param optionsA options to merge, can be null if none
   * @param optionsB options to merge, can be null if none
   * @return a new Options object containing all options of both inputs
           */
          public static Options mergeOptions(@Nullable Options optionsA, @Nullable Options optionsB) {
            final Options resultOptions = new Options();
         // if optionsA is not null, copy each of its options into the result
            if (optionsA != null) {
              for (Option option : optionsA.getOptions()) {
                resultOptions.addOption(option);
              }
            }
         // if optionsB is not null, copy each of its options into the result
            if (optionsB != null) {
              for (Option option : optionsB.getOptions()) {
                resultOptions.addOption(option);
              }
            }
        
            return resultOptions;
          }
        }
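As an aside, here is a minimal, self-contained sketch of the commons-cli machinery this class builds on: DefaultParser with stopAtNonOptions, plus the copy loop behind mergeOptions. The class and option names are invented for the example; only the commons-cli calls are real:

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;

public class MergeAndParseDemo {
  public static void main(String[] args) throws Exception {
    // two independent option sets, merged the same way mergeOptions does above
    Options general = new Options().addOption(new Option("h", "help", false, "show help"));
    Options run = new Options().addOption(new Option("p", "parallelism", true, "job parallelism"));

    Options merged = new Options();
    for (Option o : general.getOptions()) { merged.addOption(o); }
    for (Option o : run.getOptions()) { merged.addOption(o); }

    // stopAtNonOptions=true: parsing stops at the first token that is not a known
    // option, so "job.jar --input in.txt" is left untouched in getArgs()
    CommandLine line = new DefaultParser().parse(
        merged, new String[] {"-p", "4", "job.jar", "--input", "in.txt"}, true);

    System.out.println(line.getOptionValue("p"));          // 4
    System.out.println(String.join(" ", line.getArgs()));  // job.jar --input in.txt
  }
}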
        
• DefaultCLI (the default CLI)
        package org.apache.flink.client.cli;
        
        import org.apache.flink.client.deployment.ClusterSpecification;
        import org.apache.flink.client.deployment.StandaloneClusterDescriptor;
        import org.apache.flink.client.deployment.StandaloneClusterId;
        import org.apache.flink.configuration.Configuration;
        import org.apache.flink.util.FlinkException;
        
        import org.apache.commons.cli.CommandLine;
        import org.apache.commons.cli.Options;
        
        import javax.annotation.Nullable;
        
        /**
         * The default CLI which is used for interaction with standalone clusters.
 * Extends the AbstractCustomCommandLine base class, parameterized with StandaloneClusterId.
         */
        public class DefaultCLI extends AbstractCustomCommandLine<StandaloneClusterId> {
        
          public DefaultCLI(Configuration configuration) {
            super(configuration);
          }
        
    // whether this command line is active
  @Override
  public boolean isActive(CommandLine commandLine) {
    // always active because we can try to read a JobManager address from the config
            return true;
          }
        
          @Override
          public String getId() {
            return "default";
          }
        
    // adds the general options
          @Override
          public void addGeneralOptions(Options baseOptions) {
            super.addGeneralOptions(baseOptions);
          }
        
     // creates a standalone cluster descriptor
  @Override
  public StandaloneClusterDescriptor createClusterDescriptor(
      CommandLine commandLine) throws FlinkException {
         // applyCommandLineOptionsToConfiguration applies the command line options (address and zookeeper namespace) to the configuration and returns the effective configuration
    final Configuration effectiveConfiguration = applyCommandLineOptionsToConfiguration(commandLine);
         // build a StandaloneClusterDescriptor from that configuration
            return new StandaloneClusterDescriptor(effectiveConfiguration);
          }
        
    // gets the cluster id, may be null
  @Override
  @Nullable
  public StandaloneClusterId getClusterId(CommandLine commandLine) {
        // the getInstance here tells you StandaloneClusterId is a singleton; a look inside shows it is the eagerly initialized kind
            return StandaloneClusterId.getInstance();
          }
        
    // gets the cluster specification
  @Override
  public ClusterSpecification getClusterSpecification(CommandLine commandLine) {
        // the specification is created via the builder pattern
            return new ClusterSpecification.ClusterSpecificationBuilder().createClusterSpecification();
          }
        }
        
        
• CancelOptions (options for the "cancel" command)
        package org.apache.flink.client.cli;
        
        import org.apache.commons.cli.CommandLine;
        
        import static org.apache.flink.client.cli.CliFrontendParser.CANCEL_WITH_SAVEPOINT_OPTION;
        
        /**
         * Command line options for the CANCEL command.
 * Extends CommandLineOptions.
         */
        public class CancelOptions extends CommandLineOptions {
        
    // the remaining (positional) arguments
          private final String[] args;
        
  /** 
  * Flag indicating whether to cancel with a savepoint. 
  */
          private final boolean withSavepoint;
        
  /** 
  * Optional target directory for the savepoint. Overwrites the cluster default. 
  */
          private final String targetDirectory;
        
          public CancelOptions(CommandLine line) {
            super(line);
            this.args = line.getArgs();
            this.withSavepoint = line.hasOption(CANCEL_WITH_SAVEPOINT_OPTION.getOpt());
            this.targetDirectory = line.getOptionValue(CANCEL_WITH_SAVEPOINT_OPTION.getOpt());
          }
    // returns an empty array if there are no arguments, otherwise the arguments
          public String[] getArgs() {
            return args == null ? new String[0] : args;
          }
         
    // whether to cancel with a savepoint
          public boolean isWithSavepoint() {
            return withSavepoint;
          }
    // returns the optional target directory for the savepoint
          public String getSavepointTargetDirectory() {
            return targetDirectory;
          }
        }
        
• CommandLineOptions (base class for parsed command line options)
        package org.apache.flink.client.cli;
        
        import org.apache.commons.cli.CommandLine;
        
        import static org.apache.flink.client.cli.CliFrontendParser.HELP_OPTION;
        
        /**
         * Base class for all options parsed from the command line.
 * Contains options for printing help and the JobManager address.
         */
        public abstract class CommandLineOptions {
        
          private final boolean printHelp;
        
          protected CommandLineOptions(CommandLine line) {
            this.printHelp = line.hasOption(HELP_OPTION.getOpt());
          }
        
          public boolean isPrintHelp() {
            return printHelp;
          }
        }
        
• InfoOptions (options for the "info" command)
        package org.apache.flink.client.cli;
        
        import org.apache.commons.cli.CommandLine;
        
        /**
         * Command line options for the INFO command.
 * Extends ProgramOptions.
         */
        public class InfoOptions extends ProgramOptions {
        
          public InfoOptions(CommandLine line) throws CliArgsException {
            super(line);
          }
        }
        
• ListOptions (options for the "list" command)
package org.apache.flink.client.cli;

import org.apache.commons.cli.CommandLine;
        
        import static org.apache.flink.client.cli.CliFrontendParser.ALL_OPTION;
        import static org.apache.flink.client.cli.CliFrontendParser.RUNNING_OPTION;
        import static org.apache.flink.client.cli.CliFrontendParser.SCHEDULED_OPTION;
        
        /**
 * Command line options for the LIST command.
         */
        public class ListOptions extends CommandLineOptions {
        
    // whether to show running programs
  private final boolean showRunning;
    // whether to show scheduled programs
  private final boolean showScheduled;
    // whether to show all programs
          private final boolean showAll;
        
          public ListOptions(CommandLine line) {
            super(line);
            this.showAll = line.hasOption(ALL_OPTION.getOpt());
            this.showRunning = line.hasOption(RUNNING_OPTION.getOpt());
            this.showScheduled = line.hasOption(SCHEDULED_OPTION.getOpt());
          }
        
          public boolean showRunning() {
            return showRunning;
          }
        
          public boolean showScheduled() {
            return showScheduled;
          }
        
          public boolean showAll() {
            return showAll;
          }
        }
        
        
• ProgramOptions (program options)
        package org.apache.flink.client.cli;
        
        import org.apache.flink.api.common.ExecutionConfig;
        import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;
        
        import org.apache.commons.cli.CommandLine;
        
        import java.net.MalformedURLException;
        import java.net.URL;
        import java.util.ArrayList;
        import java.util.Arrays;
        import java.util.List;
        
        import static org.apache.flink.client.cli.CliFrontendParser.ARGS_OPTION;
        import static org.apache.flink.client.cli.CliFrontendParser.CLASSPATH_OPTION;
        import static org.apache.flink.client.cli.CliFrontendParser.CLASS_OPTION;
        import static org.apache.flink.client.cli.CliFrontendParser.DETACHED_OPTION;
        import static org.apache.flink.client.cli.CliFrontendParser.JAR_OPTION;
        import static org.apache.flink.client.cli.CliFrontendParser.PARALLELISM_OPTION;
        import static org.apache.flink.client.cli.CliFrontendParser.PYFILES_OPTION;
        import static org.apache.flink.client.cli.CliFrontendParser.PYMODULE_OPTION;
        import static org.apache.flink.client.cli.CliFrontendParser.PY_OPTION;
        import static org.apache.flink.client.cli.CliFrontendParser.SHUTDOWN_IF_ATTACHED_OPTION;
        import static org.apache.flink.client.cli.CliFrontendParser.YARN_DETACHED_OPTION;
        
        /**
 * Base class for command line options that refer to a JAR file program.
         */
        public abstract class ProgramOptions extends CommandLineOptions {
        
    // path to the JAR file
  private final String jarFilePath;
    // entry point class
  private final String entryPointClass;
    // classpaths, a list of URLs
  private final List<URL> classpaths;
    // program arguments
  private final String[] programArgs;

    // the parallelism
  private final int parallelism;
    // whether detached mode is enabled
  private final boolean detachedMode;
    // whether to shut down the cluster when the attached client exits
  private final boolean shutdownOnAttachedExit;
    // savepoint restore settings
  private final SavepointRestoreSettings savepointSettings;
        
          /**
   * Flag indicating whether the job is a Python job.
           */
          private final boolean isPython;
        
          protected ProgramOptions(CommandLine line) throws CliArgsException {
            super(line);
        
            String[] args = line.hasOption(ARGS_OPTION.getOpt()) ?
              line.getOptionValues(ARGS_OPTION.getOpt()) :
              line.getArgs();
        
            this.entryPointClass = line.hasOption(CLASS_OPTION.getOpt()) ?
              line.getOptionValue(CLASS_OPTION.getOpt()) : null;
        
            isPython = line.hasOption(PY_OPTION.getOpt()) | line.hasOption(PYMODULE_OPTION.getOpt())
              | "org.apache.flink.client.python.PythonGatewayServer".equals(entryPointClass);
    // If the option -py (--python) was specified
    if (line.hasOption(PY_OPTION.getOpt())) {
      // Cannot use option -py and -pym simultaneously.
      if (line.hasOption(PYMODULE_OPTION.getOpt())) {
        throw new CliArgsException("Cannot use option -py and -pym simultaneously.");
      }
      // The CLI cmd args which will be transferred to PythonDriver will be transformed as follows:
              // CLI cmd : -py ${python.py} pyfs [optional] ${py-files} [optional] ${other args}.
              // PythonDriver args: py ${python.py} [optional] pyfs [optional] ${py-files} [optional] ${other args}.
              // -------------------------------transformed-------------------------------------------------------
              // e.g. -py wordcount.py(CLI cmd) -----------> py wordcount.py(PythonDriver args)
              // e.g. -py wordcount.py -pyfs file:///AAA.py,hdfs:///BBB.py --input in.txt --output out.txt(CLI cmd)
              //  -----> -py wordcount.py -pyfs file:///AAA.py,hdfs:///BBB.py --input in.txt --output out.txt(PythonDriver args)
             // the new argument array
      String[] newArgs;
             // index at which the original args get copied in
      int argIndex;
             // if the --pyFiles option is present
              if (line.hasOption(PYFILES_OPTION.getOpt())) {
                newArgs = new String[args.length + 4];
                newArgs[2] = "-" + PYFILES_OPTION.getOpt();
                newArgs[3] = line.getOptionValue(PYFILES_OPTION.getOpt());
                argIndex = 4;
              } else {
                newArgs = new String[args.length + 2];
                argIndex = 2;
              }
              newArgs[0] = "-" + PY_OPTION.getOpt();
              newArgs[1] = line.getOptionValue(PY_OPTION.getOpt());
             // copy the original args over
              System.arraycopy(args, 0, newArgs, argIndex, args.length);
              args = newArgs;
            }
        
    // If the option -pym (--pyModule) was specified
    if (line.hasOption(PYMODULE_OPTION.getOpt())) {
      // If you specify the option -pym, you should specify the option --pyFiles simultaneously.
              if (!line.hasOption(PYFILES_OPTION.getOpt())) {
                throw new CliArgsException("-pym must be used in conjunction with `--pyFiles`");
              }
      // The CLI cmd args which will be transferred to PythonDriver will be transformed as follows:
              // CLI cmd : -pym ${py-module} -pyfs ${py-files} [optional] ${other args}.
              // PythonDriver args: -pym ${py-module} -pyfs ${py-files} [optional] ${other args}.
              // e.g. -pym AAA.fun -pyfs AAA.zip(CLI cmd) ----> -pym AAA.fun -pyfs AAA.zip(PythonDriver args)
              String[] newArgs = new String[args.length + 4];
              newArgs[0] = "-" + PYMODULE_OPTION.getOpt();
              newArgs[1] = line.getOptionValue(PYMODULE_OPTION.getOpt());
              newArgs[2] = "-" + PYFILES_OPTION.getOpt();
              newArgs[3] = line.getOptionValue(PYFILES_OPTION.getOpt());
             // copy the original args over
              System.arraycopy(args, 0, newArgs, 4, args.length);
              args = newArgs;
            }
         // if the command line has the jar option
    if (line.hasOption(JAR_OPTION.getOpt())) {
             // take the jar file path from the option value
      this.jarFilePath = line.getOptionValue(JAR_OPTION.getOpt());
            // otherwise, if this is not a Python job and arguments exist, the first argument is the jar path
            } else if (!isPython && args.length > 0) {
              jarFilePath = args[0];
              args = Arrays.copyOfRange(args, 1, args.length);
            }
            else {
              jarFilePath = null;
            }
        
            this.programArgs = args;
        
         // classpaths
    List<URL> classpaths = new ArrayList<URL>();
         // if the command line has the classpath option
    if (line.hasOption(CLASSPATH_OPTION.getOpt())) {
             // iterate over the option values (a String array)
              for (String path : line.getOptionValues(CLASSPATH_OPTION.getOpt())) {
                try {
                  classpaths.add(new URL(path));
                } catch (MalformedURLException e) {
                  throw new CliArgsException("Bad syntax for classpath: " + path);
                }
              }
            }
            this.classpaths = classpaths;
        
         // if the parallelism option is present
    if (line.hasOption(PARALLELISM_OPTION.getOpt())) {
             // the raw String value of the option
      String parString = line.getOptionValue(PARALLELISM_OPTION.getOpt());
      try {
                 // parse the parallelism
        parallelism = Integer.parseInt(parString);
                 // a parallelism <= 0 is rejected with a NumberFormatException
                if (parallelism <= 0) {
                  throw new NumberFormatException();
                }
              }
              catch (NumberFormatException e) {
                throw new CliArgsException("The parallelism must be a positive number: " + parString);
              }
            }
            else {
         // otherwise use the default parallelism
              parallelism = ExecutionConfig.PARALLELISM_DEFAULT;
            }
         // detached mode is enabled if either the detached option or the YARN detached option is set
            detachedMode = line.hasOption(DETACHED_OPTION.getOpt()) || line.hasOption(
              YARN_DETACHED_OPTION.getOpt());
            shutdownOnAttachedExit = line.hasOption(SHUTDOWN_IF_ATTACHED_OPTION.getOpt());
        
         // derive the savepoint restore settings from the command line
            this.savepointSettings = CliFrontendParser.createSavepointRestoreSettings(line);
          }
        
          public String getJarFilePath() {
            return jarFilePath;
          }
        
          public String getEntryPointClassName() {
            return entryPointClass;
          }
        
          public List<URL> getClasspaths() {
            return classpaths;
          }
        
          public String[] getProgramArgs() {
            return programArgs;
          }
        
          public int getParallelism() {
            return parallelism;
          }
        
          public boolean getDetachedMode() {
            return detachedMode;
          }
        
          public boolean isShutdownOnAttachedExit() {
            return shutdownOnAttachedExit;
          }
        
          public SavepointRestoreSettings getSavepointRestoreSettings() {
            return savepointSettings;
          }
        
          /**
   * Indicates whether the job is a Python job.
           */
          public boolean isPython() {
            return isPython;
          }
        }
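The -py argument rewriting in the constructor above is easier to follow on a concrete input. A minimal sketch (class and variable names invented for the example; the array layout mirrors the -py branch when --pyFiles is also present):

import java.util.Arrays;

public class PyArgsRewriteDemo {
  public static void main(String[] args) {
    // pretend the parser saw: -py wordcount.py -pyfs file:///AAA.py --input in.txt
    String pyValue = "wordcount.py";
    String pyFilesValue = "file:///AAA.py";
    String[] remaining = {"--input", "in.txt"};

    // same shape as ProgramOptions: four extra slots when -pyfs is present
    String[] newArgs = new String[remaining.length + 4];
    newArgs[0] = "-py";
    newArgs[1] = pyValue;
    newArgs[2] = "-pyfs";
    newArgs[3] = pyFilesValue;
    System.arraycopy(remaining, 0, newArgs, 4, remaining.length);

    // prints: [-py, wordcount.py, -pyfs, file:///AAA.py, --input, in.txt]
    System.out.println(Arrays.toString(newArgs));
  }
}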
        
        • RunOptions
        package org.apache.flink.client.cli;
        
        import org.apache.commons.cli.CommandLine;
        
        /**
 * Command line options for the RUN command.
         */
        public class RunOptions extends ProgramOptions {
        
          public RunOptions(CommandLine line) throws CliArgsException {
            super(line);
          }
        }
        
        • SavepointOptions
        package org.apache.flink.client.cli;
        
        import org.apache.commons.cli.CommandLine;
        
        import static org.apache.flink.client.cli.CliFrontendParser.JAR_OPTION;
        import static org.apache.flink.client.cli.CliFrontendParser.SAVEPOINT_DISPOSE_OPTION;
        
        /**
 * Command line options for the SAVEPOINT command.
         */
        public class SavepointOptions extends CommandLineOptions {
        
          private final String[] args;
    // whether to dispose of a savepoint
          private boolean dispose;
    // path of the savepoint to dispose of
          private String disposeSavepointPath;
          private String jarFile;
        
          public SavepointOptions(CommandLine line) {
            super(line);
            args = line.getArgs();
            dispose = line.hasOption(SAVEPOINT_DISPOSE_OPTION.getOpt());
            disposeSavepointPath = line.getOptionValue(SAVEPOINT_DISPOSE_OPTION.getOpt());
            jarFile = line.getOptionValue(JAR_OPTION.getOpt());
          }
        
          public String[] getArgs() {
            return args == null ? new String[0] : args;
          }
        
          public boolean isDispose() {
            return dispose;
          }
        
          public String getSavepointPath() {
            return disposeSavepointPath;
          }
        
          public String getJarFilePath() {
            return jarFile;
          }
        }
        
        
        • StopOptions
        package org.apache.flink.client.cli;
        
        import org.apache.commons.cli.CommandLine;
        
        import static org.apache.flink.client.cli.CliFrontendParser.STOP_AND_DRAIN;
        import static org.apache.flink.client.cli.CliFrontendParser.STOP_WITH_SAVEPOINT_PATH;
        
        /**
 * Command line options for the STOP command.
         */
        class StopOptions extends CommandLineOptions {
        
          private final String[] args;
        
          private final boolean savepointFlag;
        
  /** 
   * Optional target directory for the savepoint. Overwrites the cluster default.
   */
          private final String targetDirectory;
    // whether to advance to the end of event time when stopping
          private final boolean advanceToEndOfEventTime;
        
          StopOptions(CommandLine line) {
            super(line);
            this.args = line.getArgs();
        
            this.savepointFlag = line.hasOption(STOP_WITH_SAVEPOINT_PATH.getOpt());
            this.targetDirectory = line.getOptionValue(STOP_WITH_SAVEPOINT_PATH.getOpt());
        
            this.advanceToEndOfEventTime = line.hasOption(STOP_AND_DRAIN.getOpt());
          }
        
          String[] getArgs() {
            return args == null ? new String[0] : args;
          }
        
          boolean hasSavepointFlag() {
            return savepointFlag;
          }
        
          String getTargetDirectory() {
            return targetDirectory;
          }
        
          boolean shouldAdvanceToEndOfEventTime() {
            return advanceToEndOfEventTime;
          }
        }
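
Taken together, these fields back an invocation along the lines of flink stop -p <targetDirectory> -d <jobId> (assuming -p and -d are the short names behind STOP_WITH_SAVEPOINT_PATH and STOP_AND_DRAIN): the job is stopped with a savepoint written to the target directory, and -d first drains the pipeline by advancing to the end of event time.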
        
      • org.apache.flink.client.deployment

• ClusterDescriptor: the cluster descriptor interface
        package org.apache.flink.client.deployment;
        
        import org.apache.flink.client.program.ClusterClient;
        import org.apache.flink.runtime.jobgraph.JobGraph;
        import org.apache.flink.util.FlinkException;
        
        /**
 * A descriptor to deploy a cluster (e.g. Yarn or Mesos) and return a Client for Cluster communication.
         * 
         * @param <T> Type of the cluster id
         */
        public interface ClusterDescriptor<T> extends AutoCloseable {
        
          /**
   * Returns a String containing details about the cluster (NodeManagers, available memory, ...).
           *
           */
          String getClusterDescription();
        
          /**
   * Retrieves an existing Flink Cluster.
           * @param clusterId The unique identifier of the running cluster
           * @return Client for the cluster
           * @throws ClusterRetrieveException if the cluster client could not be retrieved
           */
          ClusterClient<T> retrieve(T clusterId) throws ClusterRetrieveException;
        
          /**
   * Triggers deployment of a cluster.
           * @param clusterSpecification Cluster specification defining the cluster to deploy
           * @return Client for the cluster
           * @throws ClusterDeploymentException if the cluster could not be deployed
           */
     // deploys a session cluster
          ClusterClient<T> deploySessionCluster(ClusterSpecification clusterSpecification) throws ClusterDeploymentException;
        
          /**
   * Deploys a per-job cluster with the given job on the cluster.
           *
   * @param clusterSpecification Initial cluster specification with which the Flink cluster is launched
   * @param jobGraph JobGraph with which the job cluster is started
   * @param detached true if the cluster should be stopped after the job completion without serving the result, otherwise false
           * @return Cluster client to talk to the Flink cluster
   * @throws ClusterDeploymentException if the cluster could not be deployed
           */
          ClusterClient<T> deployJobCluster(
            final ClusterSpecification clusterSpecification,
            final JobGraph jobGraph,
            final boolean detached) throws ClusterDeploymentException;
        
          /**
   * Terminates the cluster with the given cluster id.
           * @param clusterId identifying the cluster to shut down
           * @throws FlinkException if the cluster could not be terminated
           */
          void killCluster(T clusterId) throws FlinkException;
        }
        
        
• ClusterDeploymentException (cluster deployment exception)
        package org.apache.flink.client.deployment;
        
        import org.apache.flink.util.FlinkException;
        
        /**
 * Class which indicates a problem when deploying a Flink cluster.
 * Extends FlinkException.
         */
        public class ClusterDeploymentException extends FlinkException {
        
          private static final long serialVersionUID = -4327724979766139208L;
        
          public ClusterDeploymentException(String message) {
            super(message);
          }
        
          public ClusterDeploymentException(Throwable cause) {
            super(cause);
          }
        
          public ClusterDeploymentException(String message, Throwable cause) {
            super(message, cause);
          }
        }
        
        
• ClusterRetrieveException (cluster retrieval exception)
        package org.apache.flink.client.deployment;
        
        import org.apache.flink.util.FlinkException;
        
        /**
 * Exception which indicates that a cluster could not be retrieved.
 * Extends FlinkException.
         */
        public class ClusterRetrieveException extends FlinkException {
        
          private static final long serialVersionUID = 7718062507419172318L;
        
          public ClusterRetrieveException(String message) {
            super(message);
          }
        
          public ClusterRetrieveException(Throwable cause) {
            super(cause);
          }
        
          public ClusterRetrieveException(String message, Throwable cause) {
            super(message, cause);
          }
        }
        
• ClusterSpecification (cluster specification)
        package org.apache.flink.client.deployment;
        
        import org.apache.flink.configuration.Configuration;
        import org.apache.flink.configuration.ConfigurationUtils;
        import org.apache.flink.configuration.TaskManagerOptions;
        
        /**
 * Description of the cluster to start by the {@link ClusterDescriptor}.
         */
        public final class ClusterSpecification {
    // memory of the master, in MB
  private final int masterMemoryMB;
    // memory of each TaskManager, in MB
  private final int taskManagerMemoryMB;
    // number of TaskManagers
  private final int numberTaskManagers;
    // number of task slots per TaskManager
  private final int slotsPerTaskManager;
        
          private ClusterSpecification(int masterMemoryMB, int taskManagerMemoryMB, int numberTaskManagers, int slotsPerTaskManager) {
            this.masterMemoryMB = masterMemoryMB;
            this.taskManagerMemoryMB = taskManagerMemoryMB;
            this.numberTaskManagers = numberTaskManagers;
            this.slotsPerTaskManager = slotsPerTaskManager;
          }
        
          public int getMasterMemoryMB() {
            return masterMemoryMB;
          }
        
          public int getTaskManagerMemoryMB() {
            return taskManagerMemoryMB;
          }
        
          public int getNumberTaskManagers() {
            return numberTaskManagers;
          }
        
          public int getSlotsPerTaskManager() {
            return slotsPerTaskManager;
          }
        
          @Override
          public String toString() {
            return "ClusterSpecification{" +
              "masterMemoryMB=" + masterMemoryMB +
              ", taskManagerMemoryMB=" + taskManagerMemoryMB +
              ", numberTaskManagers=" + numberTaskManagers +
              ", slotsPerTaskManager=" + slotsPerTaskManager +
              '}';
          }
        
     // builds a ClusterSpecification from the configuration
          public static ClusterSpecification fromConfiguration(Configuration configuration) {
            int slots = configuration.getInteger(TaskManagerOptions.NUM_TASK_SLOTS, 1);
        
            int jobManagerMemoryMb = ConfigurationUtils.getJobManagerHeapMemory(configuration).getMebiBytes();
            int taskManagerMemoryMb = ConfigurationUtils.getTaskManagerHeapMemory(configuration).getMebiBytes();
        
            return new ClusterSpecificationBuilder()
              .setMasterMemoryMB(jobManagerMemoryMb)
              .setTaskManagerMemoryMB(taskManagerMemoryMb)
              .setNumberTaskManagers(1)
              .setSlotsPerTaskManager(slots)
              .createClusterSpecification();
          }
        
          /**
   * Builder for the {@link ClusterSpecification} instance.
           */
          public static class ClusterSpecificationBuilder {
        // default master memory: 768 MB
    private int masterMemoryMB = 768;
        // default TaskManager memory: 768 MB
    private int taskManagerMemoryMB = 768;
        // default number of TaskManagers: 1
    private int numberTaskManagers = 1;
        // default number of slots per TaskManager: 1
    private int slotsPerTaskManager = 1;
        
            public ClusterSpecificationBuilder setMasterMemoryMB(int masterMemoryMB) {
              this.masterMemoryMB = masterMemoryMB;
              return this;
            }
        
            public ClusterSpecificationBuilder setTaskManagerMemoryMB(int taskManagerMemoryMB) {
              this.taskManagerMemoryMB = taskManagerMemoryMB;
              return this;
            }
        
            public ClusterSpecificationBuilder setNumberTaskManagers(int numberTaskManagers) {
              this.numberTaskManagers = numberTaskManagers;
              return this;
            }
        
            public ClusterSpecificationBuilder setSlotsPerTaskManager(int slotsPerTaskManager) {
              this.slotsPerTaskManager = slotsPerTaskManager;
              return this;
            }
        
            public ClusterSpecification createClusterSpecification() {
              return new ClusterSpecification(
                masterMemoryMB,
                taskManagerMemoryMB,
                numberTaskManagers,
                slotsPerTaskManager);
            }
          }
        }
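
For reference, a minimal sketch of how this builder is used (the memory sizes and counts below are made up for the example):

import org.apache.flink.client.deployment.ClusterSpecification;

public class ClusterSpecDemo {
  public static void main(String[] args) {
    ClusterSpecification spec = new ClusterSpecification.ClusterSpecificationBuilder()
        .setMasterMemoryMB(1024)          // JobManager memory
        .setTaskManagerMemoryMB(2048)     // memory per TaskManager
        .setNumberTaskManagers(2)
        .setSlotsPerTaskManager(4)
        .createClusterSpecification();
    // prints: ClusterSpecification{masterMemoryMB=1024, taskManagerMemoryMB=2048, numberTaskManagers=2, slotsPerTaskManager=4}
    System.out.println(spec);
  }
}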
        
• StandaloneClusterDescriptor (descriptor for a standalone cluster)
        package org.apache.flink.client.deployment;
        
        import org.apache.flink.client.program.rest.RestClusterClient;
        import org.apache.flink.configuration.Configuration;
        import org.apache.flink.configuration.JobManagerOptions;
        import org.apache.flink.runtime.jobgraph.JobGraph;
        import org.apache.flink.util.FlinkException;
        import org.apache.flink.util.Preconditions;
        
        /**
 * A deployment descriptor for an existing cluster.
         */
        public class StandaloneClusterDescriptor implements ClusterDescriptor<StandaloneClusterId> {
        
          private final Configuration config;
        
          public StandaloneClusterDescriptor(Configuration config) {
            this.config = Preconditions.checkNotNull(config);
          }
        
    // returns a description of the cluster
          @Override
          public String getClusterDescription() {
            String host = config.getString(JobManagerOptions.ADDRESS, "");
            int port = config.getInteger(JobManagerOptions.PORT, -1);
            return "Standalone cluster at " + host + ":" + port;
          }
        
    // retrieves a REST client for the existing cluster
          @Override
          public RestClusterClient<StandaloneClusterId> retrieve(StandaloneClusterId standaloneClusterId) throws ClusterRetrieveException {
            try {
              return new RestClusterClient<>(config, standaloneClusterId);
            } catch (Exception e) {
              throw new ClusterRetrieveException("Couldn't retrieve standalone cluster", e);
            }
          }
        
    // deploying a session cluster is not supported for standalone clusters
          @Override
          public RestClusterClient<StandaloneClusterId> deploySessionCluster(ClusterSpecification clusterSpecification) {
            throw new UnsupportedOperationException("Can't deploy a standalone cluster.");
          }
        
    // deploying a per-job cluster is not supported either
          @Override
          public RestClusterClient<StandaloneClusterId> deployJobCluster(
              ClusterSpecification clusterSpecification,
              JobGraph jobGraph,
              boolean detached) {
            throw new UnsupportedOperationException("Can't deploy a standalone per-job cluster.");
          }
        
    // killing a standalone cluster is not supported
          @Override
          public void killCluster(StandaloneClusterId clusterId) throws FlinkException {
            throw new UnsupportedOperationException("Cannot terminate a standalone cluster.");
          }
        
          @Override
          public void close() throws Exception {
            // nothing to do
          }
        }
        
        
• StandaloneClusterId (standalone cluster id)
        package org.apache.flink.client.deployment;
        
        /**
         * Identifier for standalone clusters.
         */
        public class StandaloneClusterId {
          private static final StandaloneClusterId INSTANCE = new StandaloneClusterId();
        
          private StandaloneClusterId() {}
        
          public static StandaloneClusterId getInstance() {
            return INSTANCE;
          }
        }
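
This is the eagerly initialized singleton mentioned earlier at DefaultCLI.getClusterId: the single instance is created when the class is loaded, so getInstance needs no synchronization. For contrast, a minimal sketch of the lazy variant (illustrative only, not Flink code):

public class LazyClusterId {
  private static LazyClusterId instance;

  private LazyClusterId() {}

  // lazy initialization has to synchronize (or use an inner holder class)
  // to stay safe under concurrent access; eager initialization gets this for free
  public static synchronized LazyClusterId getInstance() {
    if (instance == null) {
      instance = new LazyClusterId();
    }
    return instance;
  }
}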
        
      • org.apache.flink.client.program

• ClusterClient (cluster client)
        package org.apache.flink.client.program;
        
        import org.apache.flink.api.common.JobExecutionResult;
        import org.apache.flink.api.common.JobID;
        import org.apache.flink.api.common.JobSubmissionResult;
        import org.apache.flink.api.common.Plan;
        import org.apache.flink.configuration.Configuration;
        import org.apache.flink.core.fs.Path;
        import org.apache.flink.optimizer.CompilerException;
        import org.apache.flink.optimizer.DataStatistics;
        import org.apache.flink.optimizer.Optimizer;
        import org.apache.flink.optimizer.costs.DefaultCostEstimator;
        import org.apache.flink.optimizer.plan.FlinkPlan;
        import org.apache.flink.optimizer.plan.OptimizedPlan;
        import org.apache.flink.optimizer.plan.StreamingPlan;
        import org.apache.flink.optimizer.plandump.PlanJSONDumpGenerator;
        import org.apache.flink.optimizer.plantranslate.JobGraphGenerator;
        import org.apache.flink.runtime.client.JobStatusMessage;
        import org.apache.flink.runtime.jobgraph.JobGraph;
        import org.apache.flink.runtime.jobgraph.JobStatus;
        import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;
        import org.apache.flink.runtime.jobmaster.JobResult;
        import org.apache.flink.runtime.messages.Acknowledge;
        import org.apache.flink.util.FlinkException;
        import org.apache.flink.util.OptionalFailure;
        import org.apache.flink.util.Preconditions;
        
        import org.slf4j.Logger;
        import org.slf4j.LoggerFactory;
        
        import javax.annotation.Nonnull;
        import javax.annotation.Nullable;
        
        import java.net.URISyntaxException;
        import java.net.URL;
        import java.util.Collection;
        import java.util.List;
        import java.util.Map;
        import java.util.concurrent.CompletableFuture;
        
        /**
         * Encapsulates the functionality necessary to submit a program to a remote cluster.
 * @param <T> type of the cluster id
         */
 // implements the AutoCloseable interface (JDK 1.7, Josh Bloch)
        public abstract class ClusterClient<T> implements AutoCloseable {
    // obtain the logger through the logger factory (getLogger works off the runtime class returned by getClass())
          protected final Logger log = LoggerFactory.getLogger(getClass());
        
  /** The optimizer used in the optimization of batch programs. */
          final Optimizer compiler;
        
  /** Configuration of the client. */
          private final Configuration flinkConfig;
        
          /**
           * For interactive invocations, the job results are only available after the ContextEnvironment has been run inside the user JAR. We pass the Client to every instance of the ContextEnvironment which lets us access the execution result here.
   */
    // result of the most recently executed job
          protected JobExecutionResult lastJobExecutionResult;
        
          /** Switch for blocking/detached job submission of the client. */
    // flag for detached job submission; false means blocking submission
          private boolean detachedJobSubmission = false;
        
          // ------------------------------------------------------------------------
  //                            Construction
          // ------------------------------------------------------------------------
        
  /**
   * Creates an instance that submits the programs to the JobManager defined in the configuration.
   * This method will try to resolve the JobManager hostname and throw an exception if that is not possible.
   *
   * @param flinkConfig The config used to obtain the job-manager's address, and used to configure the optimizer.
   */
          public ClusterClient(Configuration flinkConfig) {
            this.flinkConfig = Preconditions.checkNotNull(flinkConfig);
            this.compiler = new Optimizer(new DataStatistics(), new DefaultCostEstimator(), flinkConfig);
          }
        
  /**
   * User overridable hook to close the client, possibly closes internal services.
   * @deprecated use the {@link #close()} instead. This method stays for backwards compatibility.
   */
          public void shutdown() throws Exception {
            close();
          }
        
          @Override
          public void close() throws Exception {
        
          }
        
          // ------------------------------------------------------------------------
  //  Access to the Program's Plan
          // ------------------------------------------------------------------------
    // returns the optimized plan as JSON; params: optimizer, packaged program, parallelism
          public static String getOptimizedPlanAsJson(Optimizer compiler, PackagedProgram prog, int parallelism)
              throws CompilerException, ProgramInvocationException {
         // JSON dump generator for the plan
            PlanJSONDumpGenerator jsonGen = new PlanJSONDumpGenerator();
            return jsonGen.getOptimizerPlanAsJSON((OptimizedPlan) getOptimizedPlan(compiler, prog, parallelism));
          }
         // returns the optimized plan
          public static FlinkPlan getOptimizedPlan(Optimizer compiler, PackagedProgram prog, int parallelism)
              throws CompilerException, ProgramInvocationException {
         // capture the context class loader of the current thread
    final ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
      // set the current thread's context class loader to the program's user-code class loader
      Thread.currentThread().setContextClassLoader(prog.getUserCodeClassLoader());
        
      // temporary hack to support the optimizer plan preview
      OptimizerPlanEnvironment env = new OptimizerPlanEnvironment(compiler);
             // if a parallelism was given (> 0), apply it to the plan environment
              if (parallelism > 0) {
                env.setParallelism(parallelism);
              }
              return env.getOptimizedPlan(prog);
    } finally {
      // restore the previously captured context class loader
      Thread.currentThread().setContextClassLoader(contextClassLoader);
    }
          }
     // returns the optimized plan for a batch Plan
  public static OptimizedPlan getOptimizedPlan(Optimizer compiler, Plan p, int parallelism) throws CompilerException {
         // obtain the logger through the logger factory
    Logger log = LoggerFactory.getLogger(ClusterClient.class);

         // if the given parallelism > 0 and the plan's default parallelism <= 0
    if (parallelism > 0 && p.getDefaultParallelism() <= 0) {
            // log the change of the plan's default parallelism
      log.debug("Changing plan default parallelism from {} to {}", p.getDefaultParallelism(), parallelism);
             // make the given parallelism the plan's default
      p.setDefaultParallelism(parallelism);
    }
            log.debug("Set parallelism {}, plan default parallelism {}", parallelism, p.getDefaultParallelism());
        
         // compile the plan and return the optimized plan
            return compiler.compile(p);
          }
        
          // ------------------------------------------------------------------------
  //  Program submission / execution
          // ------------------------------------------------------------------------
        
  /**
   * General purpose method to run a user jar from the CliFrontend in either blocking or detached mode, depending on whether {@code setDetached(true)} or {@code setDetached(false)}.
   *
   * @param prog the packaged program
   * @param parallelism the parallelism to execute the contained Flink job
   * @return The result of the execution
   * @throws ProgramMissingJobException if the program did not contain a Flink job
   * @throws ProgramInvocationException if the program could not be invoked
   */
     // runs the packaged program with the given parallelism and returns the job submission result
          public JobSubmissionResult run(PackagedProgram prog, int parallelism)
              throws ProgramInvocationException, ProgramMissingJobException {
         // capture the current thread's context class loader
            final ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
      // set the context class loader to the packaged program's user-code class loader
      Thread.currentThread().setContextClassLoader(prog.getUserCodeClassLoader());
        
             // log whether detached mode is used
              log.info("Starting program (detached: {})", isDetached());
        
             // collect all libraries the program needs, essentially a list of URLs
              final List<URL> libraries = prog.getAllLibraries();
             // initialize the context environment factory
              ContextEnvironmentFactory factory = new ContextEnvironmentFactory(this, libraries,
                  prog.getClasspaths(), prog.getUserCodeClassLoader(), parallelism, isDetached(),
                  prog.getSavepointSettings());
              ContextEnvironment.setAsContext(factory);
        
              try {
        // invoke the main method
                prog.invokeInteractiveModeForExecution();
                  // if there is no last job execution result, throw ProgramMissingJobException
                if (lastJobExecutionResult == null) {
                  throw new ProgramMissingJobException("The program didn't contain a Flink job.");
                }
                 // return the execution result of the last job
                return this.lastJobExecutionResult;
              } finally {
                 /* unsetContext() actually calls resetContextEnvironment(), which
                    sets the context environment factory to null and also clears the
                    factory stored in the ThreadLocal. A ThreadLocal is per-thread
                    storage: data stored through it is only visible to the thread
                    that stored it. Not expanded on here; to be revisited later.
                  */
                ContextEnvironment.unsetContext();
              }
            }
    finally {
      // restore the previously captured context class loader
      Thread.currentThread().setContextClassLoader(contextClassLoader);
            }
          }
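
  /* Aside (annotation, not Flink source): the pattern used twice above is the
     standard way to scope a thread's context ClassLoader to a single call:

       ClassLoader saved = Thread.currentThread().getContextClassLoader();
       try {
         Thread.currentThread().setContextClassLoader(userCodeClassLoader);
         // ... work that resolves classes through the context ClassLoader ...
       } finally {
         Thread.currentThread().setContextClassLoader(saved);
       }

     The finally block guarantees the loader is restored even if the user program throws. */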
    // runs the plan and returns the job submission result
          public JobSubmissionResult run(
            Plan plan,
            List<URL> libraries,
            List<URL> classpaths,
            ClassLoader classLoader,
            int parallelism,
            SavepointRestoreSettings savepointSettings) throws CompilerException, ProgramInvocationException {
          // get the optimized plan
            OptimizedPlan optPlan = getOptimizedPlan(compiler, plan, parallelism);
            return run(optPlan, libraries, classpaths, classLoader, savepointSettings);
          }
    // runs the compiled plan and returns the job submission result
          public JobSubmissionResult run(
            FlinkPlan compiledPlan,
            List<URL> libraries,
            List<URL> classpaths,
            ClassLoader classLoader,
            SavepointRestoreSettings savepointSettings) throws ProgramInvocationException {
         // build the job graph
            JobGraph job = getJobGraph(flinkConfig, compiledPlan, libraries, classpaths, savepointSettings);
            return submitJob(job, classLoader);
          }
        
          /**
   * Requests the {@link JobStatus} of the job with the given {@link JobID}.
           */
     // asynchronous via CompletableFuture: requests the job status for the given job id
          public abstract CompletableFuture<JobStatus> getJobStatus(JobID jobId);
        
          /**
   * Cancels a job identified by the job id.
           * @param jobId the job id
           * @throws Exception In case an error occurred.
           */
          public abstract void cancel(JobID jobId) throws Exception;
        
          /**
           * Cancels a job identified by the job id and triggers a savepoint.
   *
   * @param jobId the job id
   * @param savepointDirectory directory the savepoint should be written to
           * @return path where the savepoint is located
           * @throws Exception In case an error occurred.
           */
          public abstract String cancelWithSavepoint(JobID jobId, @Nullable String savepointDirectory) throws Exception;
        
  /**
   * Stops a program on Flink cluster whose job-manager is configured in this client's configuration.
   *
   * Stopping works only for streaming programs. Be aware, that the program might continue to run for a while after sending the stop command, because after sources stopped to emit data all operators need to finish processing.
   *
   * @param jobId the job ID of the streaming program to stop
   * @param advanceToEndOfEventTime flag indicating if the source should inject a {@code MAX_WATERMARK} in the pipeline
   * @param savepointDirectory directory the savepoint should be written to
   * @return a {@link CompletableFuture} containing the path where the savepoint is located
   * @throws Exception
   * If the job ID is invalid (ie, is unknown or refers to a batch job) or if sending the stop signal failed. That might be due to an I/O problem, ie, the job-manager is unreachable.
   */
          public abstract String stopWithSavepoint(final JobID jobId, final boolean advanceToEndOfEventTime, @Nullable final String savepointDirectory) throws Exception;
        
          /**
   * Triggers a savepoint for the job identified by the job id. The savepoint will be written to the given savepoint directory, or {@link org.apache.flink.configuration.CheckpointingOptions#SAVEPOINT_DIRECTORY} if it is null.
           *
           * @param jobId job id
           * @param savepointDirectory directory the savepoint should be written to
           * @return path future where the savepoint is located
           * @throws FlinkException if no connection to the cluster could be established
           */
          public abstract CompletableFuture<String> triggerSavepoint(JobID jobId, @Nullable String savepointDirectory) throws FlinkException;
        
          public abstract CompletableFuture<Acknowledge> disposeSavepoint(String savepointPath) throws FlinkException;
        
          /**
   * Lists the currently running and finished jobs on the cluster.
   * @return future collection of running and finished jobs
           * @throws Exception if no connection to the cluster could be established
           */
          public abstract CompletableFuture<Collection<JobStatusMessage>> listJobs() throws Exception;
        
          /**
           * Requests and returns the accumulators for the given job identifier. Accumulators can be requested while a job is running or after it has finished. The default class loader is used to deserialize the incoming accumulator results.
           * 请求并返回给定任务标识对应任务的累加器。累加器既可以在任务运行期间请求, 也可以在任务结束之后请求。默认的类加载器被用于反序列化返回的累加器结果
           * @param jobID The job identifier of a job.
           * @return A Map containing the accumulator's name and its value.
           * 返回一个含有累加器名称(K)和值(V)的 Map
           */
          public Map<String, OptionalFailure<Object>> getAccumulators(JobID jobID) throws Exception {
            return getAccumulators(jobID, ClassLoader.getSystemClassLoader());
          }
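          // 仅为示意(非源码): 任务运行中即可拉取累加器 —
          //   Map<String, OptionalFailure<Object>> accs = client.getAccumulators(jobId);
          //   accs.forEach((name, value) -> System.out.println(name + " = " + value));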
        
          /**
           * Requests and returns the accumulators for the given job identifier. Accumulators can be requested while a job is running or after it has finished.
           * 请求并返回已得任务标识的累加器
           *
           * @param jobID The job identifier of a job.
           * @param loader The class loader for deserializing the accumulator results.
           * @return A Map containing the accumulator's name and its value.
           */
          public abstract Map<String, OptionalFailure<Object>> getAccumulators(JobID jobID, ClassLoader loader) throws Exception;
        
          // ------------------------------------------------------------------------
          //  Internal translation methods 内部转换方法
          // ------------------------------------------------------------------------
        
              // 获取任务视图 参数: flink配置文件, 打包的程序, 优化后的计划, 保存点恢复设置
          public static JobGraph getJobGraph(Configuration flinkConfig, PackagedProgram prog, FlinkPlan optPlan, SavepointRestoreSettings savepointSettings) throws ProgramInvocationException {
            return getJobGraph(flinkConfig, optPlan, prog.getAllLibraries(), prog.getClasspaths(), savepointSettings);
          }
        
             // 获取任务视图 参数: flink配置文件, 优化后的计划, jar文件列表, 类路径列表,
             // 保存点恢复设置
          public static JobGraph getJobGraph(Configuration flinkConfig, FlinkPlan optPlan, List<URL> jarFiles, List<URL> classpaths, SavepointRestoreSettings savepointSettings) {
            JobGraph job;
                  // 如果优化后的计划是一个流式计划
            if (optPlan instanceof StreamingPlan) {
                     // 将optplan强转为StreamingPlan再获取任务视图
              job = ((StreamingPlan) optPlan).getJobGraph();
                     // 设置保存点恢复设置
              job.setSavepointRestoreSettings(savepointSettings);
            } else {
                     // 根据配置初始化任务视图生成器
              JobGraphGenerator gen = new JobGraphGenerator(flinkConfig);
                     // 编译优化过的计划获取任务视图
              job = gen.compileJobGraph((OptimizedPlan) optPlan);
            }
                 // 遍历jar文件URL列表
            for (URL jar : jarFiles) {
              try {
                          // 将该jar的路径添加到任务视图中(将jar转化为统一标识符传入)
                job.addJar(new Path(jar.toURI()));
              } catch (URISyntaxException e) {
                         // 抛出URL无效异常
                throw new RuntimeException("URL is invalid. This should not happen.", e);
              }
            }
                 // 设置任务的类路径
            job.setClasspaths(classpaths);
        
            return job;
          }
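          // 仅为示意(非源码): 上面两个重载的典型调用方式 —
          //   JobGraph jobGraph = ClusterClient.getJobGraph(
          //       flinkConfig, packagedProgram, optimizedPlan, SavepointRestoreSettings.none());
          // 其中 optimizedPlan 一般来自 OptimizerPlanEnvironment#getOptimizedPlan (见后文)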
        
          // ------------------------------------------------------------------------
          //  Abstract methods to be implemented by the cluster specific Client
             // 由特定集群的客户端实现的抽象方法
          // ------------------------------------------------------------------------
        
          /**
           * Returns an URL (as a string) to the JobManager web interface.
            * 以字符串形式返回指向 JobManager web 界面的 URL
           */
          public abstract String getWebInterfaceURL();
        
          /**
           * Returns the cluster id identifying the cluster to which the client is connected.
            * 返回集群 id, 用于标识该客户端所连接的集群
           *
           * @return cluster id of the connected cluster
           */
          public abstract T getClusterId();
        
          /**
           * Set the mode of this client (detached or blocking job execution).
           * 设置客户端执行模式: 分离还是阻塞式执行
           * @param isDetached If true, the client will submit programs detached via the {@code run} method
           */
          public void setDetached(boolean isDetached) {
            this.detachedJobSubmission = isDetached;
          }
        
          /**
           * A flag to indicate whether this clients submits jobs detached.
           * 表明该客户端是否提交了分离式执行的任务的标签
           * @return True if the Client submits detached, false otherwise
           */
          public boolean isDetached() {
            return detachedJobSubmission;
          }
        
          /**
           * Return the Flink configuration object.
           * 返回Flink配置文件对象
           * @return The Flink configuration object
           */
          public Configuration getFlinkConfiguration() {
            return flinkConfig.clone(); // 这是一个deepcopy, clone新创建了一个对象
          }
        
          /**
           * Calls the subclasses' submitJob method. It may decide to simply call one of the run methods or it may perform some custom job submission logic.
            * 调用子类的 submitJob 方法。它可能只是简单地调用某个 run 方法, 也可能执行一些自定义的任务提交逻辑
           * @param jobGraph The JobGraph to be submitted
           * @return JobSubmissionResult
           */
          public abstract JobSubmissionResult submitJob(JobGraph jobGraph, ClassLoader classLoader) throws ProgramInvocationException;
        
          /**
           * Submit the given {@link JobGraph} to the cluster.
           * 提交已得的任务视图到集群
           * @param jobGraph to submit
           * @return Future which is completed with the {@link JobSubmissionResult}
           */
          public abstract CompletableFuture<JobSubmissionResult> submitJob(@Nonnull JobGraph jobGraph);
        
          /**
           * Request the {@link JobResult} for the given {@link JobID}.
           * 根据已得的任务Id请求任务结果, 任务Id不能为空
           * @param jobId for which to request the {@link JobResult}
           * @return Future which is completed with the {@link JobResult}
           */
          public abstract CompletableFuture<JobResult> requestJobResult(@Nonnull JobID jobId);
             
             // 终止集群; 默认实现直接抛出不支持的操作异常, 由支持该操作的子类覆盖
          public void shutDownCluster() {
            throw new UnsupportedOperationException("The " + getClass().getSimpleName() + " does not support shutDownCluster.");
          }
        }
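
ClusterClient 这组 savepoint 相关的抽象方法串起来就是一次完整的保存点生命周期: 触发 → 等待完成 → 不再需要时释放。下面是一个最小示意(非 Flink 源码, client 与 jobId 均为假设已就绪的外部输入):

```java
import org.apache.flink.api.common.JobID;
import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.runtime.messages.Acknowledge;

public class SavepointLifecycleDemo {

    // 仅为示意: client 与 jobId 为假设的外部输入
    static void triggerAndDispose(ClusterClient<?> client, JobID jobId) throws Exception {
        // 异步触发保存点; 目录传 null 时回退到配置的默认保存点目录
        String path = client.triggerSavepoint(jobId, "/tmp/savepoints").get();
        System.out.println("savepoint at " + path);

        // 保存点不会被自动清理, 不再需要时应显式 dispose
        Acknowledge ack = client.disposeSavepoint(path).get();
        System.out.println("disposed: " + ack);
    }
}
```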
        
        • ContextEnvironment 上下文环境
        package org.apache.flink.client.program;
        
        import org.apache.flink.api.common.ExecutionConfig;
        import org.apache.flink.api.common.InvalidProgramException;
        import org.apache.flink.api.common.JobExecutionResult;
        import org.apache.flink.api.common.JobSubmissionResult;
        import org.apache.flink.api.common.Plan;
        import org.apache.flink.api.java.ExecutionEnvironment;
        import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;
        
        import java.net.URL;
        import java.util.List;
        
        /**
         * Execution Environment for remote execution with the Client.
         * 客户端远程执行的执行环境
         */
        public class ContextEnvironment extends ExecutionEnvironment {
        
            // 集群客户端
          private final ClusterClient<?> client;
            // 是否分离执行?
          private final boolean detached;
            // 要附上的jar文件(URL列表)
          private final List<URL> jarFilesToAttach;
            // 要附上的类路径(URL列表)
          private final List<URL> classpathsToAttach;
            // 用户代码加载器
          private final ClassLoader userCodeClassLoader;
            // 保存点恢复设置
          private final SavepointRestoreSettings savepointSettings;
            // 是否已经被请求过了(默认为false)
          private boolean alreadyCalled;
        
          public ContextEnvironment(ClusterClient<?> remoteConnection, List<URL> jarFiles, List<URL> classpaths,
                ClassLoader userCodeClassLoader, SavepointRestoreSettings savepointSettings, boolean detached) {
            this.client = remoteConnection;
            this.jarFilesToAttach = jarFiles;
            this.classpathsToAttach = classpaths;
            this.userCodeClassLoader = userCodeClassLoader;
            this.savepointSettings = savepointSettings;
        
            this.detached = detached;
            this.alreadyCalled = false;
          }
        
             // 根据任务名执行任务, 返回任务执行结果
          @Override
          public JobExecutionResult execute(String jobName) throws Exception {
                 // 验证在分离模式下执行是否只被调用一次
            verifyExecuteIsCalledOnceWhenInDetachedMode();
                 // 根据任务名创建程序计划
            final Plan plan = createProgramPlan(jobName);
            final JobSubmissionResult jobSubmissionResult = client.run(
              plan,
              jarFilesToAttach,
              classpathsToAttach,
              userCodeClassLoader,
              getParallelism(),
              savepointSettings);
                 // 获取最终的任务执行结果
            lastJobExecutionResult = jobSubmissionResult.getJobExecutionResult();
            return lastJobExecutionResult;
          }
            // 验证在分离模式下执行是否只被调用一次
          private void verifyExecuteIsCalledOnceWhenInDetachedMode() {
                // 如果已经被请求过 且 是分离模式
            if (alreadyCalled && detached) {
                     // 抛出无效的程序异常
              throw new InvalidProgramException(DetachedJobExecutionResult.DETACHED_MESSAGE + DetachedJobExecutionResult.EXECUTE_TWICE_MESSAGE);
            }
                 // 将是否已被请求过了的标签设置为true
            alreadyCalled = true;
          }
        
            // 重写toString
          @Override
          public String toString() {
            return "Context Environment (parallelism = " + (getParallelism() == ExecutionConfig.PARALLELISM_DEFAULT ? "default" : getParallelism()) + ")";
          }
        
          public ClusterClient<?> getClient() {
            return this.client;
          }
        
          public List<URL> getJars(){
            return jarFilesToAttach;
          }
        
          public List<URL> getClasspaths(){
            return classpathsToAttach;
          }
        
          public ClassLoader getUserCodeClassLoader() {
            return userCodeClassLoader;
          }
        
          public SavepointRestoreSettings getSavepointRestoreSettings() {
            return savepointSettings;
          }
        
          // --------------------------------------------------------------------------------------------
             // 又是个坑爹的命名: 明明是把传入的工厂注册为当前线程的上下文环境工厂, 非要叫 setAsContext, 容易误导人
          static void setAsContext(ContextEnvironmentFactory factory) {
                 // 该方法内会先检查工厂类是否为空, 再将保存在threadlocal类中的上下文环境工厂设置为该上下文工厂实例
                 // ps: private static final ThreadLocal<ExecutionEnvironmentFactory> threadLocalContextEnvironmentFactory = new ThreadLocal<>();
                  // 这本来就是一个以执行环境工厂类为泛型的 ThreadLocal
            initializeContextEnvironment(factory);
          }
        
             // 前面讲过, 还是同样的转发写法, 不再赘述
          static void unsetContext() {
            resetContextEnvironment();
          }
        }
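
setAsContext / unsetContext 背后的 ThreadLocal 机制, 可以用一段与 Flink 无关的纯 JDK 代码最小复现(仅为示意, EnvFactory 等名字都是假设的):

```java
// 与 Flink 无关的最小复现: ExecutionEnvironment 里那套 ThreadLocal 工厂机制大致如此
public class ThreadLocalFactoryDemo {

    interface EnvFactory { String create(); }

    private static final ThreadLocal<EnvFactory> contextFactory = new ThreadLocal<>();

    static void initializeContext(EnvFactory factory) {
        if (factory == null) {
            throw new NullPointerException("factory must not be null");
        }
        contextFactory.set(factory);       // 对应 initializeContextEnvironment
    }

    static void resetContext() {
        contextFactory.remove();           // 对应 resetContextEnvironment
    }

    static String getEnvironment() {
        EnvFactory f = contextFactory.get();
        return f != null ? f.create() : "local-environment"; // 无上下文时回退本地环境
    }

    public static void main(String[] args) {
        System.out.println(getEnvironment());            // local-environment
        initializeContext(() -> "context-environment");
        System.out.println(getEnvironment());            // context-environment
        resetContext();
        System.out.println(getEnvironment());            // local-environment
    }
}
```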
        
        • ContextEnvironmentFactory 上下文环境工厂
        package org.apache.flink.client.program;
        
        import org.apache.flink.api.common.InvalidProgramException;
        import org.apache.flink.api.java.ExecutionEnvironment;
        import org.apache.flink.api.java.ExecutionEnvironmentFactory;
        import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;
        
        import java.net.URL;
        import java.util.List;
        
        /**
         * The factory that instantiates the environment to be used when running jobs that are submitted through a pre-configured client connection.
          * 该工厂负责实例化执行环境, 供通过预先配置好的客户端连接提交运行的任务使用
         * This happens for example when a job is submitted from the command line. 比如一个任务从命令行被提交
         */
        public class ContextEnvironmentFactory implements ExecutionEnvironmentFactory {
        
          private final ClusterClient<?> client;
        
          private final List<URL> jarFilesToAttach;
        
          private final List<URL> classpathsToAttach;
        
          private final ClassLoader userCodeClassLoader;
        
          private final int defaultParallelism;
        
          private final boolean isDetached;
        
          private final SavepointRestoreSettings savepointSettings;
        
          private boolean alreadyCalled;
        
          public ContextEnvironmentFactory(ClusterClient<?> client, List<URL> jarFilesToAttach,
              List<URL> classpathsToAttach, ClassLoader userCodeClassLoader, int defaultParallelism,
              boolean isDetached, SavepointRestoreSettings savepointSettings) {
            this.client = client;
            this.jarFilesToAttach = jarFilesToAttach;
            this.classpathsToAttach = classpathsToAttach;
            this.userCodeClassLoader = userCodeClassLoader;
            this.defaultParallelism = defaultParallelism;
            this.isDetached = isDetached;
            this.savepointSettings = savepointSettings;
            this.alreadyCalled = false;
          }
        
             // 创建执行环境
          @Override
          public ExecutionEnvironment createExecutionEnvironment() {
            verifyCreateIsCalledOnceWhenInDetachedMode();
        
            final ContextEnvironment environment = new ContextEnvironment(
                client, jarFilesToAttach, classpathsToAttach, userCodeClassLoader, savepointSettings, isDetached);
            if (defaultParallelism > 0) {
              environment.setParallelism(defaultParallelism);
            }
            return environment;
          }
        
          private void verifyCreateIsCalledOnceWhenInDetachedMode() {
            if (isDetached && alreadyCalled) {
              throw new InvalidProgramException("Multiple environments cannot be created in detached mode");
            }
            alreadyCalled = true;
          }
        }
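
把工厂装配进线程上下文之后, 用户程序里的 getExecutionEnvironment() 拿到的就是 ContextEnvironment。下面是一个最小示意(非源码; 由于 setAsContext 是包私有的, 示例假设放在 org.apache.flink.client.program 包内, client 也为假设输入):

```java
package org.apache.flink.client.program;  // setAsContext 是包私有的, 示例需与其同包

import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;

import java.net.URL;
import java.util.Collections;
import java.util.List;

class ContextFactoryDemo {

    // 仅为示意: client 为假设已创建好的 ClusterClient
    static void install(ClusterClient<?> client) {
        List<URL> empty = Collections.emptyList();
        ContextEnvironmentFactory factory = new ContextEnvironmentFactory(
            client, empty, empty, ContextFactoryDemo.class.getClassLoader(),
            4 /* defaultParallelism */, false /* isDetached */,
            SavepointRestoreSettings.none());
        ContextEnvironment.setAsContext(factory);

        // 此后用户程序里的 getExecutionEnvironment() 返回的就是 ContextEnvironment
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        System.out.println(env);   // Context Environment (parallelism = 4)

        ContextEnvironment.unsetContext();   // 用完清理, 避免污染当前线程的后续逻辑
    }
}
```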
        
        • DetachedJobExecutionResult 分离的任务的执行结果
        package org.apache.flink.client.program;
        
        import org.apache.flink.api.common.InvalidProgramException;
        import org.apache.flink.api.common.JobExecutionResult;
        import org.apache.flink.api.common.JobID;
        
        import java.util.Map;
        
        /**
         * The {@link JobExecutionResult} returned by a {@link ContextEnvironment} when executing a job in detached mode.
          * 当一个任务以分离模式执行时, 由上下文环境返回的任务执行结果
         */
        public final class DetachedJobExecutionResult extends JobExecutionResult {
            // 以分离模式提交
          static final String DETACHED_MESSAGE = "Job was submitted in detached mode. ";
            // 执行两次会发送的消息: 只允许调用一次
          static final String EXECUTE_TWICE_MESSAGE = "Only one call to execute is allowed. ";
             // eager(立即执行)函数消息: eager 指 collect, print, printToErr, count 这类会立即触发任务执行的函数, 分离模式下不能调用
          static final String EAGER_FUNCTION_MESSAGE = "Please make sure your program doesn't call " +
              "an eager execution function [collect, print, printToErr, count]. ";
            // 任务结果消息
          static final String JOB_RESULT_MESSAGE = "Results of job execution, such as accumulators," +
              " runtime, etc. are not available. ";
             // 分离模式任务执行结果
          public DetachedJobExecutionResult(final JobID jobID) {
            super(jobID, -1, null);
          }
             // 获取任务净运行时长(net runtime, 不是网络); 分离模式下拿不到, 直接抛异常
          @Override
          public long getNetRuntime() {
            throw new InvalidProgramException(DETACHED_MESSAGE + JOB_RESULT_MESSAGE);
          }
        
            // 根据累加器名称获取累加器执行结果
          @Override
          public <T> T getAccumulatorResult(String accumulatorName) {
            throw new InvalidProgramException(DETACHED_MESSAGE + JOB_RESULT_MESSAGE + EAGER_FUNCTION_MESSAGE);
          }
        
            // 获取所有的累加器结果
          @Override
          public Map<String, Object> getAllAccumulatorResults() {
            throw new InvalidProgramException(DETACHED_MESSAGE + JOB_RESULT_MESSAGE);
          }
        
             // 根据累加器名获取整型数计数结果
          @Override
          public Integer getIntCounterResult(String accumulatorName) {
            throw new InvalidProgramException(DETACHED_MESSAGE + JOB_RESULT_MESSAGE);
          }
        
          @Override
          public JobID getJobID() {
            return super.getJobID();
          }
        
              // 是否为真实的任务执行结果: 分离模式拿不到真正的执行结果, 这里固定返回 false, 用于与正常的 JobExecutionResult 区分
          @Override
          public boolean isJobExecutionResult() {
            return false;
          }
        
            // 获取任务执行的结果
          @Override
          public JobExecutionResult getJobExecutionResult() {
            return this;
          }
        }
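
分离模式结果对象的行为可以用几行代码直观验证(仅为示意, 非源码):

```java
import org.apache.flink.api.common.InvalidProgramException;
import org.apache.flink.api.common.JobID;
import org.apache.flink.client.program.DetachedJobExecutionResult;

public class DetachedResultDemo {
    public static void main(String[] args) {
        DetachedJobExecutionResult result = new DetachedJobExecutionResult(new JobID());
        System.out.println(result.getJobID());   // 任务 id 是分离模式下唯一拿得到的信息
        try {
            result.getNetRuntime();              // 其余取值方法一律抛异常
        } catch (InvalidProgramException e) {
            System.out.println(e.getMessage());  // "Job was submitted in detached mode. ..."
        }
    }
}
```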
        
        
        • MiniClusterClient 最小集群客户端
        package org.apache.flink.client.program;
        
        import org.apache.flink.api.common.JobID;
        import org.apache.flink.api.common.JobSubmissionResult;
        import org.apache.flink.configuration.Configuration;
        import org.apache.flink.runtime.client.JobExecutionException;
        import org.apache.flink.runtime.client.JobStatusMessage;
        import org.apache.flink.runtime.executiongraph.AccessExecutionGraph;
        import org.apache.flink.runtime.jobgraph.JobGraph;
        import org.apache.flink.runtime.jobgraph.JobStatus;
        import org.apache.flink.runtime.jobmaster.JobResult;
        import org.apache.flink.runtime.messages.Acknowledge;
        import org.apache.flink.runtime.minicluster.MiniCluster;
        import org.apache.flink.util.ExceptionUtils;
        import org.apache.flink.util.OptionalFailure;
        import org.apache.flink.util.SerializedValue;
        
        import javax.annotation.Nonnull;
        import javax.annotation.Nullable;
        
        import java.io.IOException;
        import java.util.Collection;
        import java.util.HashMap;
        import java.util.Map;
        import java.util.concurrent.CompletableFuture;
        import java.util.concurrent.ExecutionException;
        
        /**
         * Client to interact with a {@link MiniCluster}.
          * 与最小集群交互的客户端, 继承了 ClusterClient 抽象类
         */
        public class MiniClusterClient extends ClusterClient<MiniClusterClient.MiniClusterId> {
        
          private final MiniCluster miniCluster;
            // 配置和 MiniCluster 实例均不可为空
          public MiniClusterClient(@Nonnull Configuration configuration, @Nonnull MiniCluster miniCluster) {
            super(configuration);
            this.miniCluster = miniCluster;
          }
        
            // 提交任务, 返回任务提交结果
          @Override
          public JobSubmissionResult submitJob(JobGraph jobGraph, ClassLoader classLoader) throws ProgramInvocationException {
                 // 又是使用了CompletableFuture来实现异步调用(任务异步提交)
            final CompletableFuture<JobSubmissionResult> jobSubmissionResultFuture = submitJob(jobGraph);
                    
                // 是否分离
            if (isDetached()) {
              try {
                         // 从CompletableFuture中取出任务提交结果
                final JobSubmissionResult jobSubmissionResult = jobSubmissionResultFuture.get();
        
                          // 用提交结果中的任务 Id 构造分离模式的执行结果, 作为最终结果
                lastJobExecutionResult = new DetachedJobExecutionResult(jobSubmissionResult.getJobID());
                return lastJobExecutionResult;
                        // catch到 打断异常 或 执行异常
              } catch (InterruptedException | ExecutionException e) {
                         // 检查是否为打断异常
                ExceptionUtils.checkInterrupted(e);
        
                throw new ProgramInvocationException("Could not run job in detached mode.", jobGraph.getJobID(), e);
              }
            } else {
                      // 使用 CompletableFuture 的 thenCompose 方法将多个 CompletableFuture 调用串联起来
              final CompletableFuture<JobResult> jobResultFuture = jobSubmissionResultFuture.thenCompose(
                (JobSubmissionResult ignored) -> requestJobResult(jobGraph.getJobID()));
        
                     // 任务结果
              final JobResult jobResult;
              try {
                         // 从CompletableFuture中获取结果
                jobResult = jobResultFuture.get();
                        // 捕捉到打断异常或执行异常
              } catch (InterruptedException | ExecutionException e) {
                         // 检查是否为打断异常
                ExceptionUtils.checkInterrupted(e);
        
                throw new ProgramInvocationException("Could not run job", jobGraph.getJobID(), e);
              }
        
              try {
                         // 获取最终任务执行结果
                lastJobExecutionResult = jobResult.toJobExecutionResult(classLoader);
                return lastJobExecutionResult;
              } catch (JobExecutionException | IOException | ClassNotFoundException e) {
                throw new ProgramInvocationException("Job failed", jobGraph.getJobID(), e);
              }
            }
          }
        
             // 根据任务视图异步提交任务, 返回CompletableFuture, 泛型为任务提交结果
          @Override
          public CompletableFuture<JobSubmissionResult> submitJob(@Nonnull JobGraph jobGraph) {
            return miniCluster.submitJob(jobGraph);
          }
        
            // 根据任务 Id 异步请求任务结果, 返回泛型为 JobResult 的 CompletableFuture
          @Override
          public CompletableFuture<JobResult> requestJobResult(@Nonnull JobID jobId) {
            return miniCluster.requestJobResult(jobId);
          }
        
             // 根据任务Id取消任务
          @Override
          public void cancel(JobID jobId) throws Exception {
            miniCluster.cancelJob(jobId).get();
          }
        
              // 取消任务并触发一个保存点
          @Override
          public String cancelWithSavepoint(JobID jobId, @Nullable String savepointDirectory) throws Exception {
            return miniCluster.triggerSavepoint(jobId, savepointDirectory, true).get();
          }
            // 停止任务并触发一个保存点
          @Override
          public String stopWithSavepoint(JobID jobId, boolean advanceToEndOfEventTime, @Nullable String savepointDirector) throws Exception {
            return miniCluster.stopWithSavepoint(jobId, savepointDirector, advanceToEndOfEventTime).get();
          }
        
            // 引发保存点, 返回CompletableFuture <泛型为String类的保存点路径>
          @Override
          public CompletableFuture<String> triggerSavepoint(JobID jobId, @Nullable String savepointDirectory) {
            return miniCluster.triggerSavepoint(jobId, savepointDirectory, false);
          }
            
            // 释放保存点, 返回一个泛型为认可类的CompletableFuture
          @Override
          public CompletableFuture<Acknowledge> disposeSavepoint(String savepointPath) {
            return miniCluster.disposeSavepoint(savepointPath);
          }
            // 获取任务列表, 返回一个泛型为任务状态消息集合的CompletableFuture
          @Override
          public CompletableFuture<Collection<JobStatusMessage>> listJobs() {
            return miniCluster.listJobs();
          }
        
            // 获取累加器, 返回以累加器名 作为Key 和 可选的失败? (其实它在成功时会返回成功的值, 在失败时会返回导致失败的原因) 作为 value 的 Map
          @Override
          public Map<String, OptionalFailure<Object>> getAccumulators(JobID jobID, ClassLoader loader) throws Exception {
                 // 获取可访问的执行图 (AccessExecutionGraph)
            AccessExecutionGraph executionGraph = miniCluster.getExecutionGraph(jobID).get();
                 // 获取序列化过的累加器
            Map<String, SerializedValue<OptionalFailure<Object>>> accumulatorsSerialized = executionGraph.getAccumulatorsSerialized();
            Map<String, OptionalFailure<Object>> result = new HashMap<>(accumulatorsSerialized.size());
                 // 遍历该哈希表
            for (Map.Entry<String, SerializedValue<OptionalFailure<Object>>> acc : accumulatorsSerialized.entrySet()) {
                    // 将累加器的key 和 反序列化过的累加器的值 存入结果
              result.put(acc.getKey(), acc.getValue().deserializeValue(loader));
            }
            return result;
          }
        
             // 获取任务状态
          @Override
          public CompletableFuture<JobStatus> getJobStatus(JobID jobId) {
            return miniCluster.getJobStatus(jobId);
          }
        
            // 获取集群Id
          @Override
          public MiniClusterClient.MiniClusterId getClusterId() {
            return MiniClusterId.INSTANCE;
          }
            // 获取网页界面URL
          @Override
          public String getWebInterfaceURL() {
            return miniCluster.getRestAddress().toString();
          }
        
          enum MiniClusterId {
            INSTANCE
          }
        }
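
一个端到端的小例子能看清 MiniClusterClient 的定位: 先起一个进程内的 MiniCluster, 再用该客户端与之交互。以下为最小示意, 基于笔者对这一版(约 1.9)MiniCluster API 的理解, Builder 各 setter 若有出入请以源码为准:

```java
import org.apache.flink.client.program.MiniClusterClient;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.minicluster.MiniCluster;
import org.apache.flink.runtime.minicluster.MiniClusterConfiguration;

public class MiniClusterClientDemo {
    public static void main(String[] args) throws Exception {
        Configuration config = new Configuration();
        MiniClusterConfiguration clusterConfig = new MiniClusterConfiguration.Builder()
            .setConfiguration(config)
            .setNumTaskManagers(1)
            .setNumSlotsPerTaskManager(1)
            .build();

        try (MiniCluster miniCluster = new MiniCluster(clusterConfig)) {
            miniCluster.start();
            MiniClusterClient client = new MiniClusterClient(config, miniCluster);
            System.out.println(client.getWebInterfaceURL());
            // listJobs 返回 CompletableFuture, get() 阻塞取回当前任务列表
            client.listJobs().get().forEach(job ->
                System.out.println(job.getJobId() + " -> " + job.getJobState()));
        }
    }
}
```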
        
        • OptimizerPlanEnvironment 优化程序计划环境
        package org.apache.flink.client.program;
        
        import org.apache.flink.api.common.JobExecutionResult;
        import org.apache.flink.api.common.Plan;
        import org.apache.flink.api.java.ExecutionEnvironment;
        import org.apache.flink.api.java.ExecutionEnvironmentFactory;
        import org.apache.flink.optimizer.Optimizer;
        import org.apache.flink.optimizer.plan.FlinkPlan;
        
        import java.io.ByteArrayOutputStream;
        import java.io.PrintStream;
        
        /**
         * An {@link ExecutionEnvironment} that never executes a job but only creates the optimized plan.
          * 一个从不真正执行任务, 只用来生成优化计划的 ExecutionEnvironment
         */
        public class OptimizerPlanEnvironment extends ExecutionEnvironment {
        
          private final Optimizer compiler;
        
          private FlinkPlan optimizerPlan;
        
          public OptimizerPlanEnvironment(Optimizer compiler) {
            this.compiler = compiler;
          }
        
          // ------------------------------------------------------------------------
           //  Execution Environment methods 执行环境方法
          // ------------------------------------------------------------------------
        
          @Override
          public JobExecutionResult execute(String jobName) throws Exception {
            Plan plan = createProgramPlan(jobName);
            this.optimizerPlan = compiler.compile(plan);
        
            // do not go on with anything now! 此时不再继续执行任何其他逻辑: 直接抛出 ProgramAbortException 中止用户程序
            throw new ProgramAbortException();
          }
        
          // 根据打包的程序获取优化计划
          public FlinkPlan getOptimizedPlan(PackagedProgram prog) throws ProgramInvocationException {
        
            // temporarily write syserr and sysout to a byte array.
            // 临时将 System.err 和 System.out 重定向到字节数组
            PrintStream originalOut = System.out;
            PrintStream originalErr = System.err;
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            System.setOut(new PrintStream(baos));
            ByteArrayOutputStream baes = new ByteArrayOutputStream();
            System.setErr(new PrintStream(baes));
        
            setAsContext();
            try {
              // 请求交互模式执行
              prog.invokeInteractiveModeForExecution();
            }
            catch (ProgramInvocationException e) {
              throw e;
            }
            catch (Throwable t) {
              // the invocation gets aborted with the preview plan
              // 用户程序的调用在拿到预览计划(preview plan)后即被中止, 对应上面 execute 里抛出的 ProgramAbortException
              if (optimizerPlan != null) {
                return optimizerPlan;
              } else {
                throw new ProgramInvocationException("The program caused an error: ", t);
              }
            }
            finally {
              // 取消设置为上下文
              unsetAsContext();
              System.setOut(originalOut);
              System.setErr(originalErr);
            }
        
            String stdout = baos.toString();
            String stderr = baes.toString();
        
            throw new ProgramInvocationException(
                "The program plan could not be fetched - the program aborted pre-maturely."
                    + "\n\nSystem.err: " + (stderr.length() == 0 ? "(none)" : stderr)
                    + "\n\nSystem.out: " + (stdout.length() == 0 ? "(none)" : stdout));
          }
          // ------------------------------------------------------------------------
        
          // 设置为上下文
          private void setAsContext() {
            ExecutionEnvironmentFactory factory = new ExecutionEnvironmentFactory() {
        
              @Override
              public ExecutionEnvironment createExecutionEnvironment() {
                return OptimizerPlanEnvironment.this;
              }
            };
            initializeContextEnvironment(factory);
          }
          // 取消设置为上下文
          private void unsetAsContext() {
            resetContextEnvironment();
          }
        
          // ------------------------------------------------------------------------
        
          public void setPlan(FlinkPlan plan){
            this.optimizerPlan = plan;
          }
        
          /**
           * A special exception used to abort programs when the caller is only interested
           * in the program plan, rather than in the full execution.
           * 当调用者只对程序计划感兴趣而不关心完整的执行时, 用于中止程序的特殊异常类
           */
          public static final class ProgramAbortException extends Error {
            private static final long serialVersionUID = 1L;
          }
        }
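
这种"执行到 execute 就抛一个特殊 Error 来截获计划"的套路, 可以用一段与 Flink 无关的纯 JDK 代码最小复现(仅为示意, PlanAbortError / userMain 均为假设的名字):

```java
public class AbortTrickDemo {

    // 对应源码中的 ProgramAbortException: 继承 Error 是为了绕过用户代码里的 catch (Exception e)
    static final class PlanAbortError extends Error {}

    static String capturedPlan;

    // 相当于用户程序的 main: 跑到 execute() 这一步就把计划记下来并立即中止
    static void userMain() {
        capturedPlan = "plan-of-user-program";   // 相当于 compiler.compile(plan)
        throw new PlanAbortError();              // 相当于 throw new ProgramAbortException()
    }

    public static void main(String[] args) {
        try {
            userMain();
        } catch (Throwable t) {
            if (capturedPlan == null) {
                // 程序在产出计划之前就挂了, 对应 "program aborted pre-maturely"
                throw new RuntimeException("program aborted pre-maturely", t);
            }
        }
        System.out.println("captured: " + capturedPlan);
    }
}
```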
        
        • PackagedProgram 打包的程序
        package org.apache.flink.client.program;
        
        import org.apache.flink.api.common.ProgramDescription;
        import org.apache.flink.client.ClientUtils;
        import org.apache.flink.configuration.ConfigConstants;
        import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;
        import org.apache.flink.util.InstantiationUtil;
        
        import javax.annotation.Nullable;
        
        import java.io.BufferedInputStream;
        import java.io.File;
        import java.io.FileOutputStream;
        import java.io.IOException;
        import java.io.InputStream;
        import java.io.OutputStream;
        import java.lang.reflect.InvocationTargetException;
        import java.lang.reflect.Method;
        import java.lang.reflect.Modifier;
        import java.net.MalformedURLException;
        import java.net.URISyntaxException;
        import java.net.URL;
        import java.nio.file.FileSystems;
        import java.nio.file.FileVisitResult;
        import java.nio.file.Files;
        import java.nio.file.Path;
        import java.nio.file.SimpleFileVisitor;
        import java.nio.file.attribute.BasicFileAttributes;
        import java.util.ArrayList;
        import java.util.Collections;
        import java.util.Enumeration;
        import java.util.List;
        import java.util.Random;
        import java.util.jar.Attributes;
        import java.util.jar.JarEntry;
        import java.util.jar.JarFile;
        import java.util.jar.Manifest;
        
        /**
         * This class encapsulates represents a program, packaged in a jar file. It supplies
         * functionality to extract nested libraries, search for the program entry point, 
         * and extract a program plan.
          * 该类封装了一个被打成 jar 包的程序。
          * 它提供了抽取嵌套类库, 查找程序入口点以及抽取程序计划的功能
         *
         */
        public class PackagedProgram {
        
          /**
           * Property name of the entry in JAR manifest file that describes the Flink 
           * specific entry point.
           * JAR manifest 文件中描述 Flink 特定入口点的那一项的属性名
           */
          public static final String MANIFEST_ATTRIBUTE_ASSEMBLER_CLASS = "program-class";
        
          /**
           * Property name of the entry in JAR manifest file that describes the class
           * with the main method.
           * JAR manifest 文件中描述主方法所在类的那一项的属性名
           */
          public static final String MANIFEST_ATTRIBUTE_MAIN_CLASS = "Main-Class";
        
          // --------------------------------------------------------------------------------------------
        
          private final URL jarFile;
        
          private final String[] args;
        
          private final Class<?> mainClass;
        
          private final List<File> extractedTempLibraries;
        
          private final List<URL> classpaths;
        
          private ClassLoader userCodeClassLoader;
        
          private SavepointRestoreSettings savepointSettings = SavepointRestoreSettings.none();
        
          /**
           * Flag indicating whether the job is a Python job.
           * 判断这是否是一个python任务的标签
           */
          private final boolean isPython;
        
          /**
           * Creates an instance that wraps the plan defined in the jar file using the given
           * argument.
           * 创建一个实例, 包装 jar 文件中定义的计划, 并使用给定的参数
           *
           * @param jarFile
           *        The jar file which contains the plan and a Manifest which defines
           *        the program-class
           *        该 jar 文件包含了计划 和 一个 定义了程序类的 manifest 文件
           * @param args
           *        Optional. The arguments used to create the pact plan, depend on
           *        implementation of the pact plan. See getDescription().
           *        可选参数。这些参数用于创建 pact plan, 具体取决于 pact plan 的实现。
           *        详情请看 getDescription() 方法
           * @throws ProgramInvocationException
           *         This invocation is thrown if the Program can't be properly loaded. 
           * Causes may be a missing / wrong class or manifest files.
           * 如果程序不能被正确加载, 则抛出该异常。原因可能是缺失的/错误的类或 manifest 文件
           */
          public PackagedProgram(File jarFile, String... args) throws ProgramInvocationException {
            this(jarFile, Collections.<URL>emptyList(), null, args);
          }
        
          /**
           * Creates an instance that wraps the plan defined in the jar file using the given
           * argument.
           * 创建一个实例, 包装 jar 文件中通过给定参数定义的计划
           *
           * @param jarFile
           *        The jar file which contains the plan and a Manifest which defines
           *        the program-class
           *        包含了计划 和 一个 定义了 程序类的 manifest文件
           * @param classpaths
           *        Additional classpath URLs needed by the Program.
           *        程序所需的额外 URL 路径
           * @param args
           *        Optional. The arguments used to create the pact plan, depend on
           *        implementation of the pact plan. See getDescription().
           * 可选参数。这些参数用于创建 pact plan, 具体取决于 pact plan 的实现。详情请看 getDescription() 方法
           * @throws ProgramInvocationException
           *         This invocation is thrown if the Program can't be properly loaded. 
           * Causes may be a missing / wrong class or manifest files.
           * 如果程序不能被正确加载, 则抛出该异常。
           * 原因可能是缺失的/错误的类或 manifest 文件
           */
          public PackagedProgram(File jarFile, List<URL> classpaths, String... args) throws ProgramInvocationException {
            this(jarFile, classpaths, null, args);
          }
        
          /**
           * Creates an instance that wraps the plan defined in the jar file using the given
           * arguments. For generating the plan the class defined in the className parameter
           * is used.
           *
           * @param jarFile
           *        The jar file which contains the plan.
           * @param entryPointClassName
           *        Name of the class which generates the plan. Overrides the class defined
           *        in the jar file manifest
           *        生成计划的入口类名称, 会覆盖 jar manifest 文件中定义的类
           * @param args
           *        Optional. The arguments used to create the pact plan, depend on
           *        implementation of the pact plan. See getDescription().
           * @throws ProgramInvocationException
           *         This invocation is thrown if the Program can't be properly loaded. Causes may be a missing / wrong class or manifest files.
           */
          public PackagedProgram(File jarFile, @Nullable String entryPointClassName, String... args) throws ProgramInvocationException {
            this(jarFile, Collections.<URL>emptyList(), entryPointClassName, args);
          }
        
          /**
           * Creates an instance that wraps the plan defined in the jar file using the given
           * arguments. For generating the plan the class defined in the className parameter
           * is used.
           *
           * @param jarFile
           *        The jar file which contains the plan.
           * @param classpaths
           *        Additional classpath URLs needed by the Program.
           * @param entryPointClassName
           *        Name of the class which generates the plan. Overrides the class defined
           *        in the jar file manifest
           * @param args
           *        Optional. The arguments used to create the pact plan, depend on
           *        implementation of the pact plan. See getDescription().
           * @throws ProgramInvocationException
           *         This invocation is thrown if the Program can't be properly loaded. 
           * Causes may be a missing / wrong class or manifest files.
           */
          public PackagedProgram(File jarFile, List<URL> classpaths, @Nullable String entryPointClassName, String... args) throws ProgramInvocationException {
            // Whether the job is a Python job. 判断是否为python任务
            isPython = entryPointClassName != null && (entryPointClassName.equals("org.apache.flink.client.python.PythonDriver")
              || entryPointClassName.equals("org.apache.flink.client.python.PythonGatewayServer"));
        
            URL jarFileUrl = null;
            if (jarFile != null) {
              try {
                jarFileUrl = jarFile.getAbsoluteFile().toURI().toURL();
              } catch (MalformedURLException e1) {
                throw new IllegalArgumentException("The jar file path is invalid.");
              }
               // 检查文件路径
              checkJarFile(jarFileUrl);
            } else if (!isPython) {
              throw new IllegalArgumentException("The jar file must not be null.");
            }
        
            this.jarFile = jarFileUrl;
            this.args = args == null ? new String[0] : args;
        
            // if no entryPointClassName name was given, we try and look one up through the manifest
            // 如果没有给出入口点类名称, 我们会去 manifest 文件中找一个
            if (entryPointClassName == null) {
              entryPointClassName = getEntryPointClassNameFromJar(jarFileUrl);
            }
        
            // now that we have an entry point, we can extract the nested jar files (if any)
            // 现在我们有了入口点, 就可以抽取嵌套的 jar 文件了(如果有的话)
            this.extractedTempLibraries = jarFileUrl == null ? Collections.emptyList() : extractContainedLibraries(jarFileUrl);
            this.classpaths = classpaths;
            this.userCodeClassLoader = ClientUtils.buildUserCodeClassLoader(getAllLibraries(), classpaths, getClass().getClassLoader());
        
            // load the entry point class 加载进入点类
            this.mainClass = loadMainClass(entryPointClassName, userCodeClassLoader);
        
            if (!hasMainMethod(mainClass)) {
              throw new ProgramInvocationException("The given program class does not have a main(String[]) method.");
            }
          }
        
          public PackagedProgram(Class<?> entryPointClass, String... args) throws ProgramInvocationException {
            this.jarFile = null;
            this.args = args == null ? new String[0] : args;
        
            this.extractedTempLibraries = Collections.emptyList();
            this.classpaths = Collections.emptyList();
            this.userCodeClassLoader = entryPointClass.getClassLoader();
        
            // load the entry point class
            this.mainClass = entryPointClass;
            isPython = entryPointClass.getCanonicalName().equals("org.apache.flink.client.python.PythonDriver");
        
            if (!hasMainMethod(mainClass)) {
              throw new ProgramInvocationException("The given program class does not have a main(String[]) method.");
            }
          }
        
          public void setSavepointRestoreSettings(SavepointRestoreSettings savepointSettings) {
            this.savepointSettings = savepointSettings;
          }
        
          public SavepointRestoreSettings getSavepointSettings() {
            return savepointSettings;
          }
        
          public String[] getArguments() {
            return this.args;
          }
        
          public String getMainClassName() {
            return this.mainClass.getName();
          }
        
          /**
           * Returns the description provided by the Program class. This
           * may contain a description of the plan itself and its arguments.
           *
           * @return The description of the PactProgram's input parameters.
           * @throws ProgramInvocationException
           *         This invocation is thrown if the Program can't be properly loaded. Causes
           *         may be a missing / wrong class or manifest files.
           */
          @Nullable
          public String getDescription() throws ProgramInvocationException {
            if (ProgramDescription.class.isAssignableFrom(this.mainClass)) {
        
              ProgramDescription descr;
              try {
                descr = InstantiationUtil.instantiate(
                    this.mainClass.asSubclass(ProgramDescription.class), ProgramDescription.class);
              } catch (Throwable t) {
                return null;
              }
        
              try {
                return descr.getDescription();
              }
              catch (Throwable t) {
                throw new ProgramInvocationException("Error while getting the program description" +
                    (t.getMessage() == null ? "." : ": " + t.getMessage()), t);
              }
        
            } else {
              return null;
            }
          }
        
          /**
           * This method assumes that the context environment is prepared, or the execution
           * will be a local execution by default.
           */
          public void invokeInteractiveModeForExecution() throws ProgramInvocationException{
            callMainMethod(mainClass, args);
          }
        
          /**
           * Returns the classpaths that are required by the program.
           *
           * @return List of {@link java.net.URL}s.
           */
          public List<URL> getClasspaths() {
            return this.classpaths;
          }
        
          /**
           * Gets the {@link java.lang.ClassLoader} that must be used to load user code classes.
           *
           * @return The user code ClassLoader.
           */
          public ClassLoader getUserCodeClassLoader() {
            return this.userCodeClassLoader;
          }
        
          /**
           * Returns all provided libraries needed to run the program.
           */
          public List<URL> getAllLibraries() {
            List<URL> libs = new ArrayList<URL>(this.extractedTempLibraries.size() + 1);
        
            if (jarFile != null) {
              libs.add(jarFile);
            }
            for (File tmpLib : this.extractedTempLibraries) {
              try {
                libs.add(tmpLib.getAbsoluteFile().toURI().toURL());
              }
              catch (MalformedURLException e) {
                throw new RuntimeException("URL is invalid. This should not happen.", e);
              }
            }
        
            if (isPython) {
              String flinkOptPath = System.getenv(ConfigConstants.ENV_FLINK_OPT_DIR);
              final List<Path> pythonJarPath = new ArrayList<>();
              try {
                Files.walkFileTree(FileSystems.getDefault().getPath(flinkOptPath), new SimpleFileVisitor<Path>() {
                  @Override
                  public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                    FileVisitResult result = super.visitFile(file, attrs);
                    if (file.getFileName().toString().startsWith("flink-python")) {
                      pythonJarPath.add(file);
                    }
                    return result;
                  }
                });
              } catch (IOException e) {
                throw new RuntimeException(
                  "Exception encountered during finding the flink-python jar. This should not happen.", e);
              }
        
              if (pythonJarPath.size() != 1) {
                throw new RuntimeException("Found " + pythonJarPath.size() + " flink-python jar.");
              }
        
              try {
                libs.add(pythonJarPath.get(0).toUri().toURL());
              } catch (MalformedURLException e) {
                throw new RuntimeException("URL is invalid. This should not happen.", e);
              }
            }
        
            return libs;
          }
        
          /**
           * Deletes all temporary files created for contained packaged libraries.
           */
          public void deleteExtractedLibraries() {
            deleteExtractedLibraries(this.extractedTempLibraries);
            this.extractedTempLibraries.clear();
          }
        
          private static boolean hasMainMethod(Class<?> entryClass) {
            Method mainMethod;
            try {
              mainMethod = entryClass.getMethod("main", String[].class);
            } catch (NoSuchMethodException e) {
              return false;
            }
            catch (Throwable t) {
              throw new RuntimeException("Could not look up the main(String[]) method from the class " +
                  entryClass.getName() + ": " + t.getMessage(), t);
            }
        
            return Modifier.isStatic(mainMethod.getModifiers()) && Modifier.isPublic(mainMethod.getModifiers());
          }
        
          private static void callMainMethod(Class<?> entryClass, String[] args) throws ProgramInvocationException {
            Method mainMethod;
            if (!Modifier.isPublic(entryClass.getModifiers())) {
              throw new ProgramInvocationException("The class " + entryClass.getName() + " must be public.");
            }
        
            try {
              mainMethod = entryClass.getMethod("main", String[].class);
            } catch (NoSuchMethodException e) {
              throw new ProgramInvocationException("The class " + entryClass.getName() + " has no main(String[]) method.");
            }
            catch (Throwable t) {
              throw new ProgramInvocationException("Could not look up the main(String[]) method from the class " +
                  entryClass.getName() + ": " + t.getMessage(), t);
            }
        
            if (!Modifier.isStatic(mainMethod.getModifiers())) {
              throw new ProgramInvocationException("The class " + entryClass.getName() + " declares a non-static main method.");
            }
            if (!Modifier.isPublic(mainMethod.getModifiers())) {
              throw new ProgramInvocationException("The class " + entryClass.getName() + " declares a non-public main method.");
            }
        
            try {
              mainMethod.invoke(null, (Object) args);
            }
            catch (IllegalArgumentException e) {
              throw new ProgramInvocationException("Could not invoke the main method, arguments are not matching.", e);
            }
            catch (IllegalAccessException e) {
              throw new ProgramInvocationException("Access to the main method was denied: " + e.getMessage(), e);
            }
            catch (InvocationTargetException e) {
              Throwable exceptionInMethod = e.getTargetException();
              if (exceptionInMethod instanceof Error) {
                throw (Error) exceptionInMethod;
              } else if (exceptionInMethod instanceof ProgramParametrizationException) {
                throw (ProgramParametrizationException) exceptionInMethod;
              } else if (exceptionInMethod instanceof ProgramInvocationException) {
                throw (ProgramInvocationException) exceptionInMethod;
              } else {
                throw new ProgramInvocationException("The main method caused an error: " + exceptionInMethod.getMessage(), exceptionInMethod);
              }
            }
            catch (Throwable t) {
              throw new ProgramInvocationException("An error occurred while invoking the program's main method: " + t.getMessage(), t);
            }
          }
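          // 注意上面 mainMethod.invoke(null, (Object) args) 中的 (Object) 强转:
          // invoke 本身是 varargs, 不强转的话 String[] 会被当成整个参数数组拆开,
          // 其元素变成多个独立参数, 从而触发 IllegalArgumentException,
          // 落入上面 "arguments are not matching" 那个 catch 分支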
        
          private static String getEntryPointClassNameFromJar(URL jarFile) throws ProgramInvocationException {
            JarFile jar;
            Manifest manifest;
            String className;
        
            // Open jar file
            try {
              jar = new JarFile(new File(jarFile.toURI()));
            } catch (URISyntaxException use) {
              throw new ProgramInvocationException("Invalid file path '" + jarFile.getPath() + "'", use);
            } catch (IOException ioex) {
              throw new ProgramInvocationException("Error while opening jar file '" + jarFile.getPath() + "'. "
                + ioex.getMessage(), ioex);
            }
        
            // jar file must be closed at the end
            try {
              // Read from jar manifest
              try {
                manifest = jar.getManifest();
              } catch (IOException ioex) {
                throw new ProgramInvocationException("The Manifest in the jar file could not be accessed '"
                  + jarFile.getPath() + "'. " + ioex.getMessage(), ioex);
              }
        
              if (manifest == null) {
                throw new ProgramInvocationException("No manifest found in jar file '" + jarFile.getPath() + "'. The manifest is need to point to the program's main class.");
              }
        
              Attributes attributes = manifest.getMainAttributes();
        
              // check for a "program-class" entry first
              className = attributes.getValue(PackagedProgram.MANIFEST_ATTRIBUTE_ASSEMBLER_CLASS);
              if (className != null) {
                return className;
              }
        
              // check for a main class
              className = attributes.getValue(PackagedProgram.MANIFEST_ATTRIBUTE_MAIN_CLASS);
              if (className != null) {
                return className;
              } else {
                throw new ProgramInvocationException("Neither a '" + MANIFEST_ATTRIBUTE_MAIN_CLASS + "', nor a '" +
                    MANIFEST_ATTRIBUTE_ASSEMBLER_CLASS + "' entry was found in the jar file.");
              }
            }
            finally {
              try {
                jar.close();
              } catch (Throwable t) {
                throw new ProgramInvocationException("Could not close the JAR file: " + t.getMessage(), t);
              }
            }
          }
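          // 仅为示意(非源码): 上面为保证 jar 被关闭写了不少 try/finally,
          // 换成 try-with-resources 可以把同样的查找逻辑压缩成 —
          //   try (JarFile jar = new JarFile(new File(jarFile.toURI()))) {
          //     Manifest m = jar.getManifest();
          //     ... 依次读取 program-class / Main-Class 属性 ...
          //   }  // jar 在此自动关闭; 找不到时源码选择抛 ProgramInvocationException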
        
          private static Class<?> loadMainClass(String className, ClassLoader cl) throws ProgramInvocationException {
            ClassLoader contextCl = null;
            try {
              contextCl = Thread.currentThread().getContextClassLoader();
              Thread.currentThread().setContextClassLoader(cl);
              return Class.forName(className, false, cl);
            }
            catch (ClassNotFoundException e) {
              throw new ProgramInvocationException("The program's entry point class '" + className
                + "' was not found in the jar file.", e);
            }
            catch (ExceptionInInitializerError e) {
              throw new ProgramInvocationException("The program's entry point class '" + className
                + "' threw an error during initialization.", e);
            }
            catch (LinkageError e) {
              throw new ProgramInvocationException("The program's entry point class '" + className
                + "' could not be loaded due to a linkage failure.", e);
            }
            catch (Throwable t) {
              throw new ProgramInvocationException("The program's entry point class '" + className
                + "' caused an exception during initialization: " + t.getMessage(), t);
            } finally {
              if (contextCl != null) {
                Thread.currentThread().setContextClassLoader(contextCl);
              }
            }
          }
        
          /**
           * Takes all JAR files that are contained in this program's JAR file and extracts them
           * to the system's temp directory.
           *
           * @return The file names of the extracted temporary files.
           * @throws ProgramInvocationException Thrown, if the extraction process failed.
           */
          public static List<File> extractContainedLibraries(URL jarFile) throws ProgramInvocationException {
        
            Random rnd = new Random();
        
            JarFile jar = null;
            try {
              jar = new JarFile(new File(jarFile.toURI()));
              final List<JarEntry> containedJarFileEntries = new ArrayList<JarEntry>();
        
              Enumeration<JarEntry> entries = jar.entries();
              while (entries.hasMoreElements()) {
                JarEntry entry = entries.nextElement();
                String name = entry.getName();
        
                if (name.length() > 8 && name.startsWith("lib/") && name.endsWith(".jar")) {
                  containedJarFileEntries.add(entry);
                }
              }
        
              if (containedJarFileEntries.isEmpty()) {
                return Collections.emptyList();
              }
              else {
                // go over all contained jar files
                final List<File> extractedTempLibraries = new ArrayList<File>(containedJarFileEntries.size());
                final byte[] buffer = new byte[4096];
        
                boolean incomplete = true;
        
                try {
                  for (int i = 0; i < containedJarFileEntries.size(); i++) {
                    final JarEntry entry = containedJarFileEntries.get(i);
                    String name = entry.getName();
                    // '/' as in case of zip, jar
                    // java.util.zip.ZipEntry#isDirectory always looks only for '/' not for File.separator
                    name = name.replace('/', '_');
        
                    File tempFile;
                    try {
                      tempFile = File.createTempFile(rnd.nextInt(Integer.MAX_VALUE) + "_", name);
                      tempFile.deleteOnExit();
                    }
                    catch (IOException e) {
                      throw new ProgramInvocationException(
                        "An I/O error occurred while creating temporary file to extract nested library '" +
                            entry.getName() + "'.", e);
                    }
        
                    extractedTempLibraries.add(tempFile);
        
                    // copy the temp file contents to a temporary File
                    OutputStream out = null;
                    InputStream in = null;
                    try {
        
                      out = new FileOutputStream(tempFile);
                      in = new BufferedInputStream(jar.getInputStream(entry));
        
                      int numRead = 0;
                      while ((numRead = in.read(buffer)) != -1) {
                        out.write(buffer, 0, numRead);
                      }
                    }
                    catch (IOException e) {
                      throw new ProgramInvocationException("An I/O error occurred while extracting nested library '"
                          + entry.getName() + "' to temporary file '" + tempFile.getAbsolutePath() + "'.");
                    }
                    finally {
                      if (out != null) {
                        out.close();
                      }
                      if (in != null) {
                        in.close();
                      }
                    }
                  }
        
                  incomplete = false;
                }
                finally {
                  if (incomplete) {
                    deleteExtractedLibraries(extractedTempLibraries);
                  }
                }
        
                return extractedTempLibraries;
              }
            }
            catch (Throwable t) {
              throw new ProgramInvocationException("Unknown I/O error while extracting contained jar files.", t);
            }
            finally {
              if (jar != null) {
                try {
                  jar.close();
                } catch (Throwable t) {}
              }
            }
          }
        
          public static void deleteExtractedLibraries(List<File> tempLibraries) {
            for (File f : tempLibraries) {
              f.delete();
            }
          }
        
          private static void checkJarFile(URL jarfile) throws ProgramInvocationException {
            try {
              ClientUtils.checkJarFile(jarfile);
            }
            catch (IOException e) {
              throw new ProgramInvocationException(e.getMessage(), e);
            }
            catch (Throwable t) {
              throw new ProgramInvocationException("Cannot access jar file" + (t.getMessage() == null ? "." : ": " + t.getMessage()), t);
            }
          }
        
        }
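
        A note on the extraction above: extractContainedLibraries recognizes nested libraries purely by the lib/*.jar naming convention inside the user jar. A minimal sketch of the same filter using only java.util.jar (the path my-job.jar is a hypothetical placeholder, not from the Flink sources):

        import java.util.Enumeration;
        import java.util.jar.JarEntry;
        import java.util.jar.JarFile;

        public class NestedJarLister {
          public static void main(String[] args) throws Exception {
            // open the user jar; try-with-resources closes it even on failure
            try (JarFile jar = new JarFile("my-job.jar")) {
              Enumeration<JarEntry> entries = jar.entries();
              while (entries.hasMoreElements()) {
                String name = entries.nextElement().getName();
                // same filter as extractContainedLibraries: "lib/x.jar" is the
                // shortest possible match, hence the length() > 8 check
                if (name.length() > 8 && name.startsWith("lib/") && name.endsWith(".jar")) {
                  System.out.println("nested library: " + name);
                }
              }
            }
          }
        }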
        
        • PackagedProgramUtils (utility class for PackagedProgram-related operations)
        package org.apache.flink.client.program;
        
        import org.apache.flink.api.common.JobID;
        import org.apache.flink.configuration.Configuration;
        import org.apache.flink.core.fs.Path;
        import org.apache.flink.optimizer.DataStatistics;
        import org.apache.flink.optimizer.Optimizer;
        import org.apache.flink.optimizer.costs.DefaultCostEstimator;
        import org.apache.flink.optimizer.plan.FlinkPlan;
        import org.apache.flink.optimizer.plan.OptimizedPlan;
        import org.apache.flink.optimizer.plan.StreamingPlan;
        import org.apache.flink.optimizer.plantranslate.JobGraphGenerator;
        import org.apache.flink.runtime.jobgraph.JobGraph;
        
        import javax.annotation.Nullable;
        
        import java.net.URISyntaxException;
        import java.net.URL;
        
        /**
         * Utility class for {@link PackagedProgram} related operations.
         */
        public class PackagedProgramUtils {
        
          /**
           * Creates a {@link JobGraph} with a specified {@link JobID}
           * from the given {@link PackagedProgram}.
           *
           * @param packagedProgram to extract the JobGraph from
           * @param configuration to use for the optimizer and job graph generator
           * @param defaultParallelism for the JobGraph
           * @param jobID the pre-generated job id
           * @return JobGraph extracted from the PackagedProgram
           * @throws ProgramInvocationException if the JobGraph generation failed
           */
          public static JobGraph createJobGraph(
              PackagedProgram packagedProgram,
              Configuration configuration,
              int defaultParallelism,
              @Nullable JobID jobID) throws ProgramInvocationException {
            Thread.currentThread().setContextClassLoader(packagedProgram.getUserCodeClassLoader());
            final Optimizer optimizer = new Optimizer(new DataStatistics(), new DefaultCostEstimator(), configuration);
        
            final OptimizerPlanEnvironment optimizerPlanEnvironment = new OptimizerPlanEnvironment(optimizer);
            optimizerPlanEnvironment.setParallelism(defaultParallelism);
        
            final FlinkPlan flinkPlan = optimizerPlanEnvironment.getOptimizedPlan(packagedProgram);
        
            final JobGraph jobGraph;
        
            if (flinkPlan instanceof StreamingPlan) {
              jobGraph = ((StreamingPlan) flinkPlan).getJobGraph(jobID);
              jobGraph.setSavepointRestoreSettings(packagedProgram.getSavepointSettings());
            } else {
              final JobGraphGenerator jobGraphGenerator = new JobGraphGenerator(configuration);
              jobGraph = jobGraphGenerator.compileJobGraph((OptimizedPlan) flinkPlan, jobID);
            }
        
            for (URL url : packagedProgram.getAllLibraries()) {
              try {
                jobGraph.addJar(new Path(url.toURI()));
              } catch (URISyntaxException e) {
                throw new ProgramInvocationException("Invalid URL for jar file: " + url + '.', jobGraph.getJobID(), e);
              }
            }
        
            jobGraph.setClasspaths(packagedProgram.getClasspaths());
        
            return jobGraph;
          }
        
          /**
           * Creates a {@link JobGraph} with a random {@link JobID}
           * from the given {@link PackagedProgram}.
           *
           * @param packagedProgram to extract the JobGraph from
           * @param configuration to use for the optimizer and job graph generator
           * @param defaultParallelism for the JobGraph
           * @return JobGraph extracted from the PackagedProgram
           * @throws ProgramInvocationException if the JobGraph generation failed
           */
          public static JobGraph createJobGraph(
            PackagedProgram packagedProgram,
            Configuration configuration,
            int defaultParallelism) throws ProgramInvocationException {
            return createJobGraph(packagedProgram, configuration, defaultParallelism, null);
          }
        
          private PackagedProgramUtils() {}
        }
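
        As a usage sketch, createJobGraph lets one compile a user jar into a JobGraph without submitting it (e.g. for inspection). The jar path and the parallelism of 4 below are made-up placeholders:

        import org.apache.flink.client.program.PackagedProgram;
        import org.apache.flink.client.program.PackagedProgramUtils;
        import org.apache.flink.configuration.Configuration;
        import org.apache.flink.runtime.jobgraph.JobGraph;

        import java.io.File;

        public class CreateJobGraphExample {
          public static void main(String[] args) throws Exception {
            // hypothetical user jar whose main class builds a Flink plan
            PackagedProgram program = new PackagedProgram(new File("/tmp/my-job.jar"));
            // compile to a JobGraph with a random JobID and default parallelism 4
            JobGraph jobGraph = PackagedProgramUtils.createJobGraph(program, new Configuration(), 4);
            System.out.println("compiled job: " + jobGraph.getJobID());
          }
        }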
        
        
        • ProgramInvocationException (exception thrown when the invocation of a Flink program fails)
        package org.apache.flink.client.program;
        
        import org.apache.flink.api.common.JobID;
        
        /**
         * Exception used to indicate that there is an error during the invocation of a Flink program.
         */
        public class ProgramInvocationException extends Exception {
          /**
           * Serial version UID for serialization interoperability.
           */
          private static final long serialVersionUID = -2417524218857151612L;
        
          /**
           * Creates a <tt>ProgramInvocationException</tt> with the given message.
           *
           * @param message
           *        The message for the exception.
           */
          public ProgramInvocationException(String message) {
            super(message);
          }
        
          /**
           * Creates a <tt>ProgramInvocationException</tt> with the given message which contains job id.
           *
           * @param message
           *        The additional message.
           * @param jobID
           *        ID of failed job.
           */
          public ProgramInvocationException(String message, JobID jobID) {
            super(message + " (JobID: " + jobID + ")");
          }
        
          /**
           * Creates a <tt>ProgramInvocationException</tt> for the given exception.
           *
           * @param cause
           *        The exception that causes the program invocation to fail.
           */
          public ProgramInvocationException(Throwable cause) {
            super(cause);
          }
        
          /**
           * Creates a <tt>ProgramInvocationException</tt> for the given exception with an
           * additional message.
           *
           * @param message
           *        The additional message.
           * @param cause
           *        The exception that causes the program invocation to fail.
           */
          public ProgramInvocationException(String message, Throwable cause) {
            super(message, cause);
          }
        
          /**
           * Creates a <tt>ProgramInvocationException</tt> for the given exception with an
           * additional message which contains job id.
           *
           * @param message
           *        The additional message.
           * @param jobID
           *        ID of failed job.
           * @param cause
           *        The exception that causes the program invocation to fail.
           */
          public ProgramInvocationException(String message, JobID jobID, Throwable cause) {
            super(message + " (JobID: " + jobID + ")", cause);
          }
        }
        
        • ProgramMissingJobException (exception indicating that the program executed no job)
        package org.apache.flink.client.program;
        
        import org.apache.flink.util.FlinkException;
        
        /**
         * Exception used to indicate that no job was executed during the invocation of a Flink program.
         */
        public class ProgramMissingJobException extends FlinkException {
          /**
           * Serial version UID for serialization interoperability.
           */
          private static final long serialVersionUID = -1964276369605091101L;
        
          public ProgramMissingJobException(String message) {
            super(message);
          }
        }
        
        • ProgramParametrizationException (exception for errors in a program's parametrization)
        package org.apache.flink.client.program;
        
        import org.apache.flink.util.Preconditions;
        
        /**
         * Exception used to indicate that there is an error in the parametrization of a Flink program.
         */
        public class ProgramParametrizationException extends RuntimeException {
          /**
           * Serial version UID for serialization interoperability.
           */
          private static final long serialVersionUID = 909054589029890262L;
        
          /**
           * Creates a <tt>ProgramParametrizationException</tt> with the given message.
           *
           * @param message
           *        The program usage string.
           */
          public ProgramParametrizationException(String message) {
            super(Preconditions.checkNotNull(message));
          }
        }
        
        • rest

          • RestClusterClient (cluster client that communicates over HTTP REST)
          package org.apache.flink.client.program.rest;
          
          import org.apache.flink.annotation.VisibleForTesting;
          import org.apache.flink.api.common.JobID;
          import org.apache.flink.api.common.JobSubmissionResult;
          import org.apache.flink.api.common.accumulators.AccumulatorHelper;
          import org.apache.flink.api.common.cache.DistributedCache;
          import org.apache.flink.api.common.time.Time;
          import org.apache.flink.api.java.tuple.Tuple2;
          import org.apache.flink.client.program.ClusterClient;
          import org.apache.flink.client.program.DetachedJobExecutionResult;
          import org.apache.flink.client.program.ProgramInvocationException;
          import org.apache.flink.client.program.rest.retry.ExponentialWaitStrategy;
          import org.apache.flink.client.program.rest.retry.WaitStrategy;
          import org.apache.flink.configuration.Configuration;
          import org.apache.flink.core.fs.Path;
          import org.apache.flink.runtime.akka.AkkaUtils;
          import org.apache.flink.runtime.client.JobExecutionException;
          import org.apache.flink.runtime.client.JobStatusMessage;
          import org.apache.flink.runtime.client.JobSubmissionException;
          import org.apache.flink.runtime.concurrent.FutureUtils;
          import org.apache.flink.runtime.concurrent.ScheduledExecutorServiceAdapter;
          import org.apache.flink.runtime.highavailability.ClientHighAvailabilityServices;
          import org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils;
          import org.apache.flink.runtime.jobgraph.JobGraph;
          import org.apache.flink.runtime.jobgraph.JobStatus;
          import org.apache.flink.runtime.jobmaster.JobResult;
          import org.apache.flink.runtime.leaderretrieval.LeaderRetrievalService;
          import org.apache.flink.runtime.messages.Acknowledge;
          import org.apache.flink.runtime.rest.FileUpload;
          import org.apache.flink.runtime.rest.RestClient;
          import org.apache.flink.runtime.rest.handler.async.AsynchronousOperationInfo;
          import org.apache.flink.runtime.rest.handler.async.TriggerResponse;
          import org.apache.flink.runtime.rest.messages.EmptyMessageParameters;
          import org.apache.flink.runtime.rest.messages.EmptyRequestBody;
          import org.apache.flink.runtime.rest.messages.EmptyResponseBody;
          import org.apache.flink.runtime.rest.messages.JobAccumulatorsHeaders;
          import org.apache.flink.runtime.rest.messages.JobAccumulatorsInfo;
          import org.apache.flink.runtime.rest.messages.JobAccumulatorsMessageParameters;
          import org.apache.flink.runtime.rest.messages.JobCancellationHeaders;
          import org.apache.flink.runtime.rest.messages.JobCancellationMessageParameters;
          import org.apache.flink.runtime.rest.messages.JobMessageParameters;
          import org.apache.flink.runtime.rest.messages.JobsOverviewHeaders;
          import org.apache.flink.runtime.rest.messages.MessageHeaders;
          import org.apache.flink.runtime.rest.messages.MessageParameters;
          import org.apache.flink.runtime.rest.messages.RequestBody;
          import org.apache.flink.runtime.rest.messages.ResponseBody;
          import org.apache.flink.runtime.rest.messages.TerminationModeQueryParameter;
          import org.apache.flink.runtime.rest.messages.TriggerId;
          import org.apache.flink.runtime.rest.messages.cluster.ShutdownHeaders;
          import org.apache.flink.runtime.rest.messages.job.JobDetailsHeaders;
          import org.apache.flink.runtime.rest.messages.job.JobDetailsInfo;
          import org.apache.flink.runtime.rest.messages.job.JobExecutionResultHeaders;
          import org.apache.flink.runtime.rest.messages.job.JobSubmitHeaders;
          import org.apache.flink.runtime.rest.messages.job.JobSubmitRequestBody;
          import org.apache.flink.runtime.rest.messages.job.JobSubmitResponseBody;
          import org.apache.flink.runtime.rest.messages.job.savepoints.SavepointDisposalRequest;
          import org.apache.flink.runtime.rest.messages.job.savepoints.SavepointDisposalStatusHeaders;
          import org.apache.flink.runtime.rest.messages.job.savepoints.SavepointDisposalStatusMessageParameters;
          import org.apache.flink.runtime.rest.messages.job.savepoints.SavepointDisposalTriggerHeaders;
          import org.apache.flink.runtime.rest.messages.job.savepoints.SavepointInfo;
          import org.apache.flink.runtime.rest.messages.job.savepoints.SavepointStatusHeaders;
          import org.apache.flink.runtime.rest.messages.job.savepoints.SavepointStatusMessageParameters;
          import org.apache.flink.runtime.rest.messages.job.savepoints.SavepointTriggerHeaders;
          import org.apache.flink.runtime.rest.messages.job.savepoints.SavepointTriggerMessageParameters;
          import org.apache.flink.runtime.rest.messages.job.savepoints.SavepointTriggerRequestBody;
          import org.apache.flink.runtime.rest.messages.job.savepoints.stop.StopWithSavepointRequestBody;
          import org.apache.flink.runtime.rest.messages.job.savepoints.stop.StopWithSavepointTriggerHeaders;
          import org.apache.flink.runtime.rest.messages.queue.AsynchronouslyCreatedResource;
          import org.apache.flink.runtime.rest.messages.queue.QueueStatus;
          import org.apache.flink.runtime.rest.util.RestClientException;
          import org.apache.flink.runtime.rest.util.RestConstants;
          import org.apache.flink.runtime.util.ExecutorThreadFactory;
          import org.apache.flink.runtime.webmonitor.retriever.LeaderRetriever;
          import org.apache.flink.util.ExceptionUtils;
          import org.apache.flink.util.ExecutorUtils;
          import org.apache.flink.util.FlinkException;
          import org.apache.flink.util.OptionalFailure;
          import org.apache.flink.util.Preconditions;
          import org.apache.flink.util.function.CheckedSupplier;
          
          import org.apache.flink.shaded.netty4.io.netty.channel.ConnectTimeoutException;
          import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpResponseStatus;
          
          import javax.annotation.Nonnull;
          import javax.annotation.Nullable;
          
          import java.io.IOException;
          import java.io.ObjectOutputStream;
          import java.net.MalformedURLException;
          import java.net.URL;
          import java.nio.file.Files;
          import java.nio.file.Paths;
          import java.time.Duration;
          import java.util.ArrayList;
          import java.util.Collection;
          import java.util.Collections;
          import java.util.List;
          import java.util.Map;
          import java.util.concurrent.CompletableFuture;
          import java.util.concurrent.CompletionException;
          import java.util.concurrent.ExecutionException;
          import java.util.concurrent.ExecutorService;
          import java.util.concurrent.Executors;
          import java.util.concurrent.ScheduledExecutorService;
          import java.util.concurrent.TimeUnit;
          import java.util.function.Predicate;
          import java.util.function.Supplier;
          import java.util.stream.Collectors;
          
          /**
           * A {@link ClusterClient} implementation that communicates via HTTP REST requests.
           */
          public class RestClusterClient<T> extends ClusterClient<T> {
          
            private final RestClusterClientConfiguration restClusterClientConfiguration;
          
            /** Timeout for futures. */
            private final Duration timeout;
          
            private final RestClient restClient;
          
            private final ExecutorService executorService = Executors.newFixedThreadPool(4, new ExecutorThreadFactory("Flink-RestClusterClient-IO"));
          
            private final WaitStrategy waitStrategy;
          
            private final T clusterId;
          
            private final ClientHighAvailabilityServices clientHAServices;
          
            private final LeaderRetrievalService webMonitorRetrievalService;
          
            private final LeaderRetriever webMonitorLeaderRetriever = new LeaderRetriever();
          
            /** ExecutorService to run operations that can be retried on exceptions. */
            private ScheduledExecutorService retryExecutorService;
          
            public RestClusterClient(Configuration config, T clusterId) throws Exception {
              this(
                config,
                null,
                clusterId,
                new ExponentialWaitStrategy(10L, 2000L));
            }
          
            @VisibleForTesting
            RestClusterClient(
              Configuration configuration,
              @Nullable RestClient restClient,
              T clusterId,
              WaitStrategy waitStrategy) throws Exception {
              super(configuration);
          
              this.timeout = AkkaUtils.getClientTimeout(configuration);
          
              this.restClusterClientConfiguration = RestClusterClientConfiguration.fromConfiguration(configuration);
          
              if (restClient != null) {
                this.restClient = restClient;
              } else {
                this.restClient = new RestClient(restClusterClientConfiguration.getRestClientConfiguration(), executorService);
              }
          
              this.waitStrategy = Preconditions.checkNotNull(waitStrategy);
              this.clusterId = Preconditions.checkNotNull(clusterId);
          
              this.clientHAServices = HighAvailabilityServicesUtils.createClientHAService(configuration);
          
              this.webMonitorRetrievalService = clientHAServices.getClusterRestEndpointLeaderRetriever();
              this.retryExecutorService = Executors.newSingleThreadScheduledExecutor(new ExecutorThreadFactory("Flink-RestClusterClient-Retry"));
              startLeaderRetrievers();
            }
          
            private void startLeaderRetrievers() throws Exception {
              this.webMonitorRetrievalService.start(webMonitorLeaderRetriever);
            }
          
            @Override
            public void close() {
              ExecutorUtils.gracefulShutdown(restClusterClientConfiguration.getRetryDelay(), TimeUnit.MILLISECONDS, retryExecutorService);
          
              this.restClient.shutdown(Time.seconds(5));
              ExecutorUtils.gracefulShutdown(5, TimeUnit.SECONDS, this.executorService);
          
              try {
                webMonitorRetrievalService.stop();
              } catch (Exception e) {
                log.error("An error occurred during stopping the WebMonitorRetrievalService", e);
              }
          
              try {
                clientHAServices.close();
              } catch (Exception e) {
                log.error("An error occurred during stopping the ClientHighAvailabilityServices", e);
              }
          
              try {
                super.close();
              } catch (Exception e) {
                log.error("Error while closing the Cluster Client", e);
              }
            }
          
            @Override
            public JobSubmissionResult submitJob(JobGraph jobGraph, ClassLoader classLoader) throws ProgramInvocationException {
              log.info("Submitting job {} (detached: {}).", jobGraph.getJobID(), isDetached());
          
              final CompletableFuture<JobSubmissionResult> jobSubmissionFuture = submitJob(jobGraph);
          
              if (isDetached()) {
                try {
                  final JobSubmissionResult jobSubmissionResult = jobSubmissionFuture.get();
          
                  log.warn("Job was executed in detached mode, the results will be available on completion.");
          
                  this.lastJobExecutionResult = new DetachedJobExecutionResult(jobSubmissionResult.getJobID());
                  return lastJobExecutionResult;
                } catch (Exception e) {
                  throw new ProgramInvocationException("Could not submit job",
                    jobGraph.getJobID(), ExceptionUtils.stripExecutionException(e));
                }
              } else {
                final CompletableFuture<JobResult> jobResultFuture = jobSubmissionFuture.thenCompose(
                  ignored -> requestJobResult(jobGraph.getJobID()));
          
                final JobResult jobResult;
                try {
                  jobResult = jobResultFuture.get();
                } catch (Exception e) {
                  throw new ProgramInvocationException("Could not retrieve the execution result.",
                    jobGraph.getJobID(), ExceptionUtils.stripExecutionException(e));
                }
          
                try {
                  this.lastJobExecutionResult = jobResult.toJobExecutionResult(classLoader);
                  return lastJobExecutionResult;
                } catch (JobExecutionException e) {
                  throw new ProgramInvocationException("Job failed.", jobGraph.getJobID(), e);
                } catch (IOException | ClassNotFoundException e) {
                  throw new ProgramInvocationException("Job failed.", jobGraph.getJobID(), e);
                }
              }
            }
          
            /**
             * Requests the job details.
             *
             * @param jobId The job id
             * @return Job details
             */
            public CompletableFuture<JobDetailsInfo> getJobDetails(JobID jobId) {
              final JobDetailsHeaders detailsHeaders = JobDetailsHeaders.getInstance();
              final JobMessageParameters  params = new JobMessageParameters();
              params.jobPathParameter.resolve(jobId);
          
              return sendRequest(
                detailsHeaders,
                params);
            }
          
            @Override
            public CompletableFuture<JobStatus> getJobStatus(JobID jobId) {
              return getJobDetails(jobId).thenApply(JobDetailsInfo::getJobStatus);
            }
          
            /**
             * Requests the {@link JobResult} for the given {@link JobID}. The method retries multiple
             * times to poll the {@link JobResult} before giving up.
             *
             * @param jobId specifying the job for which to retrieve the {@link JobResult}
             * @return Future which is completed with the {@link JobResult} once the job has completed or
             * with a failure if the {@link JobResult} could not be retrieved.
             */
            @Override
            public CompletableFuture<JobResult> requestJobResult(@Nonnull JobID jobId) {
              return pollResourceAsync(
                () -> {
                  final JobMessageParameters messageParameters = new JobMessageParameters();
                  messageParameters.jobPathParameter.resolve(jobId);
                  return sendRequest(
                    JobExecutionResultHeaders.getInstance(),
                    messageParameters);
                });
            }
          
            /**
             * Submits the given {@link JobGraph} to the dispatcher.
             *
             * @param jobGraph to submit
             * @return Future which is completed with the submission response
             */
            @Override
            public CompletableFuture<JobSubmissionResult> submitJob(@Nonnull JobGraph jobGraph) {
              // we have to enable queued scheduling because slots will be allocated lazily
              jobGraph.setAllowQueuedScheduling(true);
          
              CompletableFuture<java.nio.file.Path> jobGraphFileFuture = CompletableFuture.supplyAsync(() -> {
                try {
                  final java.nio.file.Path jobGraphFile = Files.createTempFile("flink-jobgraph", ".bin");
                  try (ObjectOutputStream objectOut = new ObjectOutputStream(Files.newOutputStream(jobGraphFile))) {
                    objectOut.writeObject(jobGraph);
                  }
                  return jobGraphFile;
                } catch (IOException e) {
                  throw new CompletionException(new FlinkException("Failed to serialize JobGraph.", e));
                }
              }, executorService);
          
              CompletableFuture<Tuple2<JobSubmitRequestBody, Collection<FileUpload>>> requestFuture = jobGraphFileFuture.thenApply(jobGraphFile -> {
                List<String> jarFileNames = new ArrayList<>(8);
                List<JobSubmitRequestBody.DistributedCacheFile> artifactFileNames = new ArrayList<>(8);
                Collection<FileUpload> filesToUpload = new ArrayList<>(8);
          
                filesToUpload.add(new FileUpload(jobGraphFile, RestConstants.CONTENT_TYPE_BINARY));
          
                for (Path jar : jobGraph.getUserJars()) {
                  jarFileNames.add(jar.getName());
                  filesToUpload.add(new FileUpload(Paths.get(jar.toUri()), RestConstants.CONTENT_TYPE_JAR));
                }
          
                for (Map.Entry<String, DistributedCache.DistributedCacheEntry> artifacts : jobGraph.getUserArtifacts().entrySet()) {
                  artifactFileNames.add(new JobSubmitRequestBody.DistributedCacheFile(artifacts.getKey(), new Path(artifacts.getValue().filePath).getName()));
                  filesToUpload.add(new FileUpload(Paths.get(artifacts.getValue().filePath), RestConstants.CONTENT_TYPE_BINARY));
                }
          
                final JobSubmitRequestBody requestBody = new JobSubmitRequestBody(
                  jobGraphFile.getFileName().toString(),
                  jarFileNames,
                  artifactFileNames);
          
                return Tuple2.of(requestBody, Collections.unmodifiableCollection(filesToUpload));
              });
          
              final CompletableFuture<JobSubmitResponseBody> submissionFuture = requestFuture.thenCompose(
                requestAndFileUploads -> sendRetriableRequest(
                  JobSubmitHeaders.getInstance(),
                  EmptyMessageParameters.getInstance(),
                  requestAndFileUploads.f0,
                  requestAndFileUploads.f1,
                  isConnectionProblemOrServiceUnavailable())
              );
          
              submissionFuture
                .thenCombine(jobGraphFileFuture, (ignored, jobGraphFile) -> jobGraphFile)
                .thenAccept(jobGraphFile -> {
                try {
                  Files.delete(jobGraphFile);
                } catch (IOException e) {
                  log.warn("Could not delete temporary file {}.", jobGraphFile, e);
                }
              });
          
              return submissionFuture
                .thenApply(
                  (JobSubmitResponseBody jobSubmitResponseBody) -> new JobSubmissionResult(jobGraph.getJobID()))
                .exceptionally(
                  (Throwable throwable) -> {
                    throw new CompletionException(new JobSubmissionException(jobGraph.getJobID(), "Failed to submit JobGraph.", ExceptionUtils.stripCompletionException(throwable)));
                  });
            }
          
            @Override
            public void cancel(JobID jobID) throws Exception {
              JobCancellationMessageParameters params = new JobCancellationMessageParameters();
              params.jobPathParameter.resolve(jobID);
              params.terminationModeQueryParameter.resolve(Collections.singletonList(TerminationModeQueryParameter.TerminationMode.CANCEL));
              CompletableFuture<EmptyResponseBody> responseFuture = sendRequest(
                JobCancellationHeaders.getInstance(),
                params);
              responseFuture.get(timeout.toMillis(), TimeUnit.MILLISECONDS);
            }
          
            @Override
            public String stopWithSavepoint(
                final JobID jobId,
                final boolean advanceToEndOfTime,
                @Nullable final String savepointDirectory) throws Exception {
          
              final StopWithSavepointTriggerHeaders stopWithSavepointTriggerHeaders = StopWithSavepointTriggerHeaders.getInstance();
          
              final SavepointTriggerMessageParameters stopWithSavepointTriggerMessageParameters =
                  stopWithSavepointTriggerHeaders.getUnresolvedMessageParameters();
              stopWithSavepointTriggerMessageParameters.jobID.resolve(jobId);
          
              final CompletableFuture<TriggerResponse> responseFuture = sendRequest(
                  stopWithSavepointTriggerHeaders,
                  stopWithSavepointTriggerMessageParameters,
                  new StopWithSavepointRequestBody(savepointDirectory, advanceToEndOfTime));
          
              return responseFuture.thenCompose(savepointTriggerResponseBody -> {
                final TriggerId savepointTriggerId = savepointTriggerResponseBody.getTriggerId();
                return pollSavepointAsync(jobId, savepointTriggerId);
              }).thenApply(savepointInfo -> {
                if (savepointInfo.getFailureCause() != null) {
                  throw new CompletionException(savepointInfo.getFailureCause());
                }
                return savepointInfo.getLocation();
              }).get();
            }
          
            @Override
            public String cancelWithSavepoint(JobID jobId, @Nullable String savepointDirectory) throws Exception {
              return triggerSavepoint(jobId, savepointDirectory, true).get();
            }
          
            @Override
            public CompletableFuture<String> triggerSavepoint(
                final JobID jobId,
                final @Nullable String savepointDirectory) {
              return triggerSavepoint(jobId, savepointDirectory, false);
            }
          
            private CompletableFuture<String> triggerSavepoint(
                final JobID jobId,
                final @Nullable String savepointDirectory,
                final boolean cancelJob) {
              final SavepointTriggerHeaders savepointTriggerHeaders = SavepointTriggerHeaders.getInstance();
              final SavepointTriggerMessageParameters savepointTriggerMessageParameters =
                savepointTriggerHeaders.getUnresolvedMessageParameters();
              savepointTriggerMessageParameters.jobID.resolve(jobId);
          
              final CompletableFuture<TriggerResponse> responseFuture = sendRequest(
                savepointTriggerHeaders,
                savepointTriggerMessageParameters,
                new SavepointTriggerRequestBody(savepointDirectory, cancelJob));
          
              return responseFuture.thenCompose(savepointTriggerResponseBody -> {
                final TriggerId savepointTriggerId = savepointTriggerResponseBody.getTriggerId();
                return pollSavepointAsync(jobId, savepointTriggerId);
              }).thenApply(savepointInfo -> {
                if (savepointInfo.getFailureCause() != null) {
                  throw new CompletionException(savepointInfo.getFailureCause());
                }
                return savepointInfo.getLocation();
              });
            }
          
            @Override
            public Map<String, OptionalFailure<Object>> getAccumulators(final JobID jobID, ClassLoader loader) throws Exception {
              final JobAccumulatorsHeaders accumulatorsHeaders = JobAccumulatorsHeaders.getInstance();
              final JobAccumulatorsMessageParameters accMsgParams = accumulatorsHeaders.getUnresolvedMessageParameters();
              accMsgParams.jobPathParameter.resolve(jobID);
              accMsgParams.includeSerializedAccumulatorsParameter.resolve(Collections.singletonList(true));
          
              CompletableFuture<JobAccumulatorsInfo> responseFuture = sendRequest(
                accumulatorsHeaders,
                accMsgParams);
          
              Map<String, OptionalFailure<Object>> result = Collections.emptyMap();
          
              try {
                result = responseFuture.thenApply((JobAccumulatorsInfo accumulatorsInfo) -> {
                  try {
                    return AccumulatorHelper.deserializeAccumulators(
                      accumulatorsInfo.getSerializedUserAccumulators(),
                      loader);
                  } catch (Exception e) {
                    throw new CompletionException(
                      new FlinkException(
                        String.format("Deserialization of accumulators for job %s failed.", jobID),
                        e));
                  }
                }).get(timeout.toMillis(), TimeUnit.MILLISECONDS);
              } catch (ExecutionException ee) {
                ExceptionUtils.rethrowException(ExceptionUtils.stripExecutionException(ee));
              }
          
              return result;
            }
          
            private CompletableFuture<SavepointInfo> pollSavepointAsync(
                final JobID jobId,
                final TriggerId triggerID) {
              return pollResourceAsync(() -> {
                final SavepointStatusHeaders savepointStatusHeaders = SavepointStatusHeaders.getInstance();
                final SavepointStatusMessageParameters savepointStatusMessageParameters =
                  savepointStatusHeaders.getUnresolvedMessageParameters();
                savepointStatusMessageParameters.jobIdPathParameter.resolve(jobId);
                savepointStatusMessageParameters.triggerIdPathParameter.resolve(triggerID);
                return sendRequest(
                  savepointStatusHeaders,
                  savepointStatusMessageParameters);
              });
            }
          
            @Override
            public CompletableFuture<Collection<JobStatusMessage>> listJobs() {
              return sendRequest(JobsOverviewHeaders.getInstance())
                .thenApply(
                  (multipleJobsDetails) -> multipleJobsDetails
                    .getJobs()
                    .stream()
                    .map(detail -> new JobStatusMessage(
                      detail.getJobId(),
                      detail.getJobName(),
                      detail.getStatus(),
                      detail.getStartTime()))
                    .collect(Collectors.toList()));
            }
          
            @Override
            public T getClusterId() {
              return clusterId;
            }
          
            @Override
            public CompletableFuture<Acknowledge> disposeSavepoint(String savepointPath) {
              final SavepointDisposalRequest savepointDisposalRequest = new SavepointDisposalRequest(savepointPath);
          
              final CompletableFuture<TriggerResponse> savepointDisposalTriggerFuture = sendRequest(
                SavepointDisposalTriggerHeaders.getInstance(),
                savepointDisposalRequest);
          
              final CompletableFuture<AsynchronousOperationInfo> savepointDisposalFuture = savepointDisposalTriggerFuture.thenCompose(
                (TriggerResponse triggerResponse) -> {
                  final TriggerId triggerId = triggerResponse.getTriggerId();
                  final SavepointDisposalStatusHeaders savepointDisposalStatusHeaders = SavepointDisposalStatusHeaders.getInstance();
                  final SavepointDisposalStatusMessageParameters savepointDisposalStatusMessageParameters = savepointDisposalStatusHeaders.getUnresolvedMessageParameters();
                  savepointDisposalStatusMessageParameters.triggerIdPathParameter.resolve(triggerId);
          
                  return pollResourceAsync(
                    () -> sendRequest(
                      savepointDisposalStatusHeaders,
                      savepointDisposalStatusMessageParameters));
                });
          
              return savepointDisposalFuture.thenApply(
                (AsynchronousOperationInfo asynchronousOperationInfo) -> {
                  if (asynchronousOperationInfo.getFailureCause() == null) {
                    return Acknowledge.get();
                  } else {
                    throw new CompletionException(asynchronousOperationInfo.getFailureCause());
                  }
                });
            }
          
            @Override
            public void shutDownCluster() {
              try {
                sendRequest(ShutdownHeaders.getInstance()).get();
              } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
              } catch (ExecutionException e) {
                log.error("Error while shutting down cluster", e);
              }
            }
          
            /**
             * Creates a {@code CompletableFuture} that polls a {@code AsynchronouslyCreatedResource} until
             * its {@link AsynchronouslyCreatedResource#queueStatus() QueueStatus} becomes
             * {@link QueueStatus.Id#COMPLETED COMPLETED}. The future completes with the result of
             * {@link AsynchronouslyCreatedResource#resource()}.
             *
             * @param resourceFutureSupplier The operation which polls for the
             *                               {@code AsynchronouslyCreatedResource}.
             * @param <R>                    The type of the resource.
             * @param <A>                    The type of the {@code AsynchronouslyCreatedResource}.
             * @return A {@code CompletableFuture} delivering the resource.
             */
            private <R, A extends AsynchronouslyCreatedResource<R>> CompletableFuture<R> pollResourceAsync(
                final Supplier<CompletableFuture<A>> resourceFutureSupplier) {
              return pollResourceAsync(resourceFutureSupplier, new CompletableFuture<>(), 0);
            }
          
            private <R, A extends AsynchronouslyCreatedResource<R>> CompletableFuture<R> pollResourceAsync(
                final Supplier<CompletableFuture<A>> resourceFutureSupplier,
                final CompletableFuture<R> resultFuture,
                final long attempt) {
          
              resourceFutureSupplier.get().whenComplete((asynchronouslyCreatedResource, throwable) -> {
                if (throwable != null) {
                  resultFuture.completeExceptionally(throwable);
                } else {
                  if (asynchronouslyCreatedResource.queueStatus().getId() == QueueStatus.Id.COMPLETED) {
                    resultFuture.complete(asynchronouslyCreatedResource.resource());
                  } else {
                    retryExecutorService.schedule(() -> {
                      pollResourceAsync(resourceFutureSupplier, resultFuture, attempt + 1);
                    }, waitStrategy.sleepTime(attempt), TimeUnit.MILLISECONDS);
                  }
                }
              });
          
              return resultFuture;
            }
          
            // ======================================
            // Legacy stuff we actually implement
            // ======================================
          
            @Override
            public String getWebInterfaceURL() {
              try {
                return getWebMonitorBaseUrl().get().toString();
              } catch (InterruptedException | ExecutionException e) {
                ExceptionUtils.checkInterrupted(e);
          
                log.warn("Could not retrieve the web interface URL for the cluster.", e);
                return "Unknown address.";
              }
            }
          
            //-------------------------------------------------------------------------
            // RestClient Helper
            //-------------------------------------------------------------------------
          
            private <M extends MessageHeaders<EmptyRequestBody, P, U>, U extends MessageParameters, P extends ResponseBody> CompletableFuture<P>
                sendRequest(M messageHeaders, U messageParameters) {
              return sendRequest(messageHeaders, messageParameters, EmptyRequestBody.getInstance());
            }
          
            private <M extends MessageHeaders<R, P, EmptyMessageParameters>, R extends RequestBody, P extends ResponseBody> CompletableFuture<P>
                sendRequest(M messageHeaders, R request) {
              return sendRequest(messageHeaders, EmptyMessageParameters.getInstance(), request);
            }
          
            @VisibleForTesting
            <M extends MessageHeaders<EmptyRequestBody, P, EmptyMessageParameters>, P extends ResponseBody> CompletableFuture<P>
                sendRequest(M messageHeaders) {
              return sendRequest(messageHeaders, EmptyMessageParameters.getInstance(), EmptyRequestBody.getInstance());
            }
          
            @VisibleForTesting
            public <M extends MessageHeaders<R, P, U>, U extends MessageParameters, R extends RequestBody, P extends ResponseBody> CompletableFuture<P>
                sendRequest(M messageHeaders, U messageParameters, R request) {
              return sendRetriableRequest(
                messageHeaders, messageParameters, request, isConnectionProblemOrServiceUnavailable());
            }
          
            private <M extends MessageHeaders<R, P, U>, U extends MessageParameters, R extends RequestBody, P extends ResponseBody> CompletableFuture<P>
                sendRetriableRequest(M messageHeaders, U messageParameters, R request, Predicate<Throwable> retryPredicate) {
              return sendRetriableRequest(messageHeaders, messageParameters, request, Collections.emptyList(), retryPredicate);
            }
          
            private <M extends MessageHeaders<R, P, U>, U extends MessageParameters, R extends RequestBody, P extends ResponseBody> CompletableFuture<P>
            sendRetriableRequest(M messageHeaders, U messageParameters, R request, Collection<FileUpload> filesToUpload, Predicate<Throwable> retryPredicate) {
              return retry(() -> getWebMonitorBaseUrl().thenCompose(webMonitorBaseUrl -> {
                try {
                  return restClient.sendRequest(webMonitorBaseUrl.getHost(), webMonitorBaseUrl.getPort(), messageHeaders, messageParameters, request, filesToUpload);
                } catch (IOException e) {
                  throw new CompletionException(e);
                }
              }), retryPredicate);
            }
          
            private <C> CompletableFuture<C> retry(
                CheckedSupplier<CompletableFuture<C>> operation,
                Predicate<Throwable> retryPredicate) {
              return FutureUtils.retryWithDelay(
                CheckedSupplier.unchecked(operation),
                restClusterClientConfiguration.getRetryMaxAttempts(),
                Time.milliseconds(restClusterClientConfiguration.getRetryDelay()),
                retryPredicate,
                new ScheduledExecutorServiceAdapter(retryExecutorService));
            }
          
            private static Predicate<Throwable> isConnectionProblemOrServiceUnavailable() {
              return isConnectionProblemException().or(isServiceUnavailable());
            }
          
            private static Predicate<Throwable> isConnectionProblemException() {
              return (throwable) ->
                ExceptionUtils.findThrowable(throwable, java.net.ConnectException.class).isPresent() ||
                  ExceptionUtils.findThrowable(throwable, java.net.SocketTimeoutException.class).isPresent() ||
                  ExceptionUtils.findThrowable(throwable, ConnectTimeoutException.class).isPresent() ||
                  ExceptionUtils.findThrowable(throwable, IOException.class).isPresent();
            }
          
            private static Predicate<Throwable> isServiceUnavailable() {
              return httpExceptionCodePredicate(code -> code == HttpResponseStatus.SERVICE_UNAVAILABLE.code());
            }
          
            private static Predicate<Throwable> httpExceptionCodePredicate(Predicate<Integer> statusCodePredicate) {
              return (throwable) -> ExceptionUtils.findThrowable(throwable, RestClientException.class)
                .map(restClientException -> {
                  final int code = restClientException.getHttpResponseStatus().code();
                  return statusCodePredicate.test(code);
                })
                .orElse(false);
            }
          
            @VisibleForTesting
            CompletableFuture<URL> getWebMonitorBaseUrl() {
              return FutureUtils.orTimeout(
                  webMonitorLeaderRetriever.getLeaderFuture(),
                  restClusterClientConfiguration.getAwaitLeaderTimeout(),
                  TimeUnit.MILLISECONDS)
                .thenApplyAsync(leaderAddressSessionId -> {
                  final String url = leaderAddressSessionId.f0;
                  try {
                    return new URL(url);
                  } catch (MalformedURLException e) {
                    throw new IllegalArgumentException("Could not parse URL from " + url, e);
                  }
                }, executorService);
            }
          }
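
          A hedged usage sketch: pointing a RestClusterClient at a standalone session cluster and listing its jobs. The host and port are assumptions, and StandaloneClusterId (from the flink-clients deployment package) stands in for the cluster-id type parameter:

          import org.apache.flink.client.deployment.StandaloneClusterId;
          import org.apache.flink.client.program.rest.RestClusterClient;
          import org.apache.flink.configuration.Configuration;
          import org.apache.flink.configuration.RestOptions;

          public class ListJobsExample {
            public static void main(String[] args) throws Exception {
              Configuration config = new Configuration();
              config.setString(RestOptions.ADDRESS, "localhost"); // assumed REST endpoint
              config.setInteger(RestOptions.PORT, 8081);          // default REST port

              RestClusterClient<StandaloneClusterId> client =
                new RestClusterClient<>(config, StandaloneClusterId.getInstance());
              try {
                // listJobs() issues the JobsOverviewHeaders request shown above
                client.listJobs().get().forEach(job ->
                  System.out.println(job.getJobId() + " " + job.getJobName()));
              } finally {
                client.close();
              }
            }
          }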
          
          • RestClusterClientConfiguration (configuration object for RestClusterClient)
          package org.apache.flink.client.program.rest;
          
          import org.apache.flink.configuration.Configuration;
          import org.apache.flink.configuration.RestOptions;
          import org.apache.flink.runtime.rest.RestClientConfiguration;
          import org.apache.flink.util.ConfigurationException;
          import org.apache.flink.util.Preconditions;
          
          import static org.apache.flink.util.Preconditions.checkArgument;
          
          /**
           * A configuration object for {@link RestClusterClient}s.
           */
          public final class RestClusterClientConfiguration {
          
            private final RestClientConfiguration restClientConfiguration;
          
            private final long awaitLeaderTimeout;
          
            private final int retryMaxAttempts;
          
            private final long retryDelay;
          
            private RestClusterClientConfiguration(
                final RestClientConfiguration endpointConfiguration,
                final long awaitLeaderTimeout,
                final int retryMaxAttempts,
                final long retryDelay) {
              checkArgument(awaitLeaderTimeout >= 0, "awaitLeaderTimeout must be equal to or greater than 0");
              checkArgument(retryMaxAttempts >= 0, "retryMaxAttempts must be equal to or greater than 0");
              checkArgument(retryDelay >= 0, "retryDelay must be equal to or greater than 0");
          
              this.restClientConfiguration = Preconditions.checkNotNull(endpointConfiguration);
              this.awaitLeaderTimeout = awaitLeaderTimeout;
              this.retryMaxAttempts = retryMaxAttempts;
              this.retryDelay = retryDelay;
            }
          
            public RestClientConfiguration getRestClientConfiguration() {
              return restClientConfiguration;
            }
          
            /**
             * @see RestOptions#AWAIT_LEADER_TIMEOUT
             */
            public long getAwaitLeaderTimeout() {
              return awaitLeaderTimeout;
            }
          
            /**
             * @see RestOptions#RETRY_MAX_ATTEMPTS
             */
            public int getRetryMaxAttempts() {
              return retryMaxAttempts;
            }
          
            /**
             * @see RestOptions#RETRY_DELAY
             */
            public long getRetryDelay() {
              return retryDelay;
            }
          
            public static RestClusterClientConfiguration fromConfiguration(Configuration config) throws ConfigurationException {
              RestClientConfiguration restClientConfiguration = RestClientConfiguration.fromConfiguration(config);
          
              final long awaitLeaderTimeout = config.getLong(RestOptions.AWAIT_LEADER_TIMEOUT);
              final int retryMaxAttempts = config.getInteger(RestOptions.RETRY_MAX_ATTEMPTS);
              final long retryDelay = config.getLong(RestOptions.RETRY_DELAY);
          
              return new RestClusterClientConfiguration(restClientConfiguration, awaitLeaderTimeout, retryMaxAttempts, retryDelay);
            }
          }
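
          The three client-side knobs read by fromConfiguration map directly to RestOptions. A small sketch overriding them before building the configuration (the concrete values are arbitrary examples):

          import org.apache.flink.client.program.rest.RestClusterClientConfiguration;
          import org.apache.flink.configuration.Configuration;
          import org.apache.flink.configuration.RestOptions;

          public class ClientConfigExample {
            public static void main(String[] args) throws Exception {
              Configuration config = new Configuration();
              config.setLong(RestOptions.AWAIT_LEADER_TIMEOUT, 30_000L); // ms to wait for the REST leader
              config.setInteger(RestOptions.RETRY_MAX_ATTEMPTS, 20);     // retries on connection problems
              config.setLong(RestOptions.RETRY_DELAY, 3_000L);           // ms between retries

              RestClusterClientConfiguration clientConfig =
                RestClusterClientConfiguration.fromConfiguration(config);
              System.out.println(clientConfig.getRetryMaxAttempts()); // 20
            }
          }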
          
          • retry (wait/retry strategies for polling REST operations)

            • WaitStrategy (wait strategy)
            package org.apache.flink.client.program.rest.retry;
            
            /**
             * Operations that are polling for a result to arrive require a waiting time
             * between consecutive polls. A {@code WaitStrategy} determines this waiting
             * time.
             */
            @FunctionalInterface
            public interface WaitStrategy {
            
              /**
               * Returns the time to wait until the next attempt. Attempts start at {@code 0}.
               *
               * @param attempt The number of the last attempt.
               * @return Waiting time in ms.
               */
              long sleepTime(long attempt);
            
            }
            
            • ExponentialWaitStrategy (exponentially increasing wait strategy)
            package org.apache.flink.client.program.rest.retry;
            
            import static org.apache.flink.util.Preconditions.checkArgument;
            
            /**
             * {@link WaitStrategy} with exponentially increasing sleep time.
             */
            public class ExponentialWaitStrategy implements WaitStrategy {
            
              private final long initialWait;
            
              private final long maxWait;
            
              public ExponentialWaitStrategy(final long initialWait, final long maxWait) {
                checkArgument(initialWait > 0, "initialWait must be positive, was %s", initialWait);
                checkArgument(maxWait > 0, "maxWait must be positive, was %s", maxWait);
                checkArgument(initialWait <= maxWait, "initialWait must be lower than or equal to maxWait (initialWait=%s, maxWait=%s)", initialWait, maxWait);
                this.initialWait = initialWait;
                this.maxWait = maxWait;
              }
            
              @Override
              public long sleepTime(final long attempt) {
                checkArgument(attempt >= 0, "attempt must not be negative (%s)", attempt);
                final long exponentialSleepTime = initialWait * Math.round(Math.pow(2, attempt));
                return exponentialSleepTime >= 0 && exponentialSleepTime < maxWait ? exponentialSleepTime : maxWait;
              }
            }
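
            With the (10, 2000) arguments that RestClusterClient passes above, the back-off doubles per attempt and saturates at maxWait. A quick demonstration:

            import org.apache.flink.client.program.rest.retry.ExponentialWaitStrategy;
            import org.apache.flink.client.program.rest.retry.WaitStrategy;

            public class WaitStrategyDemo {
              public static void main(String[] args) {
                WaitStrategy strategy = new ExponentialWaitStrategy(10L, 2000L);
                for (long attempt = 0; attempt < 10; attempt++) {
                  // prints 10, 20, 40, 80, 160, 320, 640, 1280, 2000, 2000
                  System.out.println("attempt " + attempt + ": " + strategy.sleepTime(attempt) + " ms");
                }
              }
            }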
            
  • flink-connectors

  • flink-container

  • flink-contrib

  • flink-core

  • flink-dist

  • flink-docs

  • flink-end-to-end-tests

  • flink-examples

  • flink-filesystems

  • flink-formats

  • flink-fs-tests

  • flink-java

  • flink-jepsen

  • flink-libraries

  • flink-mesos

  • flink-metrics

  • flink-ml-parent

  • flink-optimizer

  • flink-python

  • flink-queryable-state

  • flink-quickstart

  • flink-runtime

  • flink-runtime-web

  • flink-scala

  • flink-scala-shell

  • flink-shaded-curator

  • flink-state-backends

  • flink-streaming-java

  • flink-streaming-scala

  • flink-table

  • flink-tests

  • flink-test-utils-parent

  • flink-walkthroughs

  • flink-yarn

  • flink-yarn-tests
