hadoop ha 读取 active 状态的活动节点

方式一

方式二

package com.yanheap.hadoop;

import com.alibaba.druid.util.StringUtils;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HAUtil;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

/**
 * Client-side helper for an HDFS HA (high-availability) cluster.
 *
 * <p>Configures a two-NameNode nameservice ({@code nn1}/{@code nn2}) with the
 * standard {@code ConfiguredFailoverProxyProvider}, then either resolves the
 * RPC address of the currently active NameNode ({@link #getActive()}) or reads
 * a sample file from HDFS ({@link #readFile()}).
 *
 * @author wangel.lee
 */
public class ZookeeperActive {

    private static String clusterName = "nameservice1";
    private static final String HADOOP_URL = "hdfs://" + clusterName;
    private static Configuration conf;

    static {
        conf = new Configuration();
        conf.set("fs.defaultFS", HADOOP_URL);
        conf.set("dfs.nameservices", clusterName);
        conf.set("dfs.ha.namenodes." + clusterName, "nn1,nn2");
        // HA setup: if the NameNode the client talks to has become standby,
        // the failover proxy provider logs the failure and transparently
        // retries against the other NameNode.
        conf.set("dfs.namenode.rpc-address." + clusterName + ".nn1", "10.1.1.12:8020");
        conf.set("dfs.namenode.rpc-address." + clusterName + ".nn2", "10.1.1.11:8020");
        conf.set("dfs.client.failover.proxy.provider." + clusterName,
                "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
    }

    /**
     * Resolves and prints the RPC address of the active NameNode, plus the
     * time the lookup took in milliseconds.
     *
     * @throws IOException if the cluster cannot be contacted or no active
     *                     NameNode can be determined
     */
    private static void getActive() throws IOException {
        // try-with-resources closes the FileSystem even if the lookup fails
        try (FileSystem fileSystem = FileSystem.get(URI.create(HADOOP_URL), conf)) {
            long start = System.currentTimeMillis();

            // Throws (rather than returning null) when no active NN is found,
            // so the dereference below is safe.
            InetSocketAddress active = HAUtil.getAddressOfActive(fileSystem);
            InetAddress address = active.getAddress();
            String hivePath = "hdfs://" + address.getHostAddress() + ":" + active.getPort();

            System.out.println("-------------------------------------->" + hivePath);
            System.out.println("-------------------------------------->" + (System.currentTimeMillis() - start));
        }
    }

    /**
     * Reads {@code /tmp/media_info.csv} from HDFS and prints every non-empty
     * line to stdout.
     *
     * @throws IOException if the file system cannot be reached or the file
     *                     cannot be opened or read
     */
    private static void readFile() throws IOException {
        Path srcPath = new Path("/tmp/media_info.csv");
        // Single FileSystem instance (the original created and leaked one),
        // and try-with-resources guarantees reader/stream/fs are closed in
        // reverse order even when opening or reading fails.
        try (FileSystem fileSystem = FileSystem.get(URI.create(HADOOP_URL), conf);
             FSDataInputStream in = fileSystem.open(srcPath);
             BufferedReader br = new BufferedReader(
                     new InputStreamReader(in, StandardCharsets.UTF_8))) {
            String line;
            while (null != (line = br.readLine())) {
                if (!StringUtils.isEmpty(line)) {
                    System.out.println("-------->:" + line);
                }
            }
        }
    }

    public static void main(String[] args) throws IOException {
        //getActive();
        readFile();
    }

}

另,本地windows执行hadoop需要环境变量的支持,如下提供hadoop命令下载包,下载后直接添加环境变量对他的引用

链接:https://pan.baidu.com/s/1eRIHmdO 密码:ly38

Original: https://www.cnblogs.com/hwaggLee/p/8204121.html
Author: 243573295
Title: hadoop ha 读取 active 状态的活动节点

原创文章受到原创版权保护。转载请注明出处:https://www.johngo689.com/7179/

转载文章受原作者版权保护。转载请注明原作者出处!

(0)

大家都在看

免费咨询
免费咨询
扫码关注
扫码关注
联系站长

站长Johngo!

大数据和算法重度研究者!

持续产出大数据、算法、LeetCode干货,以及业界好资源!

2022012703491714

微信来撩,免费咨询:xiaozhu_tec

分享本页
返回顶部