branch: master
parent 14a5b727b6
commit 1e38cc29f3
.classpath
@@ -0,0 +1,27 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
	<classpathentry kind="src" output="target/classes" path="src/main/java">
		<attributes>
			<attribute name="optional" value="true"/>
			<attribute name="maven.pomderived" value="true"/>
		</attributes>
	</classpathentry>
	<classpathentry kind="src" output="target/test-classes" path="src/test/java">
		<attributes>
			<attribute name="optional" value="true"/>
			<attribute name="maven.pomderived" value="true"/>
		</attributes>
	</classpathentry>
	<classpathentry including="**/*.java" kind="src" path="src/main/resources"/>
	<classpathentry kind="con" path="org.eclipse.m2e.MAVEN2_CLASSPATH_CONTAINER">
		<attributes>
			<attribute name="maven.pomderived" value="true"/>
		</attributes>
	</classpathentry>
	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.6">
		<attributes>
			<attribute name="maven.pomderived" value="true"/>
		</attributes>
	</classpathentry>
	<classpathentry kind="output" path="target/classes"/>
</classpath>
.gitignore
@@ -0,0 +1 @@
/target
.project
@@ -0,0 +1,23 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
	<name>logcount</name>
	<comment></comment>
	<projects>
	</projects>
	<buildSpec>
		<buildCommand>
			<name>org.eclipse.jdt.core.javabuilder</name>
			<arguments>
			</arguments>
		</buildCommand>
		<buildCommand>
			<name>org.eclipse.m2e.core.maven2Builder</name>
			<arguments>
			</arguments>
		</buildCommand>
	</buildSpec>
	<natures>
		<nature>org.eclipse.jdt.core.javanature</nature>
		<nature>org.eclipse.m2e.core.maven2Nature</nature>
	</natures>
</projectDescription>
.settings/org.eclipse.core.resources.prefs
@@ -0,0 +1,5 @@
eclipse.preferences.version=1
encoding//src/main/java=UTF-8
encoding//src/main/resources=UTF-8
encoding//src/test/java=UTF-8
encoding/<project>=UTF-8
.settings/org.eclipse.jdt.core.prefs
@@ -0,0 +1,5 @@
eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
org.eclipse.jdt.core.compiler.compliance=1.6
org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning
org.eclipse.jdt.core.compiler.source=1.6
.settings/org.eclipse.m2e.core.prefs
@@ -0,0 +1,4 @@
activeProfiles=
eclipse.preferences.version=1
resolveWorkspaceProjects=true
version=1
Binary image added (Size: 53 KiB).
Binary file not shown.
Binary file not shown.
pom.xml
@@ -0,0 +1,87 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
	<modelVersion>4.0.0</modelVersion>

	<groupId>com.wankun</groupId>
	<artifactId>logcount</artifactId>
	<version>1.0</version>
	<packaging>jar</packaging>

	<name>logcount</name>
	<url>http://maven.apache.org</url>

	<repositories>
		<repository>
			<id>cloudera</id>
			<url>https://repository.cloudera.com/cloudera/cloudera-repos</url>
		</repository>
	</repositories>

	<properties>
		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
		<spark.version>1.1.0-cdh5.2.0</spark.version>
		<hbase.version>0.98.6-cdh5.2.0</hbase.version>
	</properties>
	<dependencies>
		<dependency>
			<groupId>jdk.tools</groupId>
			<artifactId>jdk.tools</artifactId>
			<version>1.7.0_51</version>
			<scope>system</scope>
			<systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
		</dependency>
		<dependency>
			<groupId>ch.qos.logback</groupId>
			<artifactId>logback-classic</artifactId>
			<version>1.1.2</version>
		</dependency>
		<dependency>
			<groupId>org.apache.kafka</groupId>
			<artifactId>kafka_2.10</artifactId>
			<version>0.8.1.1</version>
		</dependency>

		<dependency>
			<groupId>org.apache.spark</groupId>
			<artifactId>spark-streaming_2.10</artifactId>
			<version>${spark.version}</version>
		</dependency>
		<dependency>
			<groupId>org.apache.spark</groupId>
			<artifactId>spark-streaming-kafka_2.10</artifactId>
			<version>${spark.version}</version>
		</dependency>

		<dependency>
			<groupId>org.apache.hbase</groupId>
			<artifactId>hbase-client</artifactId>
			<version>${hbase.version}</version>
		</dependency>
		<dependency>
			<groupId>org.apache.hbase</groupId>
			<artifactId>hbase-server</artifactId>
			<version>${hbase.version}</version>
		</dependency>

		<dependency>
			<groupId>junit</groupId>
			<artifactId>junit</artifactId>
			<version>4.11</version>
			<scope>test</scope>
		</dependency>
	</dependencies>

	<build>
		<plugins>
			<plugin>
				<artifactId>maven-compiler-plugin</artifactId>
				<version>3.1</version>
				<configuration>
					<source>1.6</source>
					<target>1.6</target>
					<encoding>UTF-8</encoding>
				</configuration>
			</plugin>
		</plugins>
	</build>
</project>
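Note: Maven does not expand shell environment variables written as ${JAVA_HOME}, so the jdk.tools systemPath above resolves only if a JAVA_HOME property is supplied some other way (e.g. -DJAVA_HOME=... or a profile). A commonly used alternative, shown here as a suggestion rather than part of the commit, locates tools.jar relative to the running JRE:

	<systemPath>${java.home}/../lib/tools.jar</systemPath>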
src/main/java/com/wankun/logcount/kafka/MsgSender.java
@@ -0,0 +1,54 @@
package com.wankun.logcount.kafka;

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Properties;
import java.util.concurrent.BlockingQueue;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class MsgSender extends Thread {
	private final static Logger logger = LoggerFactory.getLogger(MsgSender.class);

	private SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMdd HH:mm:ss");

	private BlockingQueue<String> queue;
	private Producer<String, String> producer;

	public MsgSender(BlockingQueue<String> queue) {
		this.queue = queue;

		Properties props = new Properties();
		props.put("metadata.broker.list", "10.10.102.191:9092,10.10.102.192:9092,10.10.102.193:9092");
		props.put("serializer.class", "kafka.serializer.StringEncoder");
		// props.put("partitioner.class", "example.producer.SimplePartitioner");
		props.put("request.required.acks", "1");

		ProducerConfig config = new ProducerConfig(props);

		producer = new Producer<String, String>(config);
	}

	@Override
	public void run() {
		// Drain the queue forever, forwarding each non-blank line to the "recsys" topic.
		while (true) {
			try {
				String line = queue.take();
				if (line != null && !line.replace("\n", "").replace("\r", "").equals("")) {
					String timestamp = sdf.format(new Date());
					KeyedMessage<String, String> data = new KeyedMessage<String, String>("recsys", timestamp, line);
					logger.debug("sending kv :( {}:{})", timestamp, line);
					producer.send(data);
				}
			} catch (InterruptedException e) {
				logger.error("kafka producer failed to send message", e);
			}
		}
	}

}
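MsgSender uses the old (0.8) kafka.javaapi.producer API and hard-codes its broker list. A minimal smoke test might look like the sketch below; the demo class and sample line are hypothetical, and it assumes the brokers above are reachable:

	package com.wankun.logcount.kafka;

	import java.util.concurrent.ArrayBlockingQueue;
	import java.util.concurrent.BlockingQueue;

	// Hypothetical smoke test, not part of this commit.
	public class MsgSenderDemo {
		public static void main(String[] args) throws InterruptedException {
			BlockingQueue<String> queue = new ArrayBlockingQueue<String>(10);
			new MsgSender(queue).start();   // connects to the hard-coded broker list
			queue.put("sample log line\n"); // ends up on the "recsys" topic, keyed by timestamp
		}
	}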
src/main/java/com/wankun/logcount/kafka/TailLog.java
@@ -0,0 +1,132 @@
package com.wankun.logcount.kafka;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.nio.CharBuffer;
import java.util.concurrent.BlockingQueue;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TailLog extends Thread {
	private final static Logger logger = LoggerFactory.getLogger(TailLog.class);

	private BlockingQueue<String> queue;
	private String logname;

	private CharBuffer buf = CharBuffer.allocate(4096);

	// private ByteBuffer buf = ByteBuffer.allocate(4096);

	public TailLog(BlockingQueue<String> queue, String logname) {
		this.queue = queue;
		this.logname = logname;
	}

	@Override
	public void run() {
		BufferedReader reader = null;
		try {
			// Path logpath=Paths.get(logname);
			// File posfile =
			// logpath.getParent().resolve("."+logpath.getFileName()+".pos").toFile();
			reader = new BufferedReader(new FileReader(new File(logname)));

			long filesize = 0;
			while (true) {
				// If the file shrank, it has been rotated.
				if (filesize > new File(logname).length()) {
					logger.debug("filesize :{} current system file size :{} . Log file switchover!", filesize,
							new File(logname).length());
					try {
						// Drain what is left of the old file before reopening.
						StringBuilder line = new StringBuilder();
						while (reader.read(buf) > 0) {
							buf.flip();
							synchronized (buf) {
								// Read the buffer and split it into lines.
								for (int i = 0; i < buf.limit(); i++) {
									char c = buf.get();
									line.append(c);
									if ((c == '\n') || (c == '\r'))
										if (line.length() > 0) {
											queue.put(line.toString());
											line = new StringBuilder();
										}
								}
							}
						}
						queue.put(line.toString());
						buf.clear();

						// Reopen the rotated file.
						if (reader != null)
							reader.close();
						reader = new BufferedReader(new FileReader(new File(logname)));
					} catch (Exception e) {
						logger.error("file {} does not exist", logname, e);
						Thread.sleep(10000);
						continue;
					}
				}

				for (int retrys = 10; retrys > 0; retrys--) {
					int bufread = reader.read(buf);
					if (bufread < 0) {
						// Was "if (retrys > 0)", which made the flush branch unreachable
						// inside a loop whose condition is retrys > 0; flush on the last retry.
						if (retrys > 1)
							Thread.sleep(1000);
						else {
							// No new data after ~10s of retries:
							// flush whatever partial data is still buffered.
							synchronized (buf) {
								buf.flip();
								char[] dst = new char[buf.length()];
								buf.get(dst);
								buf.clear();
								queue.put(new String(dst));
							}
						}
					} else {
						filesize = new File(logname).length();

						buf.flip();
						synchronized (buf) {
							// Read the buffer and split it into lines.
							StringBuilder line = new StringBuilder();
							for (int i = 0; i < buf.limit(); i++) {
								char c = buf.get();
								line.append(c);
								if ((c == '\n') || (c == '\r'))
									if (line.length() > 0) {
										queue.put(line.toString());
										line = new StringBuilder();
									}
							}
							// Carry the trailing incomplete line over into the buffer.
							buf.compact();
							if (line.length() > 0) {
								buf.append(line);
							}
						}
						break;
					}
				}
			}
		} catch (Exception e) {
			logger.error("failed to read file", e);
		} finally {
			if (reader != null) {
				try {
					reader.close();
				} catch (IOException e) {
					logger.error("failed to close file reader", e);
				}
			}
		}

	}

}
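A quick local check of the tail-and-queue behaviour could look like this; the demo class, file path, and sample line are hypothetical, and the write must land before TailLog's ~10 s idle flush:

	package com.wankun.logcount.kafka;

	import java.io.FileWriter;
	import java.util.concurrent.ArrayBlockingQueue;
	import java.util.concurrent.BlockingQueue;

	// Hypothetical local check, not part of this commit.
	public class TailLogDemo {
		public static void main(String[] args) throws Exception {
			String log = "/tmp/taillog-demo.log";      // assumed path
			new FileWriter(log).close();               // file must exist before TailLog opens it
			BlockingQueue<String> queue = new ArrayBlockingQueue<String>(100);
			new TailLog(queue, log).start();

			FileWriter w = new FileWriter(log, true);  // append a line as a real logger would
			w.write("hello tail\n");
			w.close();

			System.out.println("got: " + queue.take()); // blocks until TailLog delivers the line
		}
	}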
src/main/java/com/wankun/logcount/kafka/TailService.java
@@ -0,0 +1,28 @@
package com.wankun.logcount.kafka;

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TailService {

	private final static Logger logger = LoggerFactory.getLogger(TailService.class);

	public static void main(String[] args) {
		if (args.length < 1) {
			logger.error("Usage: TailService <logfile> [<logfile> ...]");
			System.exit(1); // was exit(0): a usage error should not report success
		}

		BlockingQueue<String> queue = new ArrayBlockingQueue<String>(10000);

		// One TailLog thread per file, all feeding a single MsgSender.
		for (String arg : args) {
			new TailLog(queue, arg).start();
		}

		new MsgSender(queue).start();
	}

}
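Assuming the jar has been built with mvn package and the dependencies are on the classpath (the jar name, lib directory, and log path below are illustrative), the service is started with one or more log files as arguments:

	java -cp logcount-1.0.jar:lib/* com.wankun.logcount.kafka.TailService /var/log/recsys/recsys.log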
src/main/java/com/wankun/logcount/spark/RecsysLogs.java
@@ -0,0 +1,49 @@
package com.wankun.logcount.spark;

import java.io.IOException;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;

public class RecsysLogs extends HttpServlet {

	private static final long serialVersionUID = 4289573629015709424L;

	@Override
	protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
		// The HTable handle is expected to be placed in the servlet context at startup.
		HTableInterface htable = (HTableInterface) getServletContext().getAttribute("htable");
		// Reverse-scan the latest 20 counter cells from family "f", qualifier "nums".
		Scan scan = new Scan();
		scan.addColumn(Bytes.toBytes("f"), Bytes.toBytes("nums"));
		scan.setReversed(true);
		// scan.setMaxResultSize(20);
		scan.setFilter(new PageFilter(20));
		ResultScanner scanner = htable.getScanner(scan);
		StringBuilder sb = new StringBuilder();
		for (Result res : scanner) {
			Cell cell = res.getColumnLatestCell(Bytes.toBytes("f"), Bytes.toBytes("nums"));
			Long nums = Bytes.toLong(CellUtil.cloneValue(cell));
			String key = Bytes.toString(CellUtil.cloneRow(cell));
			sb.append(key + " : " + nums + "\n");
		}
		scanner.close();

		resp.getWriter().write(sb.toString());
	}

	@Override
	protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
		this.doGet(req, resp);
	}

}
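Nothing in this commit shows where the "htable" context attribute comes from. A bootstrap listener along these lines could provide it; the class, its registration in web.xml, and the "recsys" table name are assumptions, not part of the commit:

	package com.wankun.logcount.spark;

	import javax.servlet.ServletContextEvent;
	import javax.servlet.ServletContextListener;

	import org.apache.hadoop.conf.Configuration;
	import org.apache.hadoop.hbase.HBaseConfiguration;
	import org.apache.hadoop.hbase.client.HTable;

	// Hypothetical bootstrap: opens the HBase table once and shares it through
	// the servlet context under the "htable" key that RecsysLogs reads.
	public class HTableBootstrap implements ServletContextListener {
		@Override
		public void contextInitialized(ServletContextEvent e) {
			try {
				Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
				e.getServletContext().setAttribute("htable", new HTable(conf, "recsys")); // table name assumed
			} catch (Exception ex) {
				throw new RuntimeException("failed to open HBase table", ex);
			}
		}

		@Override
		public void contextDestroyed(ServletContextEvent e) {
		}
	}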
hbase-site.xml
@@ -0,0 +1,214 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-->
<configuration>
	<property>
		<name>hbase.cluster.distributed</name>
		<value>true</value>
	</property>

	<property>
		<name>hbase.rootdir</name>
		<value>hdfs://dwztgame/hbase</value>
	</property>

	<property>
		<name>dfs.datanode.max.xcievers</name>
		<value>40960</value>
	</property>

	<property>
		<name>hbase.zookeeper.quorum</name>
		<value>node01.dw.ztgame.com:2181,node02.dw.ztgame.com:2181,node03.dw.ztgame.com:2181</value>
	</property>

	<property>
		<name>hbase.regionserver.handler.count</name>
		<value>200</value>
		<description>Count of RPC Server instances spun up on RegionServers.
		The same property is used by the Master for its count of master handlers.
		Default is 10.</description>
	</property>

	<property>
		<name>hbase.regionserver.flushlogentries</name>
		<value>500</value>
		<description>Sync the HLog to HDFS when it has accumulated this many
		entries. Default 1. Value is checked on every HLog.hflush.</description>
	</property>

	<property>
		<name>hbase.regionserver.optionallogflushinterval</name>
		<value>2000</value>
		<description>Sync the HLog to HDFS after this interval if it has not
		accumulated enough entries to trigger a sync. Default 1 second. Units:
		milliseconds.</description>
	</property>

	<property>
		<name>hbase.regionserver.thread.splitcompactcheckfrequency</name>
		<value>600000</value>
		<description>How often a region server runs the split/compaction check.</description>
	</property>

	<property>
		<name>hbase.regions.slop</name>
		<value>0</value>
		<description>Rebalance if any regionserver has average + (average * slop) regions.
		Default is 0% slop.</description>
	</property>

	<property>
		<name>hbase.server.thread.wakefrequency</name>
		<value>5000</value>
		<description>Time to sleep in between searches for work (in milliseconds).
		Used as sleep interval by service threads such as the log roller.</description>
	</property>

	<property>
		<name>hbase.hregion.memstore.flush.size</name>
		<value>134217728</value>
		<description>Memstore will be flushed to disk if size of the memstore
		exceeds this number of bytes. Value is checked by a thread that runs
		every hbase.server.thread.wakefrequency.</description>
	</property>

	<property>
		<name>hbase.hregion.memstore.block.multiplier</name>
		<value>6</value>
		<description>
		Block updates if the memstore occupies more than this multiplier
		times hbase.hregion.memstore.flush.size bytes. Useful for preventing
		runaway memstore growth during spikes in update traffic. Without an
		upper bound, the memstore fills such that when it flushes, the
		resultant flush files take a long time to compact or split, or,
		worse, we OOME.</description>
	</property>

	<property>
		<name>hbase.hregion.memstore.mslab.enabled</name>
		<value>true</value>
		<description>Experimental: Enables the MemStore-Local Allocation Buffer,
		a feature which works to prevent heap fragmentation under
		heavy write loads. This can reduce the frequency of stop-the-world
		GC pauses on large heaps.</description>
	</property>

	<property>
		<name>hfile.block.cache.size</name>
		<value>0.2</value>
		<description>Percentage of maximum heap (-Xmx setting) to allocate to the block cache
		used by HFile/StoreFile. Default of 0.2 means allocate 20%.
		Set to 0 to disable.</description>
	</property>

	<property>
		<name>hbase.regionserver.nbreservationblocks</name>
		<value>10</value>
		<description>The number of reservoir blocks of memory released on
		OOME so we can clean up properly before server shutdown.</description>
	</property>

	<property>
		<name>hbase.regionserver.global.memstore.upperLimit</name>
		<value>0.5</value>
		<description>Maximum size of all memstores in a region server before new
		updates are blocked and flushes are forced. Defaults to 40% of heap.</description>
	</property>

	<property>
		<name>hbase.regionserver.global.memstore.lowerLimit</name>
		<value>0.4</value>
		<description>When memstores are being forced to flush to make room in
		memory, keep flushing until we hit this mark. Defaults to 35% of heap.
		Setting this value equal to hbase.regionserver.global.memstore.upperLimit causes
		the minimum possible flushing to occur when updates are blocked due to
		memstore limiting.</description>
	</property>

	<property>
		<name>hbase.hregion.max.filesize</name>
		<value>2684354560</value>
		<description>
		Maximum HStoreFile size. If any one of a column family's HStoreFiles has
		grown to exceed this value, the hosting HRegion is split in two.
		Default: 256M.</description>
	</property>

	<property>
		<name>hbase.snapshot.enabled</name>
		<value>true</value>
	</property>

	<property>
		<name>hbase.regionserver.regionSplitLimit</name>
		<value>200</value>
		<description>Limit for the number of regions after which no more region
		splitting should take place. This is not a hard limit for the number of
		regions but acts as a guideline for the regionserver to stop splitting after
		a certain limit. Default is set to MAX_INT; i.e. do not block splitting.</description>
	</property>

	<property>
		<name>hbase.hstore.compactionThreshold</name>
		<value>4</value>
		<description>If more than this number of HStoreFiles exist in any one HStore
		(one HStoreFile is written per flush of memstore), then a compaction
		is run to rewrite all HStoreFiles as one. Larger numbers
		put off compaction, but when it runs, it takes longer to complete.</description>
	</property>

	<property>
		<name>hbase.hstore.blockingStoreFiles</name>
		<value>12</value>
		<description>If more than this number of StoreFiles exist in any one Store
		(one StoreFile is written per flush of MemStore), then updates are
		blocked for this HRegion until a compaction is completed, or
		until hbase.hstore.blockingWaitTime has been exceeded.</description>
	</property>

	<property>
		<name>hbase.hstore.compaction.max</name>
		<value>6</value>
		<description>Max number of HStoreFiles to compact per 'minor' compaction.</description>
	</property>

	<property>
		<name>hbase.hregion.majorcompaction</name>
		<value>172800000</value>
		<description>The time (in milliseconds) between 'major' compactions of all
		HStoreFiles in a region. Default: 1 day.
		Set to 0 to disable automated major compactions.</description>
	</property>

	<property>
		<name>io.storefile.bloom.enabled</name>
		<value>true</value>
	</property>

	<property>
		<name>hbase.replication</name>
		<value>true</value>
	</property>

</configuration>
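For scale: hbase.hregion.memstore.flush.size of 134217728 bytes is 128 MB, so with the block multiplier of 6 above, updates to a region block once its memstore passes 6 × 128 MB = 768 MB; the global 0.5/0.4 limits cap all memstores combined at 50% of heap and force flushing back down to 40%.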
index.html
@@ -0,0 +1,11 @@
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>recsys logs monitoring system</title>
</head>

<body>
<h2>Welcome to the recsys logs monitoring system!</h2> <br>
<a href="/monitor">View the latest log activity</a>
</body>
</html>