001/* 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase.client; 019 020import java.io.IOException; 021import java.util.ArrayList; 022import java.util.Arrays; 023import java.util.Collection; 024import java.util.Collections; 025import java.util.HashMap; 026import java.util.HashSet; 027import java.util.List; 028import java.util.Map; 029import java.util.Objects; 030import java.util.Optional; 031import java.util.Set; 032import java.util.TreeMap; 033import java.util.TreeSet; 034import java.util.function.BiPredicate; 035import java.util.function.Function; 036import java.util.regex.Matcher; 037import java.util.regex.Pattern; 038import java.util.stream.Collectors; 039import org.apache.hadoop.hbase.Coprocessor; 040import org.apache.hadoop.hbase.HConstants; 041import org.apache.hadoop.hbase.TableName; 042import org.apache.hadoop.hbase.exceptions.DeserializationException; 043import org.apache.hadoop.hbase.exceptions.HBaseException; 044import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; 045import org.apache.hadoop.hbase.util.Bytes; 046import org.apache.hadoop.hbase.util.PrettyPrinter; 047import org.apache.yetus.audience.InterfaceAudience; 048import 
org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

/**
 * Convenience class for composing an instance of {@link TableDescriptor}.
 * @since 2.0.0
 */
@InterfaceAudience.Public
public class TableDescriptorBuilder {
  public static final Logger LOG = LoggerFactory.getLogger(TableDescriptorBuilder.class);
  @InterfaceAudience.Private
  public static final String SPLIT_POLICY = "SPLIT_POLICY";
  private static final Bytes SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY));
  /**
   * Used by HBase Shell interface to access this metadata attribute which denotes the maximum size
   * of the store file after which a region split occurs.
   */
  @InterfaceAudience.Private
  public static final String MAX_FILESIZE = "MAX_FILESIZE";
  private static final Bytes MAX_FILESIZE_KEY = new Bytes(Bytes.toBytes(MAX_FILESIZE));

  /**
   * Used by rest interface to access this metadata attribute which denotes if the table is Read
   * Only.
   */
  @InterfaceAudience.Private
  public static final String READONLY = "READONLY";
  private static final Bytes READONLY_KEY = new Bytes(Bytes.toBytes(READONLY));

  /**
   * Used by HBase Shell interface to access this metadata attribute which denotes if the table is
   * compaction enabled.
   */
  @InterfaceAudience.Private
  public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
  private static final Bytes COMPACTION_ENABLED_KEY = new Bytes(Bytes.toBytes(COMPACTION_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata attribute which denotes if the table is
   * split enabled.
   */
  @InterfaceAudience.Private
  public static final String SPLIT_ENABLED = "SPLIT_ENABLED";
  private static final Bytes SPLIT_ENABLED_KEY = new Bytes(Bytes.toBytes(SPLIT_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata attribute which denotes if the table is
   * merge enabled.
   */
  @InterfaceAudience.Private
  public static final String MERGE_ENABLED = "MERGE_ENABLED";
  private static final Bytes MERGE_ENABLED_KEY = new Bytes(Bytes.toBytes(MERGE_ENABLED));

  /**
   * Used by HBase Shell interface to access this metadata attribute which represents the maximum
   * size of the memstore after which its contents are flushed onto the disk.
   */
  @InterfaceAudience.Private
  public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
  private static final Bytes MEMSTORE_FLUSHSIZE_KEY = new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));

  @InterfaceAudience.Private
  public static final String FLUSH_POLICY = "FLUSH_POLICY";
  private static final Bytes FLUSH_POLICY_KEY = new Bytes(Bytes.toBytes(FLUSH_POLICY));
  /**
   * Used by rest interface to access this metadata attribute which denotes if it is a catalog
   * table, i.e. <code>hbase:meta</code>.
   */
  @InterfaceAudience.Private
  public static final String IS_META = "IS_META";
  private static final Bytes IS_META_KEY = new Bytes(Bytes.toBytes(IS_META));

  /**
   * {@link Durability} setting for the table.
   */
  @InterfaceAudience.Private
  public static final String DURABILITY = "DURABILITY";
  // NOTE(review): uses the literal "DURABILITY" instead of the DURABILITY constant above; the
  // value is identical, but the constant would be more consistent with the other *_KEY fields.
  private static final Bytes DURABILITY_KEY = new Bytes(Bytes.toBytes("DURABILITY"));

  /**
   * The number of region replicas for the table.
   */
  @InterfaceAudience.Private
  public static final String REGION_REPLICATION = "REGION_REPLICATION";
  private static final Bytes REGION_REPLICATION_KEY = new Bytes(Bytes.toBytes(REGION_REPLICATION));

  /**
   * The flag to indicate whether or not the memstore should be replicated for read-replicas
   * (CONSISTENCY => TIMELINE).
   */
  @InterfaceAudience.Private
  public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
  private static final Bytes REGION_MEMSTORE_REPLICATION_KEY =
    new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));

  /**
   * If non-null, the HDFS erasure coding policy to set on the data dir of the table
   */
  public static final String ERASURE_CODING_POLICY = "ERASURE_CODING_POLICY";
  private static final Bytes ERASURE_CODING_POLICY_KEY =
    new Bytes(Bytes.toBytes(ERASURE_CODING_POLICY));

  private static final String DEFAULT_ERASURE_CODING_POLICY = null;
  /**
   * Used by shell/rest interface to access this metadata attribute which denotes if the table
   * should be treated by region normalizer.
   */
  @InterfaceAudience.Private
  public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
  private static final Bytes NORMALIZATION_ENABLED_KEY =
    new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED));

  @InterfaceAudience.Private
  public static final String NORMALIZER_TARGET_REGION_COUNT = "NORMALIZER_TARGET_REGION_COUNT";
  private static final Bytes NORMALIZER_TARGET_REGION_COUNT_KEY =
    new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_COUNT));

  @InterfaceAudience.Private
  public static final String NORMALIZER_TARGET_REGION_SIZE_MB = "NORMALIZER_TARGET_REGION_SIZE_MB";
  private static final Bytes NORMALIZER_TARGET_REGION_SIZE_MB_KEY =
    new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE_MB));
  // TODO: Keeping backward compatibility with HBASE-25651 change. Can be removed in later version
  @InterfaceAudience.Private
  @Deprecated
  public static final String NORMALIZER_TARGET_REGION_SIZE = "NORMALIZER_TARGET_REGION_SIZE";
  @Deprecated
  private static final Bytes NORMALIZER_TARGET_REGION_SIZE_KEY =
    new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE));

  /**
   * Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value
   */
  private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;

  @InterfaceAudience.Private
  public static final String PRIORITY = "PRIORITY";
  private static final Bytes PRIORITY_KEY = new Bytes(Bytes.toBytes(PRIORITY));

  // Attribute key carrying the table's RegionServer group assignment.
  private static final Bytes RSGROUP_KEY =
    new Bytes(Bytes.toBytes(RSGroupInfo.TABLE_DESC_PROP_GROUP));

  /**
   * Relative priority of the table used for rpc scheduling
   */
  private static final int DEFAULT_PRIORITY = HConstants.NORMAL_QOS;

  /**
   * Constant that denotes whether the table is READONLY by default and is false
   */
  public static final boolean DEFAULT_READONLY = false;

  /**
   * Constant that denotes whether the table is compaction enabled by default
   */
  public static final boolean DEFAULT_COMPACTION_ENABLED = true;

  /**
   * Constant that denotes whether the table is split enabled by default
   */
  public static final boolean DEFAULT_SPLIT_ENABLED = true;

  /**
   * Constant that denotes whether the table is merge enabled by default
   */
  public static final boolean DEFAULT_MERGE_ENABLED = true;

  /**
   * Constant that denotes the maximum default size of the memstore in bytes after which the
   * contents are flushed to the store files.
   */
  public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024 * 1024 * 128L;

  public static final int DEFAULT_REGION_REPLICATION = 1;

  public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;

  // Human-readable defaults keyed by attribute name; its key set also seeds RESERVED_KEYWORDS.
  private final static Map<String, String> DEFAULT_VALUES = new HashMap<>();
  private final static Set<Bytes> RESERVED_KEYWORDS = new HashSet<>();

  static {
    DEFAULT_VALUES.put(MAX_FILESIZE, String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE, String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); // use the enum name
    DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
    DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY));
    // NOTE(review): String.valueOf(null) stores the literal string "null" here; kept as-is since
    // existing consumers of DEFAULT_VALUES may rely on the key being present.
    DEFAULT_VALUES.put(ERASURE_CODING_POLICY, String.valueOf(DEFAULT_ERASURE_CODING_POLICY));
    // Every attribute with a default is a reserved keyword, plus IS_META (which is derived).
    DEFAULT_VALUES.keySet().stream().map(s -> new Bytes(Bytes.toBytes(s)))
      .forEach(RESERVED_KEYWORDS::add);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }

  /**
   * Returns the display unit for a known attribute key: BYTE for the size-valued attributes,
   * NONE for everything else.
   */
  public static PrettyPrinter.Unit getUnit(String key) {
    switch (key) {
      case MAX_FILESIZE:
      case MEMSTORE_FLUSHSIZE:
        return PrettyPrinter.Unit.BYTE;
      default:
        return PrettyPrinter.Unit.NONE;
    }
  }

  /**
   * @deprecated namespace table has been folded into the ns family in meta table, do not use this
   *             any more.
   */
  @InterfaceAudience.Private
  @Deprecated
  public final static String NAMESPACE_FAMILY_INFO = "info";

  /**
   * @deprecated namespace table has been folded into the ns family in meta table, do not use this
   *             any more.
   */
  @InterfaceAudience.Private
  @Deprecated
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);

  /**
   * @deprecated namespace table has been folded into the ns family in meta table, do not use this
   *             any more.
   */
  @InterfaceAudience.Private
  @Deprecated
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");

  /**
   * <pre>
   * Pattern that matches a coprocessor specification. Form is:
   * {@code <coprocessor jar file location> '|' <class name> ['|' <priority> ['|' <arguments>]]}
   * where arguments are {@code <KEY> '=' <VALUE> [,...]}
   * For example: {@code hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2}
   * </pre>
   */
  private static final Pattern CP_HTD_ATTR_VALUE_PATTERN =
    Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");

  private static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+";
  private static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+";
  private static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile("("
    + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" + CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?");
  private static final Pattern CP_HTD_ATTR_KEY_PATTERN =
    Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);

  /**
   * Table descriptor for namespace table
   * @deprecated since 3.0.0 and will be removed in 4.0.0. We have folded the data in namespace
   *             table into meta table, so do not use it any more.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21154">HBASE-21154</a>
   */
  @Deprecated
  public static final TableDescriptor NAMESPACE_TABLEDESC =
    TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(NAMESPACE_FAMILY_INFO_BYTES)
        // Ten is arbitrary number. Keep versions to help debugging.
        .setMaxVersions(10).setInMemory(true).setBlocksize(8 * 1024)
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL).build())
      .build();

  // The single mutable descriptor this builder mutates; build() hands out a copy of it.
  private final ModifyableTableDescriptor desc;

  /** Returns This instance serialized with pb with pb magic prefix */
  public static byte[] toByteArray(TableDescriptor desc) {
    if (desc instanceof ModifyableTableDescriptor) {
      return ((ModifyableTableDescriptor) desc).toByteArray();
    }
    return new ModifyableTableDescriptor(desc).toByteArray();
  }

  /**
   * The input should be created by {@link #toByteArray}.
   * @param pbBytes A pb serialized TableDescriptor instance with pb magic prefix
   * @return This instance serialized with pb with pb magic prefix
   * @throws org.apache.hadoop.hbase.exceptions.DeserializationException if an error occurred
   */
  public static TableDescriptor parseFrom(byte[] pbBytes) throws DeserializationException {
    return ModifyableTableDescriptor.parseFrom(pbBytes);
  }

  public static TableDescriptorBuilder newBuilder(final TableName name) {
    return new TableDescriptorBuilder(name);
  }

  public static TableDescriptor copy(TableDescriptor desc) {
    return new ModifyableTableDescriptor(desc);
  }

  public static TableDescriptor copy(TableName name, TableDescriptor desc) {
    return new ModifyableTableDescriptor(name, desc);
  }

  /**
   * Copy all values, families, and name from the input.
   * @param desc The descriptor to copy
   * @return A clone of input
   */
  public static TableDescriptorBuilder newBuilder(final TableDescriptor desc) {
    return new TableDescriptorBuilder(desc);
  }

  private TableDescriptorBuilder(final TableName name) {
    this.desc = new ModifyableTableDescriptor(name);
  }

  private TableDescriptorBuilder(final TableDescriptor desc) {
    this.desc = new ModifyableTableDescriptor(desc);
  }

  public TableDescriptorBuilder setCoprocessor(String className) throws IOException {
    return setCoprocessor(CoprocessorDescriptorBuilder.of(className));
  }

  public TableDescriptorBuilder setCoprocessor(CoprocessorDescriptor cpDesc) throws IOException {
    desc.setCoprocessor(Objects.requireNonNull(cpDesc));
    return this;
  }

  public TableDescriptorBuilder setCoprocessors(Collection<CoprocessorDescriptor> cpDescs)
    throws IOException {
    for (CoprocessorDescriptor cpDesc : cpDescs) {
      desc.setCoprocessor(cpDesc);
    }
    return this;
  }

  public boolean hasCoprocessor(String classNameToMatch) {
    return desc.hasCoprocessor(classNameToMatch);
  }

  public TableDescriptorBuilder setColumnFamily(final ColumnFamilyDescriptor family) {
    desc.setColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  public TableDescriptorBuilder
    setColumnFamilies(final Collection<ColumnFamilyDescriptor> families) {
    families.forEach(desc::setColumnFamily);
    return this;
  }

  public TableDescriptorBuilder modifyColumnFamily(final ColumnFamilyDescriptor family) {
    desc.modifyColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  public TableDescriptorBuilder removeValue(final String key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeValue(Bytes key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeValue(byte[] key) {
    desc.removeValue(key);
    return this;
  }

  // Removes every (key, value) pair matched by the predicate. The matches are collected first so
  // we never mutate the underlying map while streaming over it.
  public TableDescriptorBuilder removeValue(BiPredicate<Bytes, Bytes> predicate) {
    List<Bytes> toRemove =
      desc.getValues().entrySet().stream().filter(e -> predicate.test(e.getKey(), e.getValue()))
        .map(Map.Entry::getKey).collect(Collectors.toList());
    for (Bytes key : toRemove) {
      removeValue(key);
    }
    return this;
  }

  public TableDescriptorBuilder removeColumnFamily(final byte[] name) {
    desc.removeColumnFamily(name);
    return this;
  }

  public TableDescriptorBuilder removeCoprocessor(String className) {
    desc.removeCoprocessor(className);
    return this;
  }

  public TableDescriptorBuilder setCompactionEnabled(final boolean isEnable) {
    desc.setCompactionEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setSplitEnabled(final boolean isEnable) {
    desc.setSplitEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setMergeEnabled(final boolean isEnable) {
    desc.setMergeEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setDurability(Durability durability) {
    desc.setDurability(durability);
    return this;
  }

  public TableDescriptorBuilder setFlushPolicyClassName(String clazz) {
    desc.setFlushPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setMaxFileSize(long maxFileSize) {
    desc.setMaxFileSize(maxFileSize);
    return this;
  }

  public TableDescriptorBuilder setMaxFileSize(String maxFileSize) throws HBaseException {
    desc.setMaxFileSize(maxFileSize);
    return this;
  }

  public TableDescriptorBuilder setMemStoreFlushSize(long memstoreFlushSize) {
    desc.setMemStoreFlushSize(memstoreFlushSize);
    return this;
  }

  public TableDescriptorBuilder setMemStoreFlushSize(String memStoreFlushSize)
    throws HBaseException {
    desc.setMemStoreFlushSize(memStoreFlushSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizerTargetRegionCount(final int regionCount) {
    desc.setNormalizerTargetRegionCount(regionCount);
    return this;
  }

  public TableDescriptorBuilder setNormalizerTargetRegionSize(final long regionSize) {
    desc.setNormalizerTargetRegionSize(regionSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizationEnabled(final boolean isEnable) {
    desc.setNormalizationEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setPriority(int priority) {
    desc.setPriority(priority);
    return this;
  }

  public TableDescriptorBuilder setReadOnly(final boolean readOnly) {
    desc.setReadOnly(readOnly);
    return this;
  }

  public TableDescriptorBuilder setErasureCodingPolicy(String policy) {
    desc.setErasureCodingPolicy(policy);
    return this;
  }

  public TableDescriptorBuilder setRegionMemStoreReplication(boolean memstoreReplication) {
    desc.setRegionMemStoreReplication(memstoreReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionReplication(int regionReplication) {
    desc.setRegionReplication(regionReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionSplitPolicyClassName(String clazz) {
    desc.setRegionSplitPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setValue(final String key, final String value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final Bytes key, final Bytes value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final byte[] key, final byte[] value) {
    desc.setValue(key, value);
    return this;
  }

  public String getValue(String key) {
    return desc.getValue(key);
  }

  /**
   * Sets replication scope on all (and only) the columns already in the builder. Columns added
   * later won't be backfilled with replication scope.
   * @param scope replication scope
   * @return a TableDescriptorBuilder
   */
  public TableDescriptorBuilder setReplicationScope(int scope) {
    // Snapshot the current families first: we remove and re-add each one, and iterating the live
    // map while doing so would be unsafe.
    Map<byte[], ColumnFamilyDescriptor> newFamilies = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
    newFamilies.putAll(desc.families);
    newFamilies.forEach((cf, cfDesc) -> {
      desc.removeColumnFamily(cf);
      desc
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfDesc).setScope(scope).build());
    });
    return this;
  }

  public TableDescriptorBuilder setRegionServerGroup(String group) {
    desc.setValue(RSGROUP_KEY, group);
    return this;
  }

  public TableDescriptor build() {
    return new ModifyableTableDescriptor(desc);
  }

  private static final class ModifyableTableDescriptor
    implements TableDescriptor, Comparable<ModifyableTableDescriptor> {

    private final TableName name;

    /**
     * A map which holds the metadata information of the table. This metadata includes values like
     * IS_META, SPLIT_POLICY, MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
     */
    private final Map<Bytes, Bytes> values = new HashMap<>();

    /**
     * Maps column family name to the respective FamilyDescriptors
     */
    private final Map<byte[], ColumnFamilyDescriptor> families =
      new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);

    /**
     * Construct a table descriptor specifying a TableName object
     * @param name Table name.
     */
    private ModifyableTableDescriptor(final TableName name) {
      this(name, Collections.emptyList(), Collections.emptyMap());
    }

    private ModifyableTableDescriptor(final TableDescriptor desc) {
      this(desc.getTableName(), Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    /**
     * Construct a table descriptor by cloning the descriptor passed as a parameter.
     * <p>
     * Makes a deep copy of the supplied descriptor.
     * @param name The new name
     * @param desc The descriptor.
     */
    private ModifyableTableDescriptor(final TableName name, final TableDescriptor desc) {
      this(name, Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    private ModifyableTableDescriptor(final TableName name,
      final Collection<ColumnFamilyDescriptor> families, Map<Bytes, Bytes> values) {
      this.name = name;
      // Deep-copy each family so later edits to the source descriptor cannot leak into this one.
      families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c)));
      this.values.putAll(values);
      // IS_META is always derived from the table name here, overriding any value supplied above.
      this.values.put(IS_META_KEY,
        new Bytes(Bytes.toBytes(Boolean.toString(name.equals(TableName.META_TABLE_NAME)))));
    }

    /**
     * Checks if this table is <code> hbase:meta </code> region.
     * @return true if this table is <code> hbase:meta </code> region
     */
    @Override
    public boolean isMetaRegion() {
      return getOrDefault(IS_META_KEY, Boolean::valueOf, false);
    }

    /**
     * Checks if the table is a <code>hbase:meta</code> table
     * @return true if table is <code> hbase:meta </code> region.
     */
    @Override
    public boolean isMetaTable() {
      return isMetaRegion();
    }

    @Override
    public Bytes getValue(Bytes key) {
      Bytes rval = values.get(key);
      // Defensive copy so callers cannot mutate the stored value.
      return rval == null ? null : new Bytes(rval.copyBytes());
    }

    @Override
    public String getValue(String key) {
      Bytes rval = values.get(new Bytes(Bytes.toBytes(key)));
      return rval == null ? null : Bytes.toString(rval.get(), rval.getOffset(), rval.getLength());
    }

    @Override
    public byte[] getValue(byte[] key) {
      Bytes value = values.get(new Bytes(key));
      // copyBytes() keeps the internal buffer private to this descriptor.
      return value == null ? null : value.copyBytes();
    }

    // Decodes the value stored under key with the supplied parser, or returns defaultValue when
    // the key is absent.
    private <T> T getOrDefault(Bytes key, Function<String, T> function, T defaultValue) {
      Bytes value = values.get(key);
      if (value == null) {
        return defaultValue;
      } else {
        return function.apply(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      }
    }

    /**
     * Getter for fetching an unmodifiable {@link #values} map.
     * @return unmodifiable map {@link #values}.
     * @see #values
     */
    @Override
    public Map<Bytes, Bytes> getValues() {
      // shallow pointer copy
      return Collections.unmodifiableMap(values);
    }

    /**
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     * @param key The key.
     * @param value The value. If null, removes the setting.
     * @return the modifyable TD
     * @see #values
     */
    public ModifyableTableDescriptor setValue(byte[] key, byte[] value) {
      return setValue(toBytesOrNull(key, v -> v), toBytesOrNull(value, v -> v));
    }

    public ModifyableTableDescriptor setValue(String key, String value) {
      return setValue(toBytesOrNull(key, Bytes::toBytes), toBytesOrNull(value, Bytes::toBytes));
    }

    /**
     * @param key The key.
     * @param value The value. If null, removes the setting.
     */
    private ModifyableTableDescriptor setValue(final Bytes key, final String value) {
      return setValue(key, toBytesOrNull(value, Bytes::toBytes));
    }

    /**
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     * @param key The key.
     * @param value The value. If null, removes the setting.
699 */ 700 public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) { 701 if (value == null || value.getLength() == 0) { 702 values.remove(key); 703 } else { 704 values.put(key, value); 705 } 706 return this; 707 } 708 709 private static <T> Bytes toBytesOrNull(T t, Function<T, byte[]> f) { 710 if (t == null) { 711 return null; 712 } else { 713 return new Bytes(f.apply(t)); 714 } 715 } 716 717 /** 718 * Remove metadata represented by the key from the {@link #values} map 719 * @param key Key whose key and value we're to remove from TableDescriptor parameters. 720 * @return the modifyable TD 721 */ 722 public ModifyableTableDescriptor removeValue(final String key) { 723 return setValue(key, (String) null); 724 } 725 726 /** 727 * Remove metadata represented by the key from the {@link #values} map 728 * @param key Key whose key and value we're to remove from TableDescriptor parameters. 729 * @return the modifyable TD 730 */ 731 public ModifyableTableDescriptor removeValue(Bytes key) { 732 return setValue(key, (Bytes) null); 733 } 734 735 /** 736 * Remove metadata represented by the key from the {@link #values} map 737 * @param key Key whose key and value we're to remove from TableDescriptor parameters. 738 * @return the modifyable TD 739 */ 740 public ModifyableTableDescriptor removeValue(final byte[] key) { 741 return removeValue(new Bytes(key)); 742 } 743 744 /** 745 * Check if the readOnly flag of the table is set. If the readOnly flag is set then the contents 746 * of the table can only be read from but not modified. 747 * @return true if all columns in the table should be read only 748 */ 749 @Override 750 public boolean isReadOnly() { 751 return getOrDefault(READONLY_KEY, Boolean::valueOf, DEFAULT_READONLY); 752 } 753 754 /** 755 * Setting the table as read only sets all the columns in the table as read only. 
By default all 756 * tables are modifiable, but if the readOnly flag is set to true then the contents of the table 757 * can only be read but not modified. 758 * @param readOnly True if all of the columns in the table should be read only. 759 * @return the modifyable TD 760 */ 761 public ModifyableTableDescriptor setReadOnly(final boolean readOnly) { 762 return setValue(READONLY_KEY, Boolean.toString(readOnly)); 763 } 764 765 /** 766 * The HDFS erasure coding policy for a table. This will be set on the data dir of the table, 767 * and is an alternative to normal replication which takes less space at the cost of locality. 768 * @return the current policy, or null if undefined 769 */ 770 @Override 771 public String getErasureCodingPolicy() { 772 return getValue(ERASURE_CODING_POLICY); 773 } 774 775 /** 776 * Sets the HDFS erasure coding policy for the table. This will be propagated to HDFS for the 777 * data dir of the table. Erasure coding is an alternative to normal replication which takes 778 * less space at the cost of locality. The policy must be available and enabled on the hdfs 779 * cluster before being set. 780 * @param policy the policy to set, or null to disable erasure coding 781 * @return the modifyable TD 782 */ 783 public ModifyableTableDescriptor setErasureCodingPolicy(String policy) { 784 return setValue(ERASURE_CODING_POLICY_KEY, policy); 785 } 786 787 /** 788 * Check if the compaction enable flag of the table is true. If flag is false then no 789 * minor/major compactions will be done in real. 790 * @return true if table compaction enabled 791 */ 792 @Override 793 public boolean isCompactionEnabled() { 794 return getOrDefault(COMPACTION_ENABLED_KEY, Boolean::valueOf, DEFAULT_COMPACTION_ENABLED); 795 } 796 797 /** 798 * Setting the table compaction enable flag. 799 * @param isEnable True if enable compaction. 
800 * @return the modifyable TD 801 */ 802 public ModifyableTableDescriptor setCompactionEnabled(final boolean isEnable) { 803 return setValue(COMPACTION_ENABLED_KEY, Boolean.toString(isEnable)); 804 } 805 806 /** 807 * Check if the split enable flag of the table is true. If flag is false then no split will be 808 * done. 809 * @return true if table region split enabled 810 */ 811 @Override 812 public boolean isSplitEnabled() { 813 return getOrDefault(SPLIT_ENABLED_KEY, Boolean::valueOf, DEFAULT_SPLIT_ENABLED); 814 } 815 816 /** 817 * Setting the table region split enable flag. 818 * @param isEnable True if enable region split. 819 * @return the modifyable TD 820 */ 821 public ModifyableTableDescriptor setSplitEnabled(final boolean isEnable) { 822 return setValue(SPLIT_ENABLED_KEY, Boolean.toString(isEnable)); 823 } 824 825 /** 826 * Check if the region merge enable flag of the table is true. If flag is false then no merge 827 * will be done. 828 * @return true if table region merge enabled 829 */ 830 @Override 831 public boolean isMergeEnabled() { 832 return getOrDefault(MERGE_ENABLED_KEY, Boolean::valueOf, DEFAULT_MERGE_ENABLED); 833 } 834 835 /** 836 * Setting the table region merge enable flag. 837 * @param isEnable True if enable region merge. 838 * @return the modifyable TD 839 */ 840 public ModifyableTableDescriptor setMergeEnabled(final boolean isEnable) { 841 return setValue(MERGE_ENABLED_KEY, Boolean.toString(isEnable)); 842 } 843 844 /** 845 * Check if normalization enable flag of the table is true. If flag is false then no region 846 * normalizer won't attempt to normalize this table. 847 * @return true if region normalization is enabled for this table 848 **/ 849 @Override 850 public boolean isNormalizationEnabled() { 851 return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf, false); 852 } 853 854 /** 855 * Check if there is the target region count. If so, the normalize plan will be calculated based 856 * on the target region count. 
857 * @return target region count after normalize done 858 */ 859 @Override 860 public int getNormalizerTargetRegionCount() { 861 return getOrDefault(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer::valueOf, 862 Integer.valueOf(-1)); 863 } 864 865 /** 866 * Check if there is the target region size. If so, the normalize plan will be calculated based 867 * on the target region size. 868 * @return target region size after normalize done 869 */ 870 @Override 871 public long getNormalizerTargetRegionSize() { 872 long target_region_size = 873 getOrDefault(NORMALIZER_TARGET_REGION_SIZE_MB_KEY, Long::valueOf, Long.valueOf(-1)); 874 return target_region_size == Long.valueOf(-1) 875 ? getOrDefault(NORMALIZER_TARGET_REGION_SIZE_KEY, Long::valueOf, Long.valueOf(-1)) 876 : target_region_size; 877 } 878 879 /** 880 * Setting the table normalization enable flag. 881 * @param isEnable True if enable normalization. 882 * @return the modifyable TD 883 */ 884 public ModifyableTableDescriptor setNormalizationEnabled(final boolean isEnable) { 885 return setValue(NORMALIZATION_ENABLED_KEY, Boolean.toString(isEnable)); 886 } 887 888 /** 889 * Setting the target region count of table normalization . 890 * @param regionCount the target region count. 891 * @return the modifyable TD 892 */ 893 public ModifyableTableDescriptor setNormalizerTargetRegionCount(final int regionCount) { 894 return setValue(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer.toString(regionCount)); 895 } 896 897 /** 898 * Setting the target region size of table normalization. 899 * @param regionSize the target region size. 900 * @return the modifyable TD 901 */ 902 public ModifyableTableDescriptor setNormalizerTargetRegionSize(final long regionSize) { 903 return setValue(NORMALIZER_TARGET_REGION_SIZE_MB_KEY, Long.toString(regionSize)); 904 } 905 906 /** 907 * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT. 
908 * @param durability enum value 909 * @return the modifyable TD 910 */ 911 public ModifyableTableDescriptor setDurability(Durability durability) { 912 return setValue(DURABILITY_KEY, durability.name()); 913 } 914 915 /** 916 * Returns the durability setting for the table. 917 * @return durability setting for the table. 918 */ 919 @Override 920 public Durability getDurability() { 921 return getOrDefault(DURABILITY_KEY, Durability::valueOf, DEFAULT_DURABLITY); 922 } 923 924 /** 925 * Get the name of the table 926 */ 927 @Override 928 public TableName getTableName() { 929 return name; 930 } 931 932 /** 933 * This sets the class associated with the region split policy which determines when a region 934 * split should occur. The class used by default is defined in 935 * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy 936 * @param clazz the class name 937 * @return the modifyable TD 938 */ 939 public ModifyableTableDescriptor setRegionSplitPolicyClassName(String clazz) { 940 return setValue(SPLIT_POLICY_KEY, clazz); 941 } 942 943 /** 944 * This gets the class associated with the region split policy which determines when a region 945 * split should occur. The class used by default is defined in 946 * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy 947 * @return the class name of the region split policy for this table. If this returns null, the 948 * default split policy is used. 949 */ 950 @Override 951 public String getRegionSplitPolicyClassName() { 952 return getOrDefault(SPLIT_POLICY_KEY, Function.identity(), null); 953 } 954 955 /** 956 * Returns the maximum size upto which a region can grow to after which a region split is 957 * triggered. The region size is represented by the size of the biggest store file in that 958 * region. 959 * @return max hregion size for table, -1 if not set. 
960 * @see #setMaxFileSize(long) 961 */ 962 @Override 963 public long getMaxFileSize() { 964 return getOrDefault(MAX_FILESIZE_KEY, Long::valueOf, (long) -1); 965 } 966 967 /** 968 * Sets the maximum size upto which a region can grow to after which a region split is 969 * triggered. The region size is represented by the size of the biggest store file in that 970 * region, i.e. If the biggest store file grows beyond the maxFileSize, then the region split is 971 * triggered. This defaults to a value of 256 MB. 972 * <p> 973 * This is not an absolute value and might vary. Assume that a single row exceeds the 974 * maxFileSize then the storeFileSize will be greater than maxFileSize since a single row cannot 975 * be split across multiple regions 976 * </p> 977 * @param maxFileSize The maximum file size that a store file can grow to before a split is 978 * triggered. 979 * @return the modifyable TD 980 */ 981 public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) { 982 return setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize)); 983 } 984 985 public ModifyableTableDescriptor setMaxFileSize(String maxFileSize) throws HBaseException { 986 return setMaxFileSize( 987 Long.parseLong(PrettyPrinter.valueOf(maxFileSize, PrettyPrinter.Unit.BYTE))); 988 } 989 990 /** 991 * Returns the size of the memstore after which a flush to filesystem is triggered. 992 * @return memory cache flush size for each hregion, -1 if not set. 993 * @see #setMemStoreFlushSize(long) 994 */ 995 @Override 996 public long getMemStoreFlushSize() { 997 return getOrDefault(MEMSTORE_FLUSHSIZE_KEY, Long::valueOf, (long) -1); 998 } 999 1000 /** 1001 * Represents the maximum size of the memstore after which the contents of the memstore are 1002 * flushed to the filesystem. This defaults to a size of 64 MB. 
1003 * @param memstoreFlushSize memory cache flush size for each hregion 1004 * @return the modifyable TD 1005 */ 1006 public ModifyableTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) { 1007 return setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize)); 1008 } 1009 1010 public ModifyableTableDescriptor setMemStoreFlushSize(String memStoreFlushSize) 1011 throws HBaseException { 1012 return setMemStoreFlushSize( 1013 Long.parseLong(PrettyPrinter.valueOf(memStoreFlushSize, PrettyPrinter.Unit.BYTE))); 1014 } 1015 1016 /** 1017 * This sets the class associated with the flush policy which determines determines the stores 1018 * need to be flushed when flushing a region. The class used by default is defined in 1019 * org.apache.hadoop.hbase.regionserver.FlushPolicy. 1020 * @param clazz the class name 1021 * @return the modifyable TD 1022 */ 1023 public ModifyableTableDescriptor setFlushPolicyClassName(String clazz) { 1024 return setValue(FLUSH_POLICY_KEY, clazz); 1025 } 1026 1027 /** 1028 * This gets the class associated with the flush policy which determines the stores need to be 1029 * flushed when flushing a region. The class used by default is defined in 1030 * org.apache.hadoop.hbase.regionserver.FlushPolicy. 1031 * @return the class name of the flush policy for this table. If this returns null, the default 1032 * flush policy is used. 1033 */ 1034 @Override 1035 public String getFlushPolicyClassName() { 1036 return getOrDefault(FLUSH_POLICY_KEY, Function.identity(), null); 1037 } 1038 1039 /** 1040 * Adds a column family. For the updating purpose please use 1041 * {@link #modifyColumnFamily(ColumnFamilyDescriptor)} instead. 1042 * @param family to add. 
1043 * @return the modifyable TD 1044 */ 1045 public ModifyableTableDescriptor setColumnFamily(final ColumnFamilyDescriptor family) { 1046 if (family.getName() == null || family.getName().length <= 0) { 1047 throw new IllegalArgumentException("Family name cannot be null or empty"); 1048 } 1049 int flength = family.getName() == null ? 0 : family.getName().length; 1050 if (flength > Byte.MAX_VALUE) { 1051 throw new IllegalArgumentException( 1052 "The length of family name is bigger than " + Byte.MAX_VALUE); 1053 } 1054 if (hasColumnFamily(family.getName())) { 1055 throw new IllegalArgumentException( 1056 "Family '" + family.getNameAsString() + "' already exists so cannot be added"); 1057 } 1058 return putColumnFamily(family); 1059 } 1060 1061 /** 1062 * Modifies the existing column family. 1063 * @param family to update 1064 * @return this (for chained invocation) 1065 */ 1066 public ModifyableTableDescriptor modifyColumnFamily(final ColumnFamilyDescriptor family) { 1067 if (family.getName() == null || family.getName().length <= 0) { 1068 throw new IllegalArgumentException("Family name cannot be null or empty"); 1069 } 1070 if (!hasColumnFamily(family.getName())) { 1071 throw new IllegalArgumentException( 1072 "Column family '" + family.getNameAsString() + "' does not exist"); 1073 } 1074 return putColumnFamily(family); 1075 } 1076 1077 private ModifyableTableDescriptor putColumnFamily(ColumnFamilyDescriptor family) { 1078 families.put(family.getName(), family); 1079 return this; 1080 } 1081 1082 /** 1083 * Checks to see if this table contains the given column family 1084 * @param familyName Family name or column name. 1085 * @return true if the table contains the specified family name 1086 */ 1087 @Override 1088 public boolean hasColumnFamily(final byte[] familyName) { 1089 return families.containsKey(familyName); 1090 } 1091 1092 /** Returns Name of this table and then a map of all of the column family descriptors. 
*/ 1093 @Override 1094 public String toString() { 1095 StringBuilder s = new StringBuilder(); 1096 s.append('\'').append(Bytes.toString(name.getName())).append('\''); 1097 s.append(getValues(true)); 1098 families.values().forEach(f -> s.append(", ").append(f)); 1099 return s.toString(); 1100 } 1101 1102 /** 1103 * @return Name of this table and then a map of all of the column family descriptors (with only 1104 * the non-default column family attributes) 1105 */ 1106 @Override 1107 public String toStringCustomizedValues() { 1108 StringBuilder s = new StringBuilder(); 1109 s.append('\'').append(Bytes.toString(name.getName())).append('\''); 1110 s.append(getValues(false)); 1111 families.values().forEach(hcd -> s.append(", ").append(hcd.toStringCustomizedValues())); 1112 return s.toString(); 1113 } 1114 1115 /** Returns map of all table attributes formatted into string. */ 1116 public String toStringTableAttributes() { 1117 return getValues(true).toString(); 1118 } 1119 1120 private StringBuilder getValues(boolean printDefaults) { 1121 StringBuilder s = new StringBuilder(); 1122 1123 // step 1: set partitioning and pruning 1124 Set<Bytes> reservedKeys = new TreeSet<>(); 1125 Set<Bytes> userKeys = new TreeSet<>(); 1126 for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) { 1127 if (entry.getKey() == null || entry.getKey().get() == null) { 1128 continue; 1129 } 1130 String key = Bytes.toString(entry.getKey().get()); 1131 // in this section, print out reserved keywords + coprocessor info 1132 if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) { 1133 userKeys.add(entry.getKey()); 1134 continue; 1135 } 1136 // only print out IS_META if true 1137 String value = Bytes.toString(entry.getValue().get()); 1138 if (key.equalsIgnoreCase(IS_META)) { 1139 if (Boolean.valueOf(value) == false) { 1140 continue; 1141 } 1142 } 1143 // see if a reserved key is a default value. 
may not want to print it out 1144 if ( 1145 printDefaults || !DEFAULT_VALUES.containsKey(key) 1146 || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value) 1147 ) { 1148 reservedKeys.add(entry.getKey()); 1149 } 1150 } 1151 1152 // early exit optimization 1153 boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty(); 1154 if (!hasAttributes) { 1155 return s; 1156 } 1157 1158 s.append(", {"); 1159 // step 2: printing attributes 1160 if (hasAttributes) { 1161 s.append("TABLE_ATTRIBUTES => {"); 1162 1163 // print all reserved keys first 1164 boolean printCommaForAttr = false; 1165 for (Bytes k : reservedKeys) { 1166 String key = Bytes.toString(k.get()); 1167 String value = Bytes.toStringBinary(values.get(k).get()); 1168 if (printCommaForAttr) { 1169 s.append(", "); 1170 } 1171 printCommaForAttr = true; 1172 s.append(key); 1173 s.append(" => "); 1174 s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\''); 1175 } 1176 1177 if (!userKeys.isEmpty()) { 1178 // print all non-reserved as a separate subset 1179 if (printCommaForAttr) { 1180 s.append(", "); 1181 } 1182 s.append(HConstants.METADATA).append(" => "); 1183 s.append("{"); 1184 boolean printCommaForCfg = false; 1185 for (Bytes k : userKeys) { 1186 String key = Bytes.toString(k.get()); 1187 String value = Bytes.toStringBinary(values.get(k).get()); 1188 if (printCommaForCfg) { 1189 s.append(", "); 1190 } 1191 printCommaForCfg = true; 1192 s.append('\'').append(key).append('\''); 1193 s.append(" => "); 1194 s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\''); 1195 } 1196 s.append("}"); 1197 } 1198 1199 s.append("}"); 1200 } 1201 1202 s.append("}"); // end METHOD 1203 return s; 1204 } 1205 1206 /** 1207 * Compare the contents of the descriptor with another one passed as a parameter. Checks if the 1208 * obj passed is an instance of ModifyableTableDescriptor, if yes then the contents of the 1209 * descriptors are compared. 
1210 * @param obj The object to compare 1211 * @return true if the contents of the the two descriptors exactly match 1212 * @see java.lang.Object#equals(java.lang.Object) 1213 */ 1214 @Override 1215 public boolean equals(Object obj) { 1216 if (this == obj) { 1217 return true; 1218 } 1219 if (obj instanceof ModifyableTableDescriptor) { 1220 return TableDescriptor.COMPARATOR.compare(this, (ModifyableTableDescriptor) obj) == 0; 1221 } 1222 return false; 1223 } 1224 1225 /** Returns hash code */ 1226 @Override 1227 public int hashCode() { 1228 int result = this.name.hashCode(); 1229 if (this.families.size() > 0) { 1230 for (ColumnFamilyDescriptor e : this.families.values()) { 1231 result ^= e.hashCode(); 1232 } 1233 } 1234 result ^= values.hashCode(); 1235 return result; 1236 } 1237 1238 // Comparable 1239 /** 1240 * Compares the descriptor with another descriptor which is passed as a parameter. This compares 1241 * the content of the two descriptors and not the reference. 1242 * @param other The MTD to compare 1243 * @return 0 if the contents of the descriptors are exactly matching, 1 if there is a mismatch 1244 * in the contents 1245 */ 1246 @Override 1247 public int compareTo(final ModifyableTableDescriptor other) { 1248 return TableDescriptor.COMPARATOR.compare(this, other); 1249 } 1250 1251 @Override 1252 public ColumnFamilyDescriptor[] getColumnFamilies() { 1253 return families.values().toArray(new ColumnFamilyDescriptor[families.size()]); 1254 } 1255 1256 /** 1257 * Returns the configured replicas per region 1258 */ 1259 @Override 1260 public int getRegionReplication() { 1261 return getOrDefault(REGION_REPLICATION_KEY, Integer::valueOf, DEFAULT_REGION_REPLICATION); 1262 } 1263 1264 /** 1265 * Sets the number of replicas per region. 
1266 * @param regionReplication the replication factor per region 1267 * @return the modifyable TD 1268 */ 1269 public ModifyableTableDescriptor setRegionReplication(int regionReplication) { 1270 return setValue(REGION_REPLICATION_KEY, Integer.toString(regionReplication)); 1271 } 1272 1273 /** Returns true if the read-replicas memstore replication is enabled. */ 1274 @Override 1275 public boolean hasRegionMemStoreReplication() { 1276 return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf, 1277 DEFAULT_REGION_MEMSTORE_REPLICATION); 1278 } 1279 1280 /** 1281 * Enable or Disable the memstore replication from the primary region to the replicas. The 1282 * replication will be used only for meta operations (e.g. flush, compaction, ...) 1283 * @param memstoreReplication true if the new data written to the primary region should be 1284 * replicated. false if the secondaries can tollerate to have new 1285 * data only when the primary flushes the memstore. 1286 * @return the modifyable TD 1287 */ 1288 public ModifyableTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) { 1289 return setValue(REGION_MEMSTORE_REPLICATION_KEY, Boolean.toString(memstoreReplication)); 1290 } 1291 1292 public ModifyableTableDescriptor setPriority(int priority) { 1293 return setValue(PRIORITY_KEY, Integer.toString(priority)); 1294 } 1295 1296 @Override 1297 public int getPriority() { 1298 return getOrDefault(PRIORITY_KEY, Integer::valueOf, DEFAULT_PRIORITY); 1299 } 1300 1301 /** 1302 * Returns all the column family names of the current table. The map of TableDescriptor contains 1303 * mapping of family name to ColumnFamilyDescriptor. This returns all the keys of the family map 1304 * which represents the column family names of the table. 1305 * @return Immutable sorted set of the keys of the families. 
1306 */ 1307 @Override 1308 public Set<byte[]> getColumnFamilyNames() { 1309 return Collections.unmodifiableSet(this.families.keySet()); 1310 } 1311 1312 /** 1313 * Returns the ColumnFamilyDescriptor for a specific column family with name as specified by the 1314 * parameter column. 1315 * @param column Column family name 1316 * @return Column descriptor for the passed family name or the family on passed in column. 1317 */ 1318 @Override 1319 public ColumnFamilyDescriptor getColumnFamily(final byte[] column) { 1320 return this.families.get(column); 1321 } 1322 1323 /** 1324 * Removes the ColumnFamilyDescriptor with name specified by the parameter column from the table 1325 * descriptor 1326 * @param column Name of the column family to be removed. 1327 * @return Column descriptor for the passed family name or the family on passed in column. 1328 */ 1329 public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) { 1330 return this.families.remove(column); 1331 } 1332 1333 /** 1334 * Add a table coprocessor to this table. The coprocessor type must be 1335 * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class 1336 * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a 1337 * region is opened. 1338 * @param className Full class name. 1339 * @return the modifyable TD 1340 */ 1341 public ModifyableTableDescriptor setCoprocessor(String className) throws IOException { 1342 return setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className) 1343 .setPriority(Coprocessor.PRIORITY_USER).build()); 1344 } 1345 1346 /** 1347 * Add a table coprocessor to this table. The coprocessor type must be 1348 * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class 1349 * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a 1350 * region is opened. 
1351 * @throws IOException any illegal parameter key/value 1352 * @return the modifyable TD 1353 */ 1354 public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp) throws IOException { 1355 checkHasCoprocessor(cp.getClassName()); 1356 if (cp.getPriority() < 0) { 1357 throw new IOException( 1358 "Priority must be bigger than or equal with zero, current:" + cp.getPriority()); 1359 } 1360 // Validate parameter kvs and then add key/values to kvString. 1361 StringBuilder kvString = new StringBuilder(); 1362 for (Map.Entry<String, String> e : cp.getProperties().entrySet()) { 1363 if (!e.getKey().matches(CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) { 1364 throw new IOException("Illegal parameter key = " + e.getKey()); 1365 } 1366 if (!e.getValue().matches(CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) { 1367 throw new IOException("Illegal parameter (" + e.getKey() + ") value = " + e.getValue()); 1368 } 1369 if (kvString.length() != 0) { 1370 kvString.append(','); 1371 } 1372 kvString.append(e.getKey()); 1373 kvString.append('='); 1374 kvString.append(e.getValue()); 1375 } 1376 1377 String value = cp.getJarPath().orElse("") + "|" + cp.getClassName() + "|" 1378 + Integer.toString(cp.getPriority()) + "|" + kvString.toString(); 1379 return setCoprocessorToMap(value); 1380 } 1381 1382 /** 1383 * Add a table coprocessor to this table. The coprocessor type must be 1384 * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class 1385 * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a 1386 * region is opened. 1387 * @param specStr The Coprocessor specification all in in one String 1388 * @return the modifyable TD 1389 * @deprecated used by HTableDescriptor and admin.rb. As of release 2.0.0, this will be removed 1390 * in HBase 3.0.0. 
     */
    @Deprecated
    public ModifyableTableDescriptor setCoprocessorWithSpec(final String specStr)
      throws IOException {
      // Parse first so that a malformed spec fails before any state is touched.
      CoprocessorDescriptor cpDesc =
        toCoprocessorDescriptor(specStr).orElseThrow(() -> new IllegalArgumentException(
          "Format does not match " + CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr));
      checkHasCoprocessor(cpDesc.getClassName());
      return setCoprocessorToMap(specStr);
    }

    // Rejects registration of a coprocessor class that is already attached to this table.
    private void checkHasCoprocessor(final String className) throws IOException {
      if (hasCoprocessor(className)) {
        throw new IOException("Coprocessor " + className + " already exists.");
      }
    }

    /**
     * Add coprocessor to values Map
     * @param specStr The Coprocessor specification all in one String
     * @return Returns <code>this</code>
     */
    private ModifyableTableDescriptor setCoprocessorToMap(final String specStr) {
      if (specStr == null) {
        return this;
      }
      // generate a coprocessor key: scan existing "coprocessor$N" attribute keys and pick N+1
      // past the current maximum so keys never collide.
      int maxCoprocessorNumber = 0;
      Matcher keyMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        maxCoprocessorNumber =
          Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
      }
      maxCoprocessorNumber++;
      String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
      return setValue(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
    }

    /**
     * Check if the table has an attached co-processor represented by the name className
     * @param classNameToMatch - Class name of the co-processor
     * @return true if the table has a co-processor className
     */
    @Override
    public boolean hasCoprocessor(String classNameToMatch) {
      return getCoprocessorDescriptors().stream()
        .anyMatch(cp -> cp.getClassName().equals(classNameToMatch));
    }

    /**
     * Return the list of attached co-processor represented by their name className
     * @return The list of co-processors classNames
     */
    @Override
    public List<CoprocessorDescriptor> getCoprocessorDescriptors() {
      List<CoprocessorDescriptor> result = new ArrayList<>();
      // Every attribute whose key matches the coprocessor key pattern holds one spec string;
      // unparseable specs are silently skipped (ifPresent).
      for (Map.Entry<Bytes, Bytes> e : getValues().entrySet()) {
        String key = Bytes.toString(e.getKey().get()).trim();
        if (CP_HTD_ATTR_KEY_PATTERN.matcher(key).matches()) {
          toCoprocessorDescriptor(Bytes.toString(e.getValue().get()).trim())
            .ifPresent(result::add);
        }
      }
      return result;
    }

    /**
     * Remove a coprocessor from those set on the table
     * @param className Class name of the co-processor
     * @throws IllegalArgumentException if no coprocessor with that class name is attached
     */
    public void removeCoprocessor(String className) {
      Bytes match = null;
      Matcher keyMatcher;
      Matcher valueMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        valueMatcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes.toString(e.getValue().get()));
        if (!valueMatcher.matches()) {
          continue;
        }
        // get className and compare
        String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
        // remove the CP if it is present
        if (clazz.equals(className.trim())) {
          match = e.getKey();
          break;
        }
      }
      // if we found a match, remove it
      if (match != null) {
        ModifyableTableDescriptor.this.removeValue(match);
      } else {
        throw new IllegalArgumentException(String.format(
          "coprocessor with class name %s was not found in the table attribute", className));
      }
    }

    /** Returns the bytes in pb format */
    private byte[] toByteArray() {
      return ProtobufUtil.prependPBMagic(ProtobufUtil.toTableSchema(this).toByteArray());
    }

    /**
     * @param bytes A pb serialized {@link ModifyableTableDescriptor} instance with pb magic
     *          prefix
     * @return An instance of {@link ModifyableTableDescriptor} made from <code>bytes</code>
     * @see #toByteArray()
     */
    private static TableDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
      if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
        throw new DeserializationException("Expected PB encoded ModifyableTableDescriptor");
      }
      int pblen = ProtobufUtil.lengthOfPBMagic();
      HBaseProtos.TableSchema.Builder builder = HBaseProtos.TableSchema.newBuilder();
      try {
        // Merge only the payload that follows the magic prefix.
        ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
        return ProtobufUtil.toTableDescriptor(builder.build());
      } catch (IOException e) {
        throw new DeserializationException(e);
      }
    }

    @Override
    public int getColumnFamilyCount() {
      return families.size();
    }

    @Override
    public Optional<String> getRegionServerGroup() {
      Bytes value = values.get(RSGROUP_KEY);
      if (value != null) {
        return Optional.of(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      } else {
        return Optional.empty();
      }
    }
  }

  /**
   * Parses a coprocessor spec string (jarPath|className|priority|k1=v1,...) into a
   * {@link CoprocessorDescriptor}. This method is mostly intended for internal use. However, it
   * is also relied on by hbase-shell for backwards compatibility.
   */
  private static Optional<CoprocessorDescriptor> toCoprocessorDescriptor(String spec) {
    Matcher matcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
    if (matcher.matches()) {
      // jar file path can be empty if the cp class can be loaded
      // from class loader.
      String path = matcher.group(1).trim().isEmpty() ? null : matcher.group(1).trim();
      String className = matcher.group(2).trim();
      if (className.isEmpty()) {
        return Optional.empty();
      }
      String priorityStr = matcher.group(3).trim();
      // Missing priority defaults to PRIORITY_USER.
      int priority =
        priorityStr.isEmpty() ? Coprocessor.PRIORITY_USER : Integer.parseInt(priorityStr);
      String cfgSpec = null;
      try {
        cfgSpec = matcher.group(4);
      } catch (IndexOutOfBoundsException ex) {
        // ignore — the pattern may not define a 4th group; properties are then absent
      }
      Map<String, String> ourConf = new TreeMap<>();
      if (cfgSpec != null && !cfgSpec.trim().equals("|")) {
        cfgSpec = cfgSpec.substring(cfgSpec.indexOf('|') + 1);
        Matcher m = CP_HTD_ATTR_VALUE_PARAM_PATTERN.matcher(cfgSpec);
        while (m.find()) {
          ourConf.put(m.group(1), m.group(2));
        }
      }
      return Optional.of(CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path)
        .setPriority(priority).setProperties(ourConf).build());
    }
    return Optional.empty();
  }
}