001/* 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase.client; 019 020import java.io.IOException; 021import java.util.ArrayList; 022import java.util.Arrays; 023import java.util.Collection; 024import java.util.Collections; 025import java.util.HashMap; 026import java.util.HashSet; 027import java.util.List; 028import java.util.Map; 029import java.util.Objects; 030import java.util.Optional; 031import java.util.Set; 032import java.util.TreeMap; 033import java.util.TreeSet; 034import java.util.function.Function; 035import java.util.regex.Matcher; 036import java.util.regex.Pattern; 037import org.apache.hadoop.fs.Path; 038import org.apache.hadoop.hbase.Coprocessor; 039import org.apache.hadoop.hbase.HConstants; 040import org.apache.hadoop.hbase.TableName; 041import org.apache.hadoop.hbase.exceptions.DeserializationException; 042import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; 043import org.apache.hadoop.hbase.security.User; 044import org.apache.hadoop.hbase.util.Bytes; 045import org.apache.yetus.audience.InterfaceAudience; 046import org.slf4j.Logger; 047import org.slf4j.LoggerFactory; 048 049import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; 
050import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; 051 052/** 053 * Convenience class for composing an instance of {@link TableDescriptor}. 054 * @since 2.0.0 055 */ 056@InterfaceAudience.Public 057public class TableDescriptorBuilder { 058 public static final Logger LOG = LoggerFactory.getLogger(TableDescriptorBuilder.class); 059 @InterfaceAudience.Private 060 public static final String SPLIT_POLICY = "SPLIT_POLICY"; 061 private static final Bytes SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY)); 062 /** 063 * Used by HBase Shell interface to access this metadata attribute which denotes the maximum size 064 * of the store file after which a region split occurs. 065 */ 066 @InterfaceAudience.Private 067 public static final String MAX_FILESIZE = "MAX_FILESIZE"; 068 private static final Bytes MAX_FILESIZE_KEY = new Bytes(Bytes.toBytes(MAX_FILESIZE)); 069 070 @InterfaceAudience.Private 071 public static final String OWNER = "OWNER"; 072 @InterfaceAudience.Private 073 public static final Bytes OWNER_KEY = new Bytes(Bytes.toBytes(OWNER)); 074 075 /** 076 * Used by rest interface to access this metadata attribute which denotes if the table is Read 077 * Only. 078 */ 079 @InterfaceAudience.Private 080 public static final String READONLY = "READONLY"; 081 private static final Bytes READONLY_KEY = new Bytes(Bytes.toBytes(READONLY)); 082 083 /** 084 * Used by HBase Shell interface to access this metadata attribute which denotes if the table is 085 * compaction enabled. 086 */ 087 @InterfaceAudience.Private 088 public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED"; 089 private static final Bytes COMPACTION_ENABLED_KEY = new Bytes(Bytes.toBytes(COMPACTION_ENABLED)); 090 091 /** 092 * Used by HBase Shell interface to access this metadata attribute which denotes if the table is 093 * split enabled. 
094 */ 095 @InterfaceAudience.Private 096 public static final String SPLIT_ENABLED = "SPLIT_ENABLED"; 097 private static final Bytes SPLIT_ENABLED_KEY = new Bytes(Bytes.toBytes(SPLIT_ENABLED)); 098 099 /** 100 * Used by HBase Shell interface to access this metadata attribute which denotes if the table is 101 * merge enabled. 102 */ 103 @InterfaceAudience.Private 104 public static final String MERGE_ENABLED = "MERGE_ENABLED"; 105 private static final Bytes MERGE_ENABLED_KEY = new Bytes(Bytes.toBytes(MERGE_ENABLED)); 106 107 /** 108 * Constant that denotes whether the table is normalized by default. 109 */ 110 @InterfaceAudience.Private 111 public static final boolean DEFAULT_NORMALIZATION_ENABLED = false; 112 113 /** 114 * Used by HBase Shell interface to access this metadata attribute which represents the maximum 115 * size of the memstore after which its contents are flushed onto the disk. 116 */ 117 @InterfaceAudience.Private 118 public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE"; 119 private static final Bytes MEMSTORE_FLUSHSIZE_KEY = new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE)); 120 121 @InterfaceAudience.Private 122 public static final String FLUSH_POLICY = "FLUSH_POLICY"; 123 private static final Bytes FLUSH_POLICY_KEY = new Bytes(Bytes.toBytes(FLUSH_POLICY)); 124 /** 125 * Used by rest interface to access this metadata attribute which denotes if it is a catalog 126 * table, either <code> hbase:meta </code>. 127 */ 128 @InterfaceAudience.Private 129 public static final String IS_META = "IS_META"; 130 private static final Bytes IS_META_KEY = new Bytes(Bytes.toBytes(IS_META)); 131 132 /** 133 * {@link Durability} setting for the table. 134 */ 135 @InterfaceAudience.Private 136 public static final String DURABILITY = "DURABILITY"; 137 private static final Bytes DURABILITY_KEY = new Bytes(Bytes.toBytes("DURABILITY")); 138 139 /** 140 * The number of region replicas for the table. 
141 */ 142 @InterfaceAudience.Private 143 public static final String REGION_REPLICATION = "REGION_REPLICATION"; 144 private static final Bytes REGION_REPLICATION_KEY = new Bytes(Bytes.toBytes(REGION_REPLICATION)); 145 146 /** 147 * The flag to indicate whether or not the memstore should be replicated for read-replicas 148 * (CONSISTENCY => TIMELINE). 149 */ 150 @InterfaceAudience.Private 151 public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION"; 152 private static final Bytes REGION_MEMSTORE_REPLICATION_KEY = 153 new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION)); 154 155 private static final Bytes REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY = 156 new Bytes(Bytes.toBytes(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY)); 157 /** 158 * Used by shell/rest interface to access this metadata attribute which denotes if the table 159 * should be treated by region normalizer. 160 */ 161 @InterfaceAudience.Private 162 public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED"; 163 private static final Bytes NORMALIZATION_ENABLED_KEY = 164 new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED)); 165 166 @InterfaceAudience.Private 167 public static final String NORMALIZER_TARGET_REGION_COUNT = "NORMALIZER_TARGET_REGION_COUNT"; 168 private static final Bytes NORMALIZER_TARGET_REGION_COUNT_KEY = 169 new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_COUNT)); 170 171 @InterfaceAudience.Private 172 public static final String NORMALIZER_TARGET_REGION_SIZE = "NORMALIZER_TARGET_REGION_SIZE"; 173 private static final Bytes NORMALIZER_TARGET_REGION_SIZE_KEY = 174 new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE)); 175 176 /** 177 * Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value 178 */ 179 private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT; 180 181 @InterfaceAudience.Private 182 public static final String PRIORITY = "PRIORITY"; 183 private static final 
Bytes PRIORITY_KEY = new Bytes(Bytes.toBytes(PRIORITY)); 184 185 private static final Bytes RSGROUP_KEY = 186 new Bytes(Bytes.toBytes(RSGroupInfo.TABLE_DESC_PROP_GROUP)); 187 188 /** 189 * Relative priority of the table used for rpc scheduling 190 */ 191 private static final int DEFAULT_PRIORITY = HConstants.NORMAL_QOS; 192 193 /** 194 * Constant that denotes whether the table is READONLY by default and is false 195 */ 196 public static final boolean DEFAULT_READONLY = false; 197 198 /** 199 * Constant that denotes whether the table is compaction enabled by default 200 */ 201 public static final boolean DEFAULT_COMPACTION_ENABLED = true; 202 203 /** 204 * Constant that denotes whether the table is split enabled by default 205 */ 206 public static final boolean DEFAULT_SPLIT_ENABLED = true; 207 208 /** 209 * Constant that denotes whether the table is merge enabled by default 210 */ 211 public static final boolean DEFAULT_MERGE_ENABLED = true; 212 213 /** 214 * Constant that denotes the maximum default size of the memstore in bytes after which the 215 * contents are flushed to the store files. 
216 */ 217 public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024 * 1024 * 128L; 218 219 public static final int DEFAULT_REGION_REPLICATION = 1; 220 221 public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true; 222 223 private final static Map<String, String> DEFAULT_VALUES = new HashMap<>(); 224 private final static Set<Bytes> RESERVED_KEYWORDS = new HashSet<>(); 225 226 static { 227 DEFAULT_VALUES.put(MAX_FILESIZE, String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE)); 228 DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY)); 229 DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE, String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE)); 230 DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); // use the enum name 231 DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION)); 232 DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY)); 233 DEFAULT_VALUES.keySet().stream().map(s -> new Bytes(Bytes.toBytes(s))) 234 .forEach(RESERVED_KEYWORDS::add); 235 RESERVED_KEYWORDS.add(IS_META_KEY); 236 } 237 238 @InterfaceAudience.Private 239 public final static String NAMESPACE_FAMILY_INFO = "info"; 240 @InterfaceAudience.Private 241 public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO); 242 @InterfaceAudience.Private 243 public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d"); 244 245 /** 246 * <pre> 247 * Pattern that matches a coprocessor specification. 
Form is: 248 * {@code <coprocessor jar file location> '|' <class name> ['|' <priority> ['|' <arguments>]]} 249 * where arguments are {@code <KEY> '=' <VALUE> [,...]} 250 * For example: {@code hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2} 251 * </pre> 252 */ 253 private static final Pattern CP_HTD_ATTR_VALUE_PATTERN = 254 Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$"); 255 256 private static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+"; 257 private static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+"; 258 private static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile("(" 259 + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" + CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?"); 260 private static final Pattern CP_HTD_ATTR_KEY_PATTERN = 261 Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE); 262 263 /** 264 * Table descriptor for namespace table 265 */ 266 // TODO We used to set CacheDataInL1 for NS table. When we have BucketCache in file mode, now the 267 // NS data goes to File mode BC only. Test how that affect the system. If too much, we have to 268 // rethink about adding back the setCacheDataInL1 for NS table. 269 // Note: namespace schema is hard-coded. In hbase3, namespace goes away; it is integrated into 270 // hbase:meta. 271 public static final TableDescriptor NAMESPACE_TABLEDESC = 272 TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME) 273 .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(NAMESPACE_FAMILY_INFO_BYTES) 274 // Ten is arbitrary number. Keep versions to help debugging. 
275 .setMaxVersions(10).setInMemory(true).setBlocksize(8 * 1024) 276 .setScope(HConstants.REPLICATION_SCOPE_LOCAL).build()) 277 .build(); 278 279 private final ModifyableTableDescriptor desc; 280 281 /** 282 * @param desc The table descriptor to serialize 283 * @return This instance serialized with pb with pb magic prefix 284 */ 285 public static byte[] toByteArray(TableDescriptor desc) { 286 if (desc instanceof ModifyableTableDescriptor) { 287 return ((ModifyableTableDescriptor) desc).toByteArray(); 288 } 289 return new ModifyableTableDescriptor(desc).toByteArray(); 290 } 291 292 /** 293 * The input should be created by {@link #toByteArray}. 294 * @param pbBytes A pb serialized TableDescriptor instance with pb magic prefix 295 * @return This instance serialized with pb with pb magic prefix 296 * @throws org.apache.hadoop.hbase.exceptions.DeserializationException 297 */ 298 public static TableDescriptor parseFrom(byte[] pbBytes) throws DeserializationException { 299 return ModifyableTableDescriptor.parseFrom(pbBytes); 300 } 301 302 public static TableDescriptorBuilder newBuilder(final TableName name) { 303 return new TableDescriptorBuilder(name); 304 } 305 306 public static TableDescriptor copy(TableDescriptor desc) { 307 return new ModifyableTableDescriptor(desc); 308 } 309 310 public static TableDescriptor copy(TableName name, TableDescriptor desc) { 311 return new ModifyableTableDescriptor(name, desc); 312 } 313 314 /** 315 * Copy all values, families, and name from the input. 
   * @param desc The descriptor to copy
   * @return A clone of input
   */
  public static TableDescriptorBuilder newBuilder(final TableDescriptor desc) {
    return new TableDescriptorBuilder(desc);
  }

  private TableDescriptorBuilder(final TableName name) {
    this.desc = new ModifyableTableDescriptor(name);
  }

  private TableDescriptorBuilder(final TableDescriptor desc) {
    this.desc = new ModifyableTableDescriptor(desc);
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
   *             {@link #setCoprocessor(String)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessor(String className) throws IOException {
    return addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
   *             {@link #setCoprocessor(CoprocessorDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessor(String className, Path jarFilePath, int priority,
    final Map<String, String> kvs) throws IOException {
    desc.setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className)
      .setJarPath(jarFilePath == null ? null : jarFilePath.toString()).setPriority(priority)
      .setProperties(kvs == null ? Collections.emptyMap() : kvs).build());
    return this;
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
   *             {@link #setCoprocessor(CoprocessorDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessorWithSpec(final String specStr) throws IOException {
    desc.setCoprocessorWithSpec(specStr);
    return this;
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
   *             {@link #setColumnFamily(ColumnFamilyDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addColumnFamily(final ColumnFamilyDescriptor family) {
    desc.setColumnFamily(family);
    return this;
  }

  /**
   * Add the coprocessor identified by its class name, with default priority and no properties.
   * @param className full class name of the coprocessor
   * @return this builder, for chaining
   */
  public TableDescriptorBuilder setCoprocessor(String className) throws IOException {
    return setCoprocessor(CoprocessorDescriptorBuilder.of(className));
  }

  /**
   * Add the given coprocessor to the table being built.
   * @param cpDesc coprocessor descriptor; must not be null
   * @return this builder, for chaining
   */
  public TableDescriptorBuilder setCoprocessor(CoprocessorDescriptor cpDesc) throws IOException {
    desc.setCoprocessor(Objects.requireNonNull(cpDesc));
    return this;
  }

  /**
   * Add all the given coprocessors to the table being built.
   * @param cpDescs coprocessor descriptors to add
   * @return this builder, for chaining
   */
  public TableDescriptorBuilder setCoprocessors(Collection<CoprocessorDescriptor> cpDescs)
    throws IOException {
    for (CoprocessorDescriptor cpDesc : cpDescs) {
      desc.setCoprocessor(cpDesc);
    }
    return this;
  }

  /**
   * Add the given column family to the table being built.
   * @param family column family descriptor; must not be null
   * @return this builder, for chaining
   */
  public TableDescriptorBuilder setColumnFamily(final ColumnFamilyDescriptor family) {
    desc.setColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  /**
   * Add all the given column families to the table being built.
   * @param families column family descriptors to add
   * @return this builder, for chaining
   */
  public TableDescriptorBuilder
    setColumnFamilies(final Collection<ColumnFamilyDescriptor> families) {
    families.forEach(desc::setColumnFamily);
    return this;
  }

  /**
   * Replace an existing column family (matched by name) with the given descriptor.
   * @param family column family descriptor; must not be null
   * @return this builder, for chaining
   */
  public TableDescriptorBuilder modifyColumnFamily(final ColumnFamilyDescriptor family) {
    desc.modifyColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  // The methods below simply delegate to the wrapped ModifyableTableDescriptor and return this
  // builder for chaining; see the corresponding ModifyableTableDescriptor methods for semantics.

  public TableDescriptorBuilder removeValue(Bytes key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeValue(byte[] key) {
    desc.removeValue(key);
    return this;
  }

  public TableDescriptorBuilder removeColumnFamily(final byte[] name) {
    desc.removeColumnFamily(name);
    return this;
  }

  public TableDescriptorBuilder removeCoprocessor(String className) {
    desc.removeCoprocessor(className);
    return this;
  }

  public TableDescriptorBuilder setCompactionEnabled(final boolean isEnable) {
    desc.setCompactionEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setSplitEnabled(final boolean isEnable) {
    desc.setSplitEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setMergeEnabled(final boolean isEnable) {
    desc.setMergeEnabled(isEnable);
    return this;
  }

  public TableDescriptorBuilder setDurability(Durability durability) {
    desc.setDurability(durability);
    return this;
  }

  public TableDescriptorBuilder setFlushPolicyClassName(String clazz) {
    desc.setFlushPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setMaxFileSize(long maxFileSize) {
    desc.setMaxFileSize(maxFileSize);
    return this;
  }

  public TableDescriptorBuilder setMemStoreFlushSize(long memstoreFlushSize) {
    desc.setMemStoreFlushSize(memstoreFlushSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizerTargetRegionCount(final int regionCount) {
    desc.setNormalizerTargetRegionCount(regionCount);
    return this;
  }

  public TableDescriptorBuilder setNormalizerTargetRegionSize(final long regionSize) {
    desc.setNormalizerTargetRegionSize(regionSize);
    return this;
  }

  public TableDescriptorBuilder setNormalizationEnabled(final boolean isEnable) {
    desc.setNormalizationEnabled(isEnable);
    return this;
  }

  /**
   * @deprecated since 2.0.0 and will be removed in 3.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
   */
  @Deprecated
  public TableDescriptorBuilder setOwner(User owner) {
    desc.setOwner(owner);
    return this;
  }

  /**
   * @deprecated since 2.0.0 and will be removed in 3.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
   */
  @Deprecated
  public TableDescriptorBuilder setOwnerString(String ownerString) {
    desc.setOwnerString(ownerString);
    return this;
  }

  public TableDescriptorBuilder setPriority(int priority) {
    desc.setPriority(priority);
    return this;
  }

  public TableDescriptorBuilder setReadOnly(final boolean readOnly) {
    desc.setReadOnly(readOnly);
    return this;
  }

  public TableDescriptorBuilder setRegionMemStoreReplication(boolean memstoreReplication) {
    desc.setRegionMemStoreReplication(memstoreReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionReplication(int regionReplication) {
    desc.setRegionReplication(regionReplication);
    return this;
  }

  public TableDescriptorBuilder setRegionSplitPolicyClassName(String clazz) {
    desc.setRegionSplitPolicyClassName(clazz);
    return this;
  }

  public TableDescriptorBuilder setValue(final String key, final String value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final Bytes key, final Bytes value) {
    desc.setValue(key, value);
    return this;
  }

  public TableDescriptorBuilder setValue(final byte[] key, final byte[] value) {
    desc.setValue(key, value);
    return this;
  }

  /**
   * Sets replication scope for all & only the columns already in the builder. Columns added later
   * won't be backfilled with replication scope.
   * @param scope replication scope
   * @return a TableDescriptorBuilder
   */
  public TableDescriptorBuilder setReplicationScope(int scope) {
    // Snapshot the current families first: each family is removed and re-added with the new
    // scope, so iterating desc.families directly would be mutation-during-iteration.
    Map<byte[], ColumnFamilyDescriptor> newFamilies = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
    newFamilies.putAll(desc.families);
    newFamilies.forEach((cf, cfDesc) -> {
      desc.removeColumnFamily(cf);
      desc
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfDesc).setScope(scope).build());
    });
    return this;
  }

  /**
   * Set the RSGroup for this table, specified RSGroup must exist before create or modify table.
   * @param group rsgroup name
   * @return a TableDescriptorBuilder
   */
  public TableDescriptorBuilder setRegionServerGroup(String group) {
    desc.setValue(RSGROUP_KEY, group);
    return this;
  }

  /**
   * Build an immutable snapshot of the state accumulated so far. The builder may continue to be
   * used afterwards; the returned descriptor is a copy.
   * @return a new TableDescriptor
   */
  public TableDescriptor build() {
    return new ModifyableTableDescriptor(desc);
  }

  /**
   * TODO: make this private after removing the HTableDescriptor
   */
  @InterfaceAudience.Private
  public static class ModifyableTableDescriptor
    implements TableDescriptor, Comparable<ModifyableTableDescriptor> {

    private final TableName name;

    /**
     * A map which holds the metadata information of the table. This metadata includes values like
     * IS_META, SPLIT_POLICY, MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
     */
    private final Map<Bytes, Bytes> values = new HashMap<>();

    /**
     * Maps column family name to the respective FamilyDescriptors
     */
    private final Map<byte[], ColumnFamilyDescriptor> families =
      new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);

    /**
     * Construct a table descriptor specifying a TableName object
     * @param name Table name.
TODO: make this private after removing the HTableDescriptor 591 */ 592 @InterfaceAudience.Private 593 public ModifyableTableDescriptor(final TableName name) { 594 this(name, Collections.EMPTY_LIST, Collections.EMPTY_MAP); 595 } 596 597 private ModifyableTableDescriptor(final TableDescriptor desc) { 598 this(desc.getTableName(), Arrays.asList(desc.getColumnFamilies()), desc.getValues()); 599 } 600 601 /** 602 * Construct a table descriptor by cloning the descriptor passed as a parameter. 603 * <p> 604 * Makes a deep copy of the supplied descriptor. 605 * @param name The new name 606 * @param desc The descriptor. TODO: make this private after removing the HTableDescriptor 607 */ 608 @InterfaceAudience.Private 609 @Deprecated // only used by HTableDescriptor. remove this method if HTD is removed 610 public ModifyableTableDescriptor(final TableName name, final TableDescriptor desc) { 611 this(name, Arrays.asList(desc.getColumnFamilies()), desc.getValues()); 612 } 613 614 private ModifyableTableDescriptor(final TableName name, 615 final Collection<ColumnFamilyDescriptor> families, Map<Bytes, Bytes> values) { 616 this.name = name; 617 families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c))); 618 this.values.putAll(values); 619 this.values.put(IS_META_KEY, 620 new Bytes(Bytes.toBytes(Boolean.toString(name.equals(TableName.META_TABLE_NAME))))); 621 } 622 623 /** 624 * Checks if this table is <code> hbase:meta </code> region. 625 * @return true if this table is <code> hbase:meta </code> region 626 */ 627 @Override 628 public boolean isMetaRegion() { 629 return getOrDefault(IS_META_KEY, Boolean::valueOf, false); 630 } 631 632 /** 633 * Checks if the table is a <code>hbase:meta</code> table 634 * @return true if table is <code> hbase:meta </code> region. 
635 */ 636 @Override 637 public boolean isMetaTable() { 638 return isMetaRegion(); 639 } 640 641 @Override 642 public Bytes getValue(Bytes key) { 643 Bytes rval = values.get(key); 644 return rval == null ? null : new Bytes(rval.copyBytes()); 645 } 646 647 @Override 648 public String getValue(String key) { 649 Bytes rval = values.get(new Bytes(Bytes.toBytes(key))); 650 return rval == null ? null : Bytes.toString(rval.get(), rval.getOffset(), rval.getLength()); 651 } 652 653 @Override 654 public byte[] getValue(byte[] key) { 655 Bytes value = values.get(new Bytes(key)); 656 return value == null ? null : value.copyBytes(); 657 } 658 659 private <T> T getOrDefault(Bytes key, Function<String, T> function, T defaultValue) { 660 Bytes value = values.get(key); 661 if (value == null) { 662 return defaultValue; 663 } else { 664 return function.apply(Bytes.toString(value.get(), value.getOffset(), value.getLength())); 665 } 666 } 667 668 /** 669 * Getter for fetching an unmodifiable {@link #values} map. 670 * @return unmodifiable map {@link #values}. 671 * @see #values 672 */ 673 @Override 674 public Map<Bytes, Bytes> getValues() { 675 // shallow pointer copy 676 return Collections.unmodifiableMap(values); 677 } 678 679 /** 680 * Setter for storing metadata as a (key, value) pair in {@link #values} map 681 * @param key The key. 682 * @param value The value. If null, removes the setting. 683 * @return the modifyable TD 684 * @see #values 685 */ 686 public ModifyableTableDescriptor setValue(byte[] key, byte[] value) { 687 return setValue(toBytesOrNull(key, v -> v), toBytesOrNull(value, v -> v)); 688 } 689 690 public ModifyableTableDescriptor setValue(String key, String value) { 691 return setValue(toBytesOrNull(key, Bytes::toBytes), toBytesOrNull(value, Bytes::toBytes)); 692 } 693 694 /** 695 * @param key The key. 696 * @param value The value. If null, removes the setting. 
697 */ 698 private ModifyableTableDescriptor setValue(final Bytes key, final String value) { 699 return setValue(key, toBytesOrNull(value, Bytes::toBytes)); 700 } 701 702 /** 703 * Setter for storing metadata as a (key, value) pair in {@link #values} map 704 * @param key The key. 705 * @param value The value. If null, removes the setting. 706 */ 707 public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) { 708 if (value == null || value.getLength() == 0) { 709 values.remove(key); 710 } else { 711 values.put(key, value); 712 } 713 return this; 714 } 715 716 private static <T> Bytes toBytesOrNull(T t, Function<T, byte[]> f) { 717 if (t == null) { 718 return null; 719 } else { 720 return new Bytes(f.apply(t)); 721 } 722 } 723 724 /** 725 * Remove metadata represented by the key from the {@link #values} map 726 * @param key Key whose key and value we're to remove from TableDescriptor parameters. 727 * @return the modifyable TD 728 */ 729 public ModifyableTableDescriptor removeValue(Bytes key) { 730 return setValue(key, (Bytes) null); 731 } 732 733 /** 734 * Remove metadata represented by the key from the {@link #values} map 735 * @param key Key whose key and value we're to remove from TableDescriptor parameters. 736 * @return the modifyable TD 737 */ 738 public ModifyableTableDescriptor removeValue(final byte[] key) { 739 return removeValue(new Bytes(key)); 740 } 741 742 /** 743 * Check if the readOnly flag of the table is set. If the readOnly flag is set then the contents 744 * of the table can only be read from but not modified. 745 * @return true if all columns in the table should be read only 746 */ 747 @Override 748 public boolean isReadOnly() { 749 return getOrDefault(READONLY_KEY, Boolean::valueOf, DEFAULT_READONLY); 750 } 751 752 /** 753 * Setting the table as read only sets all the columns in the table as read only. 
 By default all
     * tables are modifiable, but if the readOnly flag is set to true then the contents of the table
     * can only be read but not modified.
     * @param readOnly True if all of the columns in the table should be read only.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setReadOnly(final boolean readOnly) {
      return setValue(READONLY_KEY, Boolean.toString(readOnly));
    }

    /**
     * Check if the compaction enable flag of the table is true. If flag is false then no
     * minor/major compactions will be done in real.
     * @return true if table compaction enabled
     */
    @Override
    public boolean isCompactionEnabled() {
      return getOrDefault(COMPACTION_ENABLED_KEY, Boolean::valueOf, DEFAULT_COMPACTION_ENABLED);
    }

    /**
     * Setting the table compaction enable flag.
     * @param isEnable True if enable compaction.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCompactionEnabled(final boolean isEnable) {
      return setValue(COMPACTION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the split enable flag of the table is true. If flag is false then no split will be
     * done.
     * @return true if table region split enabled
     */
    @Override
    public boolean isSplitEnabled() {
      return getOrDefault(SPLIT_ENABLED_KEY, Boolean::valueOf, DEFAULT_SPLIT_ENABLED);
    }

    /**
     * Setting the table region split enable flag.
     * @param isEnable True if enable region split.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setSplitEnabled(final boolean isEnable) {
      return setValue(SPLIT_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the region merge enable flag of the table is true. If flag is false then no merge
     * will be done.
     * @return true if table region merge enabled
     */
    @Override
    public boolean isMergeEnabled() {
      return getOrDefault(MERGE_ENABLED_KEY, Boolean::valueOf, DEFAULT_MERGE_ENABLED);
    }

    /**
     * Setting the table region merge enable flag.
     * @param isEnable True if enable region merge.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMergeEnabled(final boolean isEnable) {
      return setValue(MERGE_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if normalization enable flag of the table is true. If flag is false then the region
     * normalizer won't attempt to normalize this table.
     * @return true if region normalization is enabled for this table
     **/
    @Override
    public boolean isNormalizationEnabled() {
      return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf,
        DEFAULT_NORMALIZATION_ENABLED);
    }

    /**
     * Check if there is the target region count. If so, the normalize plan will be calculated based
     * on the target region count.
     * @return target region count after normalize done; -1 when no target is configured
     */
    @Override
    public int getNormalizerTargetRegionCount() {
      return getOrDefault(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer::valueOf,
        Integer.valueOf(-1));
    }

    /**
     * Check if there is the target region size. If so, the normalize plan will be calculated based
     * on the target region size.
     * @return target region size after normalize done; -1 when no target is configured
     */
    @Override
    public long getNormalizerTargetRegionSize() {
      return getOrDefault(NORMALIZER_TARGET_REGION_SIZE_KEY, Long::valueOf, Long.valueOf(-1));
    }

    /**
     * Setting the table normalization enable flag.
     * @param isEnable True if enable normalization.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizationEnabled(final boolean isEnable) {
      return setValue(NORMALIZATION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Setting the target region count of table normalization .
     * @param regionCount the target region count.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionCount(final int regionCount) {
      return setValue(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer.toString(regionCount));
    }

    /**
     * Setting the target region size of table normalization.
     * @param regionSize the target region size.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionSize(final long regionSize) {
      return setValue(NORMALIZER_TARGET_REGION_SIZE_KEY, Long.toString(regionSize));
    }

    /**
     * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
     * @param durability enum value
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setDurability(Durability durability) {
      return setValue(DURABILITY_KEY, durability.name());
    }

    /**
     * Returns the durability setting for the table.
     * @return durability setting for the table.
     */
    @Override
    public Durability getDurability() {
      return getOrDefault(DURABILITY_KEY, Durability::valueOf, DEFAULT_DURABLITY);
    }

    /**
     * Get the name of the table
     */
    @Override
    public TableName getTableName() {
      return name;
    }

    /**
     * This sets the class associated with the region split policy which determines when a region
     * split should occur.
The class used by default is defined in 908 * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy 909 * @param clazz the class name 910 * @return the modifyable TD 911 */ 912 public ModifyableTableDescriptor setRegionSplitPolicyClassName(String clazz) { 913 return setValue(SPLIT_POLICY_KEY, clazz); 914 } 915 916 /** 917 * This gets the class associated with the region split policy which determines when a region 918 * split should occur. The class used by default is defined in 919 * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy 920 * @return the class name of the region split policy for this table. If this returns null, the 921 * default split policy is used. 922 */ 923 @Override 924 public String getRegionSplitPolicyClassName() { 925 return getOrDefault(SPLIT_POLICY_KEY, Function.identity(), null); 926 } 927 928 /** 929 * Returns the maximum size upto which a region can grow to after which a region split is 930 * triggered. The region size is represented by the size of the biggest store file in that 931 * region. 932 * @return max hregion size for table, -1 if not set. 933 * @see #setMaxFileSize(long) 934 */ 935 @Override 936 public long getMaxFileSize() { 937 return getOrDefault(MAX_FILESIZE_KEY, Long::valueOf, (long) -1); 938 } 939 940 /** 941 * Sets the maximum size upto which a region can grow to after which a region split is 942 * triggered. The region size is represented by the size of the biggest store file in that 943 * region, i.e. If the biggest store file grows beyond the maxFileSize, then the region split is 944 * triggered. This defaults to a value of 256 MB. 945 * <p> 946 * This is not an absolute value and might vary. Assume that a single row exceeds the 947 * maxFileSize then the storeFileSize will be greater than maxFileSize since a single row cannot 948 * be split across multiple regions 949 * </p> 950 * @param maxFileSize The maximum file size that a store file can grow to before a split is 951 * triggered. 
952 * @return the modifyable TD 953 */ 954 public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) { 955 return setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize)); 956 } 957 958 /** 959 * Returns the size of the memstore after which a flush to filesystem is triggered. 960 * @return memory cache flush size for each hregion, -1 if not set. 961 * @see #setMemStoreFlushSize(long) 962 */ 963 @Override 964 public long getMemStoreFlushSize() { 965 return getOrDefault(MEMSTORE_FLUSHSIZE_KEY, Long::valueOf, (long) -1); 966 } 967 968 /** 969 * Represents the maximum size of the memstore after which the contents of the memstore are 970 * flushed to the filesystem. This defaults to a size of 64 MB. 971 * @param memstoreFlushSize memory cache flush size for each hregion 972 * @return the modifyable TD 973 */ 974 public ModifyableTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) { 975 return setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize)); 976 } 977 978 /** 979 * This sets the class associated with the flush policy which determines determines the stores 980 * need to be flushed when flushing a region. The class used by default is defined in 981 * org.apache.hadoop.hbase.regionserver.FlushPolicy. 982 * @param clazz the class name 983 * @return the modifyable TD 984 */ 985 public ModifyableTableDescriptor setFlushPolicyClassName(String clazz) { 986 return setValue(FLUSH_POLICY_KEY, clazz); 987 } 988 989 /** 990 * This gets the class associated with the flush policy which determines the stores need to be 991 * flushed when flushing a region. The class used by default is defined in 992 * org.apache.hadoop.hbase.regionserver.FlushPolicy. 993 * @return the class name of the flush policy for this table. If this returns null, the default 994 * flush policy is used. 
995 */ 996 @Override 997 public String getFlushPolicyClassName() { 998 return getOrDefault(FLUSH_POLICY_KEY, Function.identity(), null); 999 } 1000 1001 /** 1002 * Adds a column family. For the updating purpose please use 1003 * {@link #modifyColumnFamily(ColumnFamilyDescriptor)} instead. 1004 * @param family to add. 1005 * @return the modifyable TD 1006 */ 1007 public ModifyableTableDescriptor setColumnFamily(final ColumnFamilyDescriptor family) { 1008 if (family.getName() == null || family.getName().length <= 0) { 1009 throw new IllegalArgumentException("Family name cannot be null or empty"); 1010 } 1011 int flength = family.getName() == null ? 0 : family.getName().length; 1012 if (flength > Byte.MAX_VALUE) { 1013 throw new IllegalArgumentException( 1014 "The length of family name is bigger than " + Byte.MAX_VALUE); 1015 } 1016 if (hasColumnFamily(family.getName())) { 1017 throw new IllegalArgumentException( 1018 "Family '" + family.getNameAsString() + "' already exists so cannot be added"); 1019 } 1020 return putColumnFamily(family); 1021 } 1022 1023 /** 1024 * Modifies the existing column family. 1025 * @param family to update 1026 * @return this (for chained invocation) 1027 */ 1028 public ModifyableTableDescriptor modifyColumnFamily(final ColumnFamilyDescriptor family) { 1029 if (family.getName() == null || family.getName().length <= 0) { 1030 throw new IllegalArgumentException("Family name cannot be null or empty"); 1031 } 1032 if (!hasColumnFamily(family.getName())) { 1033 throw new IllegalArgumentException( 1034 "Column family '" + family.getNameAsString() + "' does not exist"); 1035 } 1036 return putColumnFamily(family); 1037 } 1038 1039 private ModifyableTableDescriptor putColumnFamily(ColumnFamilyDescriptor family) { 1040 families.put(family.getName(), family); 1041 return this; 1042 } 1043 1044 /** 1045 * Checks to see if this table contains the given column family 1046 * @param familyName Family name or column name. 
1047 * @return true if the table contains the specified family name 1048 */ 1049 @Override 1050 public boolean hasColumnFamily(final byte[] familyName) { 1051 return families.containsKey(familyName); 1052 } 1053 1054 /** Returns Name of this table and then a map of all of the column family descriptors. */ 1055 @Override 1056 public String toString() { 1057 StringBuilder s = new StringBuilder(); 1058 s.append('\'').append(Bytes.toString(name.getName())).append('\''); 1059 s.append(getValues(true)); 1060 families.values().forEach(f -> s.append(", ").append(f)); 1061 return s.toString(); 1062 } 1063 1064 /** 1065 * @return Name of this table and then a map of all of the column family descriptors (with only 1066 * the non-default column family attributes) 1067 */ 1068 @Override 1069 public String toStringCustomizedValues() { 1070 StringBuilder s = new StringBuilder(); 1071 s.append('\'').append(Bytes.toString(name.getName())).append('\''); 1072 s.append(getValues(false)); 1073 families.values().forEach(hcd -> s.append(", ").append(hcd.toStringCustomizedValues())); 1074 return s.toString(); 1075 } 1076 1077 /** Returns map of all table attributes formatted into string. 
*/ 1078 public String toStringTableAttributes() { 1079 return getValues(true).toString(); 1080 } 1081 1082 private StringBuilder getValues(boolean printDefaults) { 1083 StringBuilder s = new StringBuilder(); 1084 1085 // step 1: set partitioning and pruning 1086 Set<Bytes> reservedKeys = new TreeSet<>(); 1087 Set<Bytes> userKeys = new TreeSet<>(); 1088 for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) { 1089 if (entry.getKey() == null || entry.getKey().get() == null) { 1090 continue; 1091 } 1092 String key = Bytes.toString(entry.getKey().get()); 1093 // in this section, print out reserved keywords + coprocessor info 1094 if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) { 1095 userKeys.add(entry.getKey()); 1096 continue; 1097 } 1098 // only print out IS_META if true 1099 String value = Bytes.toString(entry.getValue().get()); 1100 if (key.equalsIgnoreCase(IS_META)) { 1101 if (Boolean.valueOf(value) == false) { 1102 continue; 1103 } 1104 } 1105 // see if a reserved key is a default value. 
may not want to print it out 1106 if ( 1107 printDefaults || !DEFAULT_VALUES.containsKey(key) 1108 || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value) 1109 ) { 1110 reservedKeys.add(entry.getKey()); 1111 } 1112 } 1113 1114 // early exit optimization 1115 boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty(); 1116 if (!hasAttributes) { 1117 return s; 1118 } 1119 1120 s.append(", {"); 1121 // step 2: printing attributes 1122 if (hasAttributes) { 1123 s.append("TABLE_ATTRIBUTES => {"); 1124 1125 // print all reserved keys first 1126 boolean printCommaForAttr = false; 1127 for (Bytes k : reservedKeys) { 1128 String key = Bytes.toString(k.get()); 1129 String value = Bytes.toStringBinary(values.get(k).get()); 1130 if (printCommaForAttr) { 1131 s.append(", "); 1132 } 1133 printCommaForAttr = true; 1134 s.append(key); 1135 s.append(" => "); 1136 s.append('\'').append(value).append('\''); 1137 } 1138 1139 if (!userKeys.isEmpty()) { 1140 // print all non-reserved as a separate subset 1141 if (printCommaForAttr) { 1142 s.append(", "); 1143 } 1144 s.append(HConstants.METADATA).append(" => "); 1145 s.append("{"); 1146 boolean printCommaForCfg = false; 1147 for (Bytes k : userKeys) { 1148 String key = Bytes.toString(k.get()); 1149 String value = Bytes.toStringBinary(values.get(k).get()); 1150 if (printCommaForCfg) { 1151 s.append(", "); 1152 } 1153 printCommaForCfg = true; 1154 s.append('\'').append(key).append('\''); 1155 s.append(" => "); 1156 s.append('\'').append(value).append('\''); 1157 } 1158 s.append("}"); 1159 } 1160 1161 s.append("}"); 1162 } 1163 1164 s.append("}"); // end METHOD 1165 return s; 1166 } 1167 1168 /** 1169 * Compare the contents of the descriptor with another one passed as a parameter. Checks if the 1170 * obj passed is an instance of ModifyableTableDescriptor, if yes then the contents of the 1171 * descriptors are compared. 
1172 * @param obj The object to compare 1173 * @return true if the contents of the the two descriptors exactly match 1174 * @see java.lang.Object#equals(java.lang.Object) 1175 */ 1176 @Override 1177 public boolean equals(Object obj) { 1178 if (this == obj) { 1179 return true; 1180 } 1181 if (obj instanceof ModifyableTableDescriptor) { 1182 return TableDescriptor.COMPARATOR.compare(this, (ModifyableTableDescriptor) obj) == 0; 1183 } 1184 return false; 1185 } 1186 1187 /** Returns hash code */ 1188 @Override 1189 public int hashCode() { 1190 int result = this.name.hashCode(); 1191 if (this.families.size() > 0) { 1192 for (ColumnFamilyDescriptor e : this.families.values()) { 1193 result ^= e.hashCode(); 1194 } 1195 } 1196 result ^= values.hashCode(); 1197 return result; 1198 } 1199 1200 // Comparable 1201 /** 1202 * Compares the descriptor with another descriptor which is passed as a parameter. This compares 1203 * the content of the two descriptors and not the reference. 1204 * @param other The MTD to compare 1205 * @return 0 if the contents of the descriptors are exactly matching, 1 if there is a mismatch 1206 * in the contents 1207 */ 1208 @Override 1209 public int compareTo(final ModifyableTableDescriptor other) { 1210 return TableDescriptor.COMPARATOR.compare(this, other); 1211 } 1212 1213 @Override 1214 public ColumnFamilyDescriptor[] getColumnFamilies() { 1215 return families.values().toArray(new ColumnFamilyDescriptor[families.size()]); 1216 } 1217 1218 /** 1219 * Returns the configured replicas per region 1220 */ 1221 @Override 1222 public int getRegionReplication() { 1223 return getOrDefault(REGION_REPLICATION_KEY, Integer::valueOf, DEFAULT_REGION_REPLICATION); 1224 } 1225 1226 /** 1227 * Sets the number of replicas per region. 
1228 * @param regionReplication the replication factor per region 1229 * @return the modifyable TD 1230 */ 1231 public ModifyableTableDescriptor setRegionReplication(int regionReplication) { 1232 return setValue(REGION_REPLICATION_KEY, Integer.toString(regionReplication)); 1233 } 1234 1235 /** Returns true if the read-replicas memstore replication is enabled. */ 1236 @Override 1237 public boolean hasRegionMemStoreReplication() { 1238 return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf, 1239 DEFAULT_REGION_MEMSTORE_REPLICATION); 1240 } 1241 1242 /** 1243 * Enable or Disable the memstore replication from the primary region to the replicas. The 1244 * replication will be used only for meta operations (e.g. flush, compaction, ...) 1245 * @param memstoreReplication true if the new data written to the primary region should be 1246 * replicated. false if the secondaries can tollerate to have new 1247 * data only when the primary flushes the memstore. 1248 * @return the modifyable TD 1249 */ 1250 public ModifyableTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) { 1251 return setValue(REGION_MEMSTORE_REPLICATION_KEY, Boolean.toString(memstoreReplication)); 1252 } 1253 1254 public ModifyableTableDescriptor setPriority(int priority) { 1255 return setValue(PRIORITY_KEY, Integer.toString(priority)); 1256 } 1257 1258 @Override 1259 public int getPriority() { 1260 return getOrDefault(PRIORITY_KEY, Integer::valueOf, DEFAULT_PRIORITY); 1261 } 1262 1263 /** 1264 * Returns all the column family names of the current table. The map of TableDescriptor contains 1265 * mapping of family name to ColumnFamilyDescriptor. This returns all the keys of the family map 1266 * which represents the column family names of the table. 1267 * @return Immutable sorted set of the keys of the families. 
1268 */ 1269 @Override 1270 public Set<byte[]> getColumnFamilyNames() { 1271 return Collections.unmodifiableSet(this.families.keySet()); 1272 } 1273 1274 /** 1275 * Returns the ColumnFamilyDescriptor for a specific column family with name as specified by the 1276 * parameter column. 1277 * @param column Column family name 1278 * @return Column descriptor for the passed family name or the family on passed in column. 1279 */ 1280 @Override 1281 public ColumnFamilyDescriptor getColumnFamily(final byte[] column) { 1282 return this.families.get(column); 1283 } 1284 1285 /** 1286 * Removes the ColumnFamilyDescriptor with name specified by the parameter column from the table 1287 * descriptor 1288 * @param column Name of the column family to be removed. 1289 * @return Column descriptor for the passed family name or the family on passed in column. 1290 */ 1291 public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) { 1292 return this.families.remove(column); 1293 } 1294 1295 /** 1296 * Add a table coprocessor to this table. The coprocessor type must be 1297 * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class 1298 * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a 1299 * region is opened. 1300 * @param className Full class name. 1301 * @return the modifyable TD 1302 */ 1303 public ModifyableTableDescriptor setCoprocessor(String className) throws IOException { 1304 return setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className) 1305 .setPriority(Coprocessor.PRIORITY_USER).build()); 1306 } 1307 1308 /** 1309 * Add a table coprocessor to this table. The coprocessor type must be 1310 * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class 1311 * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a 1312 * region is opened. 
1313 * @throws IOException any illegal parameter key/value 1314 * @return the modifyable TD 1315 */ 1316 public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp) throws IOException { 1317 checkHasCoprocessor(cp.getClassName()); 1318 if (cp.getPriority() < 0) { 1319 throw new IOException( 1320 "Priority must be bigger than or equal with zero, current:" + cp.getPriority()); 1321 } 1322 // Validate parameter kvs and then add key/values to kvString. 1323 StringBuilder kvString = new StringBuilder(); 1324 for (Map.Entry<String, String> e : cp.getProperties().entrySet()) { 1325 if (!e.getKey().matches(CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) { 1326 throw new IOException("Illegal parameter key = " + e.getKey()); 1327 } 1328 if (!e.getValue().matches(CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) { 1329 throw new IOException("Illegal parameter (" + e.getKey() + ") value = " + e.getValue()); 1330 } 1331 if (kvString.length() != 0) { 1332 kvString.append(','); 1333 } 1334 kvString.append(e.getKey()); 1335 kvString.append('='); 1336 kvString.append(e.getValue()); 1337 } 1338 1339 String value = cp.getJarPath().orElse("") + "|" + cp.getClassName() + "|" 1340 + Integer.toString(cp.getPriority()) + "|" + kvString.toString(); 1341 return setCoprocessorToMap(value); 1342 } 1343 1344 /** 1345 * Add a table coprocessor to this table. The coprocessor type must be 1346 * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class 1347 * can be loaded or not. Whether a coprocessor is loadable or not will be determined when a 1348 * region is opened. 1349 * @param specStr The Coprocessor specification all in in one String 1350 * @return the modifyable TD 1351 * @deprecated used by HTableDescriptor and admin.rb. As of release 2.0.0, this will be removed 1352 * in HBase 3.0.0. 
1353 */ 1354 @Deprecated 1355 public ModifyableTableDescriptor setCoprocessorWithSpec(final String specStr) 1356 throws IOException { 1357 CoprocessorDescriptor cpDesc = 1358 toCoprocessorDescriptor(specStr).orElseThrow(() -> new IllegalArgumentException( 1359 "Format does not match " + CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr)); 1360 checkHasCoprocessor(cpDesc.getClassName()); 1361 return setCoprocessorToMap(specStr); 1362 } 1363 1364 private void checkHasCoprocessor(final String className) throws IOException { 1365 if (hasCoprocessor(className)) { 1366 throw new IOException("Coprocessor " + className + " already exists."); 1367 } 1368 } 1369 1370 /** 1371 * Add coprocessor to values Map 1372 * @param specStr The Coprocessor specification all in in one String 1373 * @return Returns <code>this</code> 1374 */ 1375 private ModifyableTableDescriptor setCoprocessorToMap(final String specStr) { 1376 if (specStr == null) { 1377 return this; 1378 } 1379 // generate a coprocessor key 1380 int maxCoprocessorNumber = 0; 1381 Matcher keyMatcher; 1382 for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) { 1383 keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get())); 1384 if (!keyMatcher.matches()) { 1385 continue; 1386 } 1387 maxCoprocessorNumber = 1388 Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber); 1389 } 1390 maxCoprocessorNumber++; 1391 String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber); 1392 return setValue(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr))); 1393 } 1394 1395 /** 1396 * Check if the table has an attached co-processor represented by the name className 1397 * @param classNameToMatch - Class name of the co-processor 1398 * @return true of the table has a co-processor className 1399 */ 1400 @Override 1401 public boolean hasCoprocessor(String classNameToMatch) { 1402 return getCoprocessorDescriptors().stream() 1403 .anyMatch(cp -> cp.getClassName().equals(classNameToMatch)); 
1404 } 1405 1406 /** 1407 * Return the list of attached co-processor represented by their name className 1408 * @return The list of co-processors classNames 1409 */ 1410 @Override 1411 public List<CoprocessorDescriptor> getCoprocessorDescriptors() { 1412 List<CoprocessorDescriptor> result = new ArrayList<>(); 1413 for (Map.Entry<Bytes, Bytes> e : getValues().entrySet()) { 1414 String key = Bytes.toString(e.getKey().get()).trim(); 1415 if (CP_HTD_ATTR_KEY_PATTERN.matcher(key).matches()) { 1416 toCoprocessorDescriptor(Bytes.toString(e.getValue().get()).trim()).ifPresent(result::add); 1417 } 1418 } 1419 return result; 1420 } 1421 1422 /** 1423 * Remove a coprocessor from those set on the table 1424 * @param className Class name of the co-processor 1425 */ 1426 public void removeCoprocessor(String className) { 1427 Bytes match = null; 1428 Matcher keyMatcher; 1429 Matcher valueMatcher; 1430 for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) { 1431 keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get())); 1432 if (!keyMatcher.matches()) { 1433 continue; 1434 } 1435 valueMatcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes.toString(e.getValue().get())); 1436 if (!valueMatcher.matches()) { 1437 continue; 1438 } 1439 // get className and compare 1440 String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field 1441 // remove the CP if it is present 1442 if (clazz.equals(className.trim())) { 1443 match = e.getKey(); 1444 break; 1445 } 1446 } 1447 // if we found a match, remove it 1448 if (match != null) { 1449 ModifyableTableDescriptor.this.removeValue(match); 1450 } 1451 } 1452 1453 /** 1454 * @deprecated since 2.0.0 and will be removed in 3.0.0. 1455 * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a> 1456 */ 1457 @Deprecated 1458 public ModifyableTableDescriptor setOwner(User owner) { 1459 return setOwnerString(owner != null ? 
owner.getShortName() : null); 1460 } 1461 1462 /** 1463 * @deprecated since 2.0.0 and will be removed in 3.0.0. 1464 * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a> 1465 */ 1466 // used by admin.rb:alter(table_name,*args) to update owner. 1467 @Deprecated 1468 public ModifyableTableDescriptor setOwnerString(String ownerString) { 1469 return setValue(OWNER_KEY, ownerString); 1470 } 1471 1472 /** 1473 * @deprecated since 2.0.0 and will be removed in 3.0.0. 1474 * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a> 1475 */ 1476 @Override 1477 @Deprecated 1478 public String getOwnerString() { 1479 // Note that every table should have an owner (i.e. should have OWNER_KEY set). 1480 // hbase:meta should return system user as owner, not null (see 1481 // MasterFileSystem.java:bootstrap()). 1482 return getOrDefault(OWNER_KEY, Function.identity(), null); 1483 } 1484 1485 /** Returns the bytes in pb format */ 1486 private byte[] toByteArray() { 1487 return ProtobufUtil.prependPBMagic(ProtobufUtil.toTableSchema(this).toByteArray()); 1488 } 1489 1490 /** 1491 * @param bytes A pb serialized {@link ModifyableTableDescriptor} instance with pb magic prefix 1492 * @return An instance of {@link ModifyableTableDescriptor} made from <code>bytes</code> 1493 * @see #toByteArray() 1494 */ 1495 private static TableDescriptor parseFrom(final byte[] bytes) throws DeserializationException { 1496 if (!ProtobufUtil.isPBMagicPrefix(bytes)) { 1497 throw new DeserializationException("Expected PB encoded ModifyableTableDescriptor"); 1498 } 1499 int pblen = ProtobufUtil.lengthOfPBMagic(); 1500 HBaseProtos.TableSchema.Builder builder = HBaseProtos.TableSchema.newBuilder(); 1501 try { 1502 ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen); 1503 return ProtobufUtil.toTableDescriptor(builder.build()); 1504 } catch (IOException e) { 1505 throw new DeserializationException(e); 1506 } 1507 } 1508 1509 @Override 1510 public 
int getColumnFamilyCount() { 1511 return families.size(); 1512 } 1513 1514 @Override 1515 public Optional<String> getRegionServerGroup() { 1516 Bytes value = values.get(RSGROUP_KEY); 1517 if (value != null) { 1518 return Optional.of(Bytes.toString(value.get(), value.getOffset(), value.getLength())); 1519 } else { 1520 return Optional.empty(); 1521 } 1522 } 1523 } 1524 1525 private static Optional<CoprocessorDescriptor> toCoprocessorDescriptor(String spec) { 1526 Matcher matcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(spec); 1527 if (matcher.matches()) { 1528 // jar file path can be empty if the cp class can be loaded 1529 // from class loader. 1530 String path = matcher.group(1).trim().isEmpty() ? null : matcher.group(1).trim(); 1531 String className = matcher.group(2).trim(); 1532 if (className.isEmpty()) { 1533 return Optional.empty(); 1534 } 1535 String priorityStr = matcher.group(3).trim(); 1536 int priority = 1537 priorityStr.isEmpty() ? Coprocessor.PRIORITY_USER : Integer.parseInt(priorityStr); 1538 String cfgSpec = null; 1539 try { 1540 cfgSpec = matcher.group(4); 1541 } catch (IndexOutOfBoundsException ex) { 1542 // ignore 1543 } 1544 Map<String, String> ourConf = new TreeMap<>(); 1545 if (cfgSpec != null && !cfgSpec.trim().equals("|")) { 1546 cfgSpec = cfgSpec.substring(cfgSpec.indexOf('|') + 1); 1547 Matcher m = CP_HTD_ATTR_VALUE_PARAM_PATTERN.matcher(cfgSpec); 1548 while (m.find()) { 1549 ourConf.put(m.group(1), m.group(2)); 1550 } 1551 } 1552 return Optional.of(CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path) 1553 .setPriority(priority).setProperties(ourConf).build()); 1554 } 1555 return Optional.empty(); 1556 } 1557}